forked from CCCHH/ansible-infra

Compare commits: move_to_so...main (211 commits)
Commits in this compare (SHA1):

f6c15773e1, 2aed20393f, c1e1897cda, 17ba7c04f2, 536eedeffc, 397285655b, 8e75f1ad14, c3b20abab3, 9c2fe5ea9b, 06ae220857,
1f2a08cf15, 2e5b0ab940, 3bba747dab, b90a57ffb0, ad783e4a15, 200e8019ed, 4f0c4bb276, 3abc375984, c8edde4d11, ca20721f04,
42b23eb181, 0f3cd2c70a, c33ae36af3, 2cd0811b29, 6a92aa68c1, 5693989c38, c7d51af5b4, 995dbb06e2, 11779ab21d, 8f7990acc0,
c6c0272448, 1523b15952, a5d291cea8, 652aa32e21, 0939771d08, c285694aaa, d35f1cc779, cee1fe970a, 0c782caee7, f887de25c5,
664b9115b8, b492472179, ddaa069204, 28f80a85f3, d514688574, d7b463ecb9, 0b6847493c, 744dc00ae5, fe52127e82, 51bbdd42a2,
428b5c70bc, 92601ab9ea, 3e0fdfa8de, 951ec7ebcd, a92e144cfc, c638790819, 70461c98ba, 968e29ccb8, 255327952e, 1971598e71,
372f264bcb, 2fbb37db18, bb30e88404, a41b07949c, ff550cbd8a, 49e3ecb986, a622f21b54, 40b67c6bc3, fbd3ea5496, 80ddb2efc9,
a328e92971, 25db54b8ad, 944c8cde82, 366456eff8, 1ca71a053e, b9add5bda3, 570600fce3, 5a476f2103, b72dee0d6d, 8b94a49f5e,
5f98dca56c, 66ee44366b, 183b91b9f2, d0618e3820, a9e394da06, d6ba70523c, 766aa125c4, c39cb0e390, df3710f019, 0eaaf9227c,
ddab157600, 80acd5fdc6, 5f6000adca, 6fea98ffd2, 63917722ff, aeec08fce8, cffe5c2b16, d690f81e3d, ae60d6fea6, 9f8d2d89cd,
e390b7c202, 8cefd07618, c3f71b1f08, dc6c7cbfb7, a11ccaf16c, 0f4fb68c97, ea5b4b8d69, 23ea666906, f7918e7b6f, 83fd868977,
a979fccd12, a03ed9a362, 01890fecbd, df32e1cac8, 747e5b2d4c, 3840553f9d, 839a9b2c0a, 37cedb1ad0, 658a50d19b, b2961c5664,
a13d23c7ea, 2f8897751b, a60946b3b8, 8f612d1d9c, d0d517d97d, 78a6be6f5d, 43fac32424, 282e82728b, f842723e9a, 7de516dc43,
94e1920388, f3902b43b1, 425d302fa9, b46747d251, 2aa55770d9, 8a8fdf5f97, cad2f036e5, 4b1c64b615, e76c66d74c, a32998d8da,
8388657d33, dce4e7c4d4, f646cc0bf2, dea66771e0, 9afbc71801, 1d6d1094bc, eadae7a09b, 17fd71f079, afceb886dc, 18dda95c46,
1f394a08dd, f943e95e2e, 9b8e14f3c4, 952fbf85c5, bd281713f1, 648489ed09, 434ddfc955, 8cb6ab3d04, 1322bcec58, 1eaf85501f,
dec68ab994, 2ae8692603, 1355d4d834, 592afdced9, 13a8dc9b6f, 9c50708b4e, cbb4beceb6, 9f87fa0225, 72489be8bd, 8bc9534ce6,
2e66e5de3b, a4c703b185, 9f0c276240, c119f91aeb, e628dcbce2, d734a1cc6c, ef4b45925c, 2edb3443d6, 4e651bca25, 2a322f9c85,
3a80459fa0, eefcbe0c43, 4c4ca9127a, 5863b2f9e3, a49b8b6d13, ff5f8ffc80, 1cc4ca6947, 2cb9dc6dae, 0a50ee470a, db99b153e4,
00bcd45111, 7900e458aa, 5fe5304463, 9b444ec4c4, 75bf485eac, 06c1ebbd5f, 3a9673b113, 15e200d96d, 36273da78b, 0248b64670,
c620f0f86b, 6824bf5e4f, 0e61131c1b, 73c19cc58d, e827005059, 7526d1c6a1, 9c44edece2, 8ff457b01e, b14083302b, abd751237b,
7f1afef50d
3773 changed files with 397672 additions and 3200 deletions
@@ -4,3 +4,6 @@ skip_list:
 exclude_paths:
   - .forgejo/
+  - "**/*.sops.yaml"
+  - ansible_collections/
+  - galaxy_roles/
@@ -10,7 +10,7 @@ jobs:
     name: Ansible Lint
     runs-on: docker
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
       - name: Install pip
         run: |
           apt update
@@ -24,7 +24,7 @@ jobs:
       # work in our environmnet.
       # Rather manually setup python (pip) before instead.
      - name: Run ansible-lint
-        uses: https://github.com/ansible/ansible-lint@v24.10.0
+        uses: https://github.com/ansible/ansible-lint@v26.1.1
        with:
          setup_python: "false"
          requirements_file: "requirements.yml"
.gitignore (vendored, +1)

@@ -0,0 +1 @@
+.ansible/
.sops.yaml (330 changed lines)

@@ -1,181 +1,203 @@
 keys:
-  - &admin_gpg_djerun EF643F59E008414882232C78FFA8331EEB7D6B70
-  - &admin_gpg_stb F155144FC925A1BEA1F8A2C59A2A4CD59BFDC5EC
-  - &admin_gpg_jtbx 18DFCE01456DAB52EA38A6584EDC64F35FA1D6A5
-  - &admin_gpg_yuri 87AB00D45D37C9E9167B5A5A333448678B60E505
-  - &admin_gpg_june 91213ABAA73B0B73D3C02B5B4E5F372D17BBE67C
-  - &admin_gpg_haegar F38C9D4228FC6F674E322D9C3326D914EB9B8F55
-  - &admin_gpg_dario 5DA93D5C9D7320E1BD3522C79C78172B3551C9FD
-  - &admin_gpg_echtnurich 8996B62CBD159DCADD3B6DC08BB33A8ABCF7BC4A
-  - &admin_gpg_max 9DFA033E3DAEBAD7FDD71B056C7AAA54BE05F7BA
-  - &admin_gpg_c6ristian B71138A6A8964A3C3B8899857B4F70C356765BAB
-  - &admin_gpg_lilly D2E9C0807BF681F5E164DAFC5EE1B61CD90954CD
-  - &admin_gpg_langoor 878FEA3CB6A6F6E7CD80ECBE28506E3585F9F533
+  admins:
+    gpg: &admin_gpg_keys
+      - &admin_gpg_djerun EF643F59E008414882232C78FFA8331EEB7D6B70
+      - &admin_gpg_stb F155144FC925A1BEA1F8A2C59A2A4CD59BFDC5EC
+      - &admin_gpg_jtbx 18DFCE01456DAB52EA38A6584EDC64F35FA1D6A5
+      - &admin_gpg_yuri 87AB00D45D37C9E9167B5A5A333448678B60E505
+      - &admin_gpg_june 057870A2C72CD82566A3EC983695F4FCBCAE4912
+      - &admin_gpg_haegar F38C9D4228FC6F674E322D9C3326D914EB9B8F55
+      - &admin_gpg_dario 5DA93D5C9D7320E1BD3522C79C78172B3551C9FD
+      - &admin_gpg_echtnurich 8996B62CBD159DCADD3B6DC08BB33A8ABCF7BC4A
+      - &admin_gpg_c6ristian B71138A6A8964A3C3B8899857B4F70C356765BAB
+      - &admin_gpg_lilly D2E9C0807BF681F5E164DAFC5EE1B61CD90954CD
+      - &admin_gpg_langoor 878FEA3CB6A6F6E7CD80ECBE28506E3585F9F533
+  hosts:
+    chaosknoten:
+      age: &host_chaosknoten_age_keys
+        - &host_netbox_ansible_pull_age_key age1ss82zwqkj438re78355p886r89csqrrfmkfp8lrrf8v23nza492qza4ey3
+        - &host_cloud_ansible_pull_age_key age1gdfhx5hy829uqkw4nwjwlpvl7zqvljguzsnjv0dpwz5q5u7dtf6s90wndt
+        - &host_eh22_wiki_ansible_pull_age_key age13nm6hfz66ce4wpn89fye05mag3l3h04etvz6wj7szm3vzrdlfupqhrp3fa
+        - &host_grafana_ansible_pull_age_key age1jtusr294t8mzar2qy857v6s329ret9s353y4kuulxwnlyy4dvpjsvyl67m
+        - &host_onlyoffice_ansible_pull_age_key age1a27euccw8j23wec76ls8vmzp7mntfcn4v8tkyegmg8alzfhk3suqwm6vgv
+        - &host_pretalx_ansible_pull_age_key age133wy6sxhgx3kkwxecra6xf9ey2uhnvtjpgwawwfmpvz0jpd0s5dqe385u3
+        - &host_sunders_ansible_pull_age_key age1na0nh9ndnr9cxpnlvstrxskr4fxf4spnkw48ufl7m43f98y40y7shhnvgd
+        - &host_wiki_ansible_pull_age_key age1sqs05anv4acculyap35e6vehdxw3g6ycwnvh6hsuv8u33re984zsnqfvqv
+        - &host_renovate_ansible_pull_age_key age18qam683rva3ee3wgue7r0ey4ws4jttz4a4dpe3q8kq8lmrp97ezq2cns8d
+        - &host_ccchoir_ansible_pull_age_key age19rg2cuj9smv8nzxmr03azfqe69edhep53dep6kvh83paf08zv58sntm0fg
+        - &host_tickets_ansible_pull_age_key age16znyzvquuy8467gg27mdwdt8k6kcu3fjrvfm6gnl4nmqp8tuvqaspqgcet
+        - &host_keycloak_ansible_pull_age_key age1azkgwrcwqhc6flj7gturptpl2uvay6pd94cam4t6yuk2n4wlnsqsj38hca
+        - &host_lists_ansible_pull_age_key age17x20h3m6wgfhereusc224u95ac8aj68fzlkkj5ptvs9c5vlz3usqdu7crq
+        - &host_mumble_ansible_pull_age_key age1wnympe3x8ce8hk87cymmt6wvccs4aes5rhhs44hq0s529v5z4g5sfyphwx
+        - &host_pad_ansible_pull_age_key age172pk7lyc6p4ewy0f2h6pau5d5sz6z8cq66hm4u4tpzx3an496a2sljx7x5
+        - &host_public_reverse_proxy_ansible_pull_age_key age1p7pxgq5kwcpdkhkh3qq4pvnltrdk4gwf60hdhv8ka0mdxmgnjepqyleyen
+        - &host_zammad_ansible_pull_age_key age1sv7uhpnk9d3u3je9zzvlux0kd83f627aclpamnz2h3ksg599838qjgrvqs
+        - &host_ntfy_ansible_pull_age_key age1dkecypmfuj0tcm2cz8vnvq5drpu2ddhgnfkzxvscs7m4e79gpseqyhr9pg
+        - &host_spaceapiccc_ansible_pull_age_key age1mdtnk78aeqnwqadjqje5pfha04wu92d3ecchyqajjmy434kwq98qksq2wa
+        - &host_acmedns_ansible_pull_age_key age16pxqxdj25xz6w200sf8duc62vyk0xkhzc7y63nyhg29sm077vp8qy4sywv
+    external:
+      age: &host_external_age_keys
+        - &host_status_ansible_pull_age_key age1yl9ts8k6ceymaxjs72r5puetes5mtuzxuger7qgme9qkagfrm9hqzxx9qr
 creation_rules:
-  - path_regex: resources/chaosknoten/cloud/.*
+  ## group vars
+  - path_regex: inventories/chaosknoten/group_vars/all.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/keycloak/.*
+          *admin_gpg_keys
+        age:
+          *host_chaosknoten_age_keys
+  - path_regex: inventories/external/group_vars/all.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/grafana/.*
+          *admin_gpg_keys
+        age:
+          *host_external_age_keys
+  - path_regex: inventories/z9/group_vars/all.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/pad/.*
+          *admin_gpg_keys
+  ## host vars
+  # chaosknoten hosts
+  - path_regex: inventories/chaosknoten/host_vars/acmedns.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/ccchoir/.*
+          *admin_gpg_keys
+        age:
+          - *host_acmedns_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/cloud.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/pretalx/.*
+          *admin_gpg_keys
+        age:
+          - *host_cloud_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/keycloak.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/netbox/.*
+          *admin_gpg_keys
+        age:
+          - *host_keycloak_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/grafana.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/tickets/.*
+          *admin_gpg_keys
+        age:
+          - *host_grafana_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/pad.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/onlyoffice/.*
+          *admin_gpg_keys
+        age:
+          - *host_pad_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/ccchoir.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
-  - path_regex: resources/chaosknoten/zammad/.*
+          *admin_gpg_keys
+        age:
+          - *host_ccchoir_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/pretalx.*
     key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
+          *admin_gpg_keys
+        age:
+          - *host_pretalx_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/netbox.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_netbox_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/tickets.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_tickets_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/onlyoffice.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_onlyoffice_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/zammad.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_zammad_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/ntfy.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_ntfy_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/eh22-wiki.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_eh22_wiki_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/sunders.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_sunders_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/wiki.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_wiki_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/renovate.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_renovate_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/lists.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_lists_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/mumble.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_mumble_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/public-reverse-proxy.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_public_reverse_proxy_ansible_pull_age_key
+  - path_regex: inventories/chaosknoten/host_vars/spaceapiccc.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_spaceapiccc_ansible_pull_age_key
+  # external hosts
+  - path_regex: inventories/external/host_vars/status.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+        age:
+          - *host_status_ansible_pull_age_key
+  # z9 hosts
+  - path_regex: inventories/z9/host_vars/dooris.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+  - path_regex: inventories/z9/host_vars/yate.*
+    key_groups:
+      - pgp:
+          *admin_gpg_keys
+  # general
   - key_groups:
       - pgp:
-          - *admin_gpg_djerun
-          - *admin_gpg_stb
-          - *admin_gpg_jtbx
-          - *admin_gpg_yuri
-          - *admin_gpg_june
-          - *admin_gpg_haegar
-          - *admin_gpg_dario
-          - *admin_gpg_echtnurich
-          - *admin_gpg_max
-          - *admin_gpg_c6ristian
-          - *admin_gpg_lilly
-          - *admin_gpg_langoor
+          *admin_gpg_keys
 stores:
   yaml:
     indent: 2
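The rewrite above replaces the per-rule key lists with YAML anchors (`&admin_gpg_keys`) and aliases (`*admin_gpg_keys`), so the admin list is maintained in exactly one place. A minimal sketch of the pattern with hypothetical keys, not the real fingerprints from the diff:

```yaml
keys:
  admins:
    gpg: &admin_gpg_keys
      - AAAA0000BBBB1111CCCC2222DDDD3333EEEE4444  # hypothetical admin GPG fingerprint
creation_rules:
  - path_regex: inventories/example/host_vars/demo.*
    key_groups:
      - pgp:
          *admin_gpg_keys  # alias: expands to the whole admin list
        age:
          - age1examplekey  # hypothetical per-host ansible-pull key
```

Files matching a rule are then encrypted for every admin GPG key plus the host's own age key, which is presumably what lets the `ansible-pull` setup on each host decrypt its own secrets while admins can decrypt everything.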
README.md (17 changed lines)

@@ -7,22 +7,25 @@ Folgende Geräte und Server werden duch dieses Ansible Repository verwaltet:
 
 Host-spezifische Konfigurationsdateien liegen unter `resources/` und werden für jeweils über eine `host_vars`-Datei im Inventory geladen.
 
-## Galaxy-Collections und -Rollen installieren
+## Galaxy-Collections und -Rollen
 
-Für einige Aspekte verwenden wir Rollen aus Ansible Galaxy. Die müssen zunächst installiert werden:
+Für einige Aspekte verwenden wir Collections und Rollen aus Ansible Galaxy. Diese werden in [`ansible_collections`](./ansible_collections/) bzw. [`galaxy-roles`](./galaxy-roles/) hier im Repo vorgehalten.
 
+Um unsere gevendorte Version zu aktualisieren, kann man folgendes machen:
 ```bash
 ansible-galaxy install -r requirements.yml
+ansible-galaxy role install -r requirements.yml
 ```
 
 ## Secrets
 
-Grundsätzlich sollten Secrets vermieden werden. (Also z.B.: Nutze SSH Keys statt Passwort.)
+Generally try to avoid secrets (e.g. use SSH keys instead of passwords).
 
-Da Secrets aber durchaus doch gebraucht werden, verwenden wir [SOPS](https://github.com/getsops/sops), um Secrets verschlüsselt in diesem Repo zu speichern.
-SOPS verschlüsselt hier die Secrets nach den "creation rules", welche in der `sops.yaml` festgelegt sind.
-Grundsätzlich werden hier alle Secrets für alle GPG-Keys aller Mitglieder des Infra-Teams verschlüsselt.
-Das eigentliche Laden der Secrets durch Ansible geschieht mit Hilfe des `community.sops.sops` lookup Plugins, welches entsprechend den lokalen GPG-Key benutzt, um die Secrets zu entschlüsseln.
+Because secrets are nonetheless needed sometimes, we use [SOPS](https://github.com/getsops/sops) to securely store secrets in this repository.
+SOPS encrypts secrets according to "creation rules" which are defined in the `.sops.yaml`.
+Generally all secrets get encrypted for all GPG-keys of all members of the infrastructure team.
+Ansible then has access to the secrets with the help of the [`community.sops.sops` vars plugin](https://docs.ansible.com/ansible/latest/collections/community/sops/docsite/guide.html#working-with-encrypted-variables), which is configured in this repository.
+A local Ansible run then uses the locally available GPG-key to decrypt the secrets.
 
 For a tutorial on how to set up secrets using SOPS for a new host, see [Setting Up Secrets Using SOPS for a New Host](./docs/setting_up_secrets_using_sops_for_a_new_host.md).
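The README now treats the Galaxy content as vendored in-repo rather than installed ad hoc. A plausible refresh sequence, assuming `requirements.yml` lists both collections and roles, with the `-p` install targets taken from the `ansible.cfg` hunk below (an assumption, not a documented command):

```bash
# refresh the vendored collections into ./ansible_collections/
ansible-galaxy collection install -r requirements.yml -p ./
# refresh the vendored roles into ./galaxy-roles/
ansible-galaxy role install -r requirements.yml -p ./galaxy-roles
```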
@@ -1,3 +1,6 @@
 [defaults]
 inventory = ./inventories/z9/hosts.yaml
 pipelining = True
+vars_plugins_enabled = host_group_vars,community.sops.sops
+collections_path = ./
+roles_path = ./galaxy-roles
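These three added lines wire the vendored content and SOPS together: `collections_path`/`roles_path` point Ansible at the in-repo copies, and enabling the `community.sops.sops` vars plugin makes encrypted vars files load transparently. A hypothetical layout (host and file names are illustrative; the plugin looks for vars files carrying a `.sops` marker before the extension):

```
inventories/chaosknoten/
├── hosts.yaml
└── host_vars/
    └── demo/
        ├── vars.yaml          # plain vars, loaded by the host_group_vars plugin
        └── secrets.sops.yaml  # SOPS-encrypted; decrypted on the fly by community.sops.sops
```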
ansible_collections/community/docker/.ansible-lint (new file, +30)

@@ -0,0 +1,30 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

skip_list:
  # Ignore rules that make no sense:
  - galaxy[tags]
  - galaxy[version-incorrect]
  - meta-runtime[unsupported-version]
  - no-changed-when
  - sanity[cannot-ignore]  # some of the rules you cannot ignore actually MUST be ignored, like yamllint:unparsable-with-libyaml
  - yaml  # we're using yamllint ourselves
  - run-once[task]  # wtf???

  # To be checked and maybe fixed:
  - ignore-errors
  - key-order[task]
  - name[casing]
  - name[missing]
  - name[play]
  - name[template]
  - no-free-form
  - no-handler
  - risky-file-permissions
  - risky-shell-pipe
  - var-naming[no-reserved]
  - var-naming[no-role-prefix]
  - var-naming[pattern]
  - var-naming[read-only]
@@ -0,0 +1,9 @@
<!--
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-->

## Azure Pipelines Configuration

Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
@@ -0,0 +1,280 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

trigger:
  batch: true
  branches:
    include:
      - main
      - stable-*

pr:
  autoCancel: true
  branches:
    include:
      - main
      - stable-*

schedules:
  - cron: 0 9 * * *
    displayName: Nightly
    always: true
    branches:
      include:
        - main
  - cron: 0 12 * * 0
    displayName: Weekly (old stable branches)
    always: true
    branches:
      include:
        - stable-4

variables:
  - name: checkoutPath
    value: ansible_collections/community/docker
  - name: coverageBranches
    value: main
  - name: entryPoint
    value: tests/utils/shippable/shippable.sh
  - name: fetchDepth
    value: 0

resources:
  containers:
    - container: default
      image: quay.io/ansible/azure-pipelines-test-container:7.0.0

pool: Standard

stages:

### Sanity & units
  - stage: Ansible_devel
    displayName: Sanity & Units devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: 'devel/sanity/1'
            - name: Units
              test: 'devel/units/1'
  - stage: Ansible_2_20
    displayName: Sanity & Units 2.20
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.20/sanity/1'
            - name: Units
              test: '2.20/units/1'
  - stage: Ansible_2_19
    displayName: Sanity & Units 2.19
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.19/sanity/1'
            - name: Units
              test: '2.19/units/1'
  - stage: Ansible_2_18
    displayName: Sanity & Units 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          targets:
            - name: Sanity
              test: '2.18/sanity/1'
            - name: Units
              test: '2.18/units/1'

### Docker
  - stage: Docker_devel
    displayName: Docker devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux/{0}
          targets:
            - name: Fedora 42
              test: fedora42
            - name: Ubuntu 22.04
              test: ubuntu2204
            - name: Ubuntu 24.04
              test: ubuntu2404
            - name: Alpine 3.22
              test: alpine322
          groups:
            - 4
            - 5
  - stage: Docker_2_20
    displayName: Docker 2.20
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.20/linux/{0}
          targets:
            - name: Fedora 42
              test: fedora42
            - name: Alpine 3.22
              test: alpine322
          groups:
            - 4
            - 5
  - stage: Docker_2_19
    displayName: Docker 2.19
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.19/linux/{0}
          targets:
            - name: Fedora 41
              test: fedora41
            - name: Alpine 3.21
              test: alpine321
          groups:
            - 4
            - 5
  - stage: Docker_2_18
    displayName: Docker 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.18/linux/{0}
          targets:
            - name: Fedora 40
              test: fedora40
            - name: Ubuntu 22.04
              test: ubuntu2204
            - name: Alpine 3.20
              test: alpine320
          groups:
            - 4
            - 5

### Community Docker
  - stage: Docker_community_devel
    displayName: Docker (community images) devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/linux-community/{0}
          targets:
            - name: Debian 11 Bullseye
              test: debian-bullseye/3.9
            - name: Debian 12 Bookworm
              test: debian-bookworm/3.11
            - name: Debian 13 Trixie
              test: debian-13-trixie/3.13
            - name: ArchLinux
              test: archlinux/3.13
          groups:
            - 4
            - 5

### Remote
  - stage: Remote_devel
    displayName: Remote devel
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: devel/{0}
          targets:
            - name: RHEL 10.0
              test: rhel/10.0
            - name: RHEL 9.6 with Docker SDK, urllib3, requests from sources
              test: rhel/9.6-dev-latest
            # For some reason, Ubuntu 24.04 is *extremely* slower than RHEL 9.6
            # - name: Ubuntu 24.04
            #   test: ubuntu/24.04
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_20
    displayName: Remote 2.20
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.20/{0}
          targets:
            - name: RHEL 9.6
              test: rhel/9.6
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_19
    displayName: Remote 2.19
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.19/{0}
          targets:
            - name: RHEL 9.5
              test: rhel/9.5
            - name: Ubuntu 22.04
              test: ubuntu/22.04
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5
  - stage: Remote_2_18
    displayName: Remote 2.18
    dependsOn: []
    jobs:
      - template: templates/matrix.yml
        parameters:
          testFormat: 2.18/{0}
          targets:
            - name: RHEL 9.4
              test: rhel/9.4
          groups:
            - 1
            - 2
            - 3
            - 4
            - 5

## Finally

  - stage: Summary
    condition: succeededOrFailed()
    dependsOn:
      - Ansible_devel
      - Ansible_2_20
      - Ansible_2_19
      - Ansible_2_18
      - Remote_devel
      - Remote_2_20
      - Remote_2_19
      - Remote_2_18
      - Docker_devel
      - Docker_2_20
      - Docker_2_19
      - Docker_2_18
      - Docker_community_devel
    jobs:
      - template: templates/coverage.yml
@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Aggregate code coverage results for later processing.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

agent_temp_directory="$1"

PATH="${PWD}/bin:${PATH}"

mkdir "${agent_temp_directory}/coverage/"

if [[ "$(ansible --version)" =~ \ 2\.9\. ]]; then
    exit
fi

options=(--venv --venv-system-site-packages --color -v)

ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"

if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
    # Only analyze coverage if the installed version of ansible-test supports it.
    # Doing so allows this script to work unmodified for multiple Ansible versions.
    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
fi
@@ -0,0 +1,64 @@
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import re
import shutil
import sys


def main():
    """Main program entry point."""
    source_directory = sys.argv[1]

    if '/ansible_collections/' in os.getcwd():
        output_path = "tests/output"
    else:
        output_path = "test/results"

    destination_directory = os.path.join(output_path, 'coverage')

    if not os.path.exists(destination_directory):
        os.makedirs(destination_directory)

    jobs = {}
    count = 0

    for name in os.listdir(source_directory):
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
        label = match.group('label')
        attempt = int(match.group('attempt'))
        jobs[label] = max(attempt, jobs.get(label, 0))

    for label, attempt in jobs.items():
        name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
        source = os.path.join(source_directory, name)
        source_files = os.listdir(source)

        for source_file in source_files:
            source_path = os.path.join(source, source_file)
            destination_path = os.path.join(destination_directory, source_file + '.' + label)
            print('"%s" -> "%s"' % (source_path, destination_path))
            shutil.copyfile(source_path, destination_path)
            count += 1

    print('Coverage file count: %d' % count)
    print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
    print('##vso[task.setVariable variable=outputPath]%s' % output_path)


if __name__ == '__main__':
    main()
@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Check the test results and set variables for use in later steps.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

if [[ "$PWD" =~ /ansible_collections/ ]]; then
    output_path="tests/output"
else
    output_path="test/results"
fi

echo "##vso[task.setVariable variable=outputPath]${output_path}"

if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
    echo "##vso[task.setVariable variable=haveTestResults]true"
fi

if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveBotResults]true"
fi

if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
    echo "##vso[task.setVariable variable=haveCoverageData]true"
fi
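The `##vso[task.setVariable ...]` strings echoed here (and printed by the Python script above) are Azure Pipelines logging commands: a step that writes one to stdout sets a pipeline variable that later steps can read. A minimal sketch of the round trip, mirroring how test.yml further down consumes these variables (paths illustrative):

```yaml
steps:
  - bash: echo "##vso[task.setVariable variable=haveTestResults]true"
    displayName: Set a variable from script output
  - task: PublishTestResults@2
    condition: eq(variables.haveTestResults, 'true')  # reads the variable set above
    inputs:
      testResultsFiles: "tests/output/junit/*.xml"
```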
ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.py (new executable file, +105)

@@ -0,0 +1,105 @@
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""
Upload code coverage reports to codecov.io.
Multiple coverage files from multiple languages are accepted and aggregated after upload.
Python coverage, as well as PowerShell and Python stubs can all be uploaded.
"""

import argparse
import dataclasses
import pathlib
import shutil
import subprocess
import tempfile
import typing as t
import urllib.request


@dataclasses.dataclass(frozen=True)
class CoverageFile:
    name: str
    path: pathlib.Path
    flags: t.List[str]


@dataclasses.dataclass(frozen=True)
class Args:
    dry_run: bool
    path: pathlib.Path


def parse_args() -> Args:
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--dry-run', action='store_true')
    parser.add_argument('path', type=pathlib.Path)

    args = parser.parse_args()

    # Store arguments in a typed dataclass
    fields = dataclasses.fields(Args)
    kwargs = {field.name: getattr(args, field.name) for field in fields}

    return Args(**kwargs)


def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
    processed = []
    for file in directory.joinpath('reports').glob('coverage*.xml'):
        name = file.stem.replace('coverage=', '')

        # Get flags from name
        flags = name.replace('-powershell', '').split('=')  # Drop '-powershell' suffix
        flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags]  # Remove "-01" from stub files

        processed.append(CoverageFile(name, file, flags))

    return tuple(processed)


def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
    for file in files:
        cmd = [
            str(codecov_bin),
            '--name', file.name,
            '--file', str(file.path),
        ]
        for flag in file.flags:
            cmd.extend(['--flags', flag])

        if dry_run:
            print(f'DRY-RUN: Would run command: {cmd}')
            continue

        subprocess.run(cmd, check=True)


def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
    if dry_run:
        print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
        return

    with urllib.request.urlopen(url) as resp:
        with dest.open('w+b') as f:
            # Read data in chunks rather than all at once
            shutil.copyfileobj(resp, f, 64 * 1024)

    dest.chmod(flags)


def main():
    args = parse_args()
    url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
    with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
        codecov_bin = pathlib.Path(tmpdir) / 'codecov'
        download_file(url, codecov_bin, 0o755, args.dry_run)

        files = process_files(args.path)
        upload_files(codecov_bin, files, args.dry_run)


if __name__ == '__main__':
    main()
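Given the argument parser above, the upload can be rehearsed without touching codecov.io; the positional path is whatever `$(outputPath)` resolves to in the pipeline (illustrative here):

```bash
# Dry run: prints the codecov commands instead of executing them
./publish-codecov.py --dry-run tests/output
```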
@@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

PATH="${PWD}/bin:${PATH}"

if [[ "$(ansible --version)" =~ \ 2\.9\. ]]; then
    exit
fi

if ! ansible-test --help >/dev/null 2>&1; then
    # Install the devel version of ansible-test for generating code coverage reports.
    # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
    # Since a version of ansible-test is required that can work the output from multiple older releases, the devel version is used.
    pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
fi

ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
ansible_collections/community/docker/.azure-pipelines/scripts/run-tests.sh (new executable file, +38)

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Configure the test environment and run the tests.

# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

set -o pipefail -eu

entry_point="$1"
test="$2"
read -r -a coverage_branches <<< "$3"  # space separated list of branches to run code coverage on for scheduled builds

export COMMIT_MESSAGE
export COMPLETE
export COVERAGE
export IS_PULL_REQUEST

if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
    IS_PULL_REQUEST=true
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
else
    IS_PULL_REQUEST=
    COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
fi

COMPLETE=
COVERAGE=

if [ "${BUILD_REASON}" = "Schedule" ]; then
    COMPLETE=yes

    if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
        COVERAGE=yes
    fi
fi

"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
@@ -0,0 +1,29 @@
#!/usr/bin/env python
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import time


def main():
    """Main program entry point."""
    start = time.time()

    sys.stdin.reconfigure(errors='surrogateescape')
    sys.stdout.reconfigure(errors='surrogateescape')

    for line in sys.stdin:
        seconds = time.time() - start
        sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
        sys.stdout.flush()


if __name__ == '__main__':
    main()
@@ -0,0 +1,34 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template adds a job for processing code coverage data.
# It will upload results to Azure Pipelines and codecov.io.
# Use it from a job stage that completes after all other jobs have completed.
# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.

jobs:
  - job: Coverage
    displayName: Code Coverage
    container: default
    workspace:
      clean: all
    steps:
      - checkout: self
        fetchDepth: $(fetchDepth)
        path: $(checkoutPath)
      - task: DownloadPipelineArtifact@2
        displayName: Download Coverage Data
        inputs:
          path: coverage/
          patterns: "Coverage */*=coverage.combined"
      - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
        displayName: Combine Coverage Data
      - bash: .azure-pipelines/scripts/report-coverage.sh
        displayName: Generate Coverage Report
        condition: gt(variables.coverageFileCount, 0)
      - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
        displayName: Publish to codecov.io
        condition: gt(variables.coverageFileCount, 0)
        continueOnError: true
@@ -0,0 +1,60 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
# If this matrix template does not provide the required functionality, consider using the test template directly instead.

parameters:
  # A required list of dictionaries, one per test target.
  # Each item in the list must contain a "test" or "name" key.
  # Both may be provided. If one is omitted, the other will be used.
  - name: targets
    type: object

  # An optional list of values which will be used to multiply the targets list into a matrix.
  # Values can be strings or numbers.
  - name: groups
    type: object
    default: []

  # An optional format string used to generate the job name.
  # - {0} is the name of an item in the targets list.
  - name: nameFormat
    type: string
    default: "{0}"

  # An optional format string used to generate the test name.
  # - {0} is the name of an item in the targets list.
  - name: testFormat
    type: string
    default: "{0}"

  # An optional format string used to add the group to the job name.
  # {0} is the formatted name of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: nameGroupFormat
    type: string
    default: "{0} - {{1}}"

  # An optional format string used to add the group to the test name.
  # {0} is the formatted test of an item in the targets list.
  # {{1}} is the group -- be sure to include the double "{{" and "}}".
  - name: testGroupFormat
    type: string
    default: "{0}/{{1}}"

jobs:
  - template: test.yml
    parameters:
      jobs:
        - ${{ if eq(length(parameters.groups), 0) }}:
            - ${{ each target in parameters.targets }}:
                - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
                  test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
        - ${{ if not(eq(length(parameters.groups), 0)) }}:
            - ${{ each group in parameters.groups }}:
                - ${{ each target in parameters.targets }}:
                    - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
                      test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
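To make the format-string machinery concrete: a sketch of how one Docker stage from azure-pipelines.yml above would expand. With `testFormat: devel/linux/{0}`, two targets, and groups `4`/`5`, the default `nameGroupFormat` ("{0} - {{1}}") and `testGroupFormat` ("{0}/{{1}}") yield four jobs handed to test.yml:

```yaml
# hypothetical expansion result (not literal template output)
jobs:
  - name: Fedora 42 - 4
    test: devel/linux/fedora42/4
  - name: Fedora 42 - 5
    test: devel/linux/fedora42/5
  - name: Alpine 3.22 - 4
    test: devel/linux/alpine322/4
  - name: Alpine 3.22 - 5
    test: devel/linux/alpine322/5
```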
@@ -0,0 +1,50 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# This template uses the provided list of jobs to create test one or more test jobs.
# It can be used directly if needed, or through the matrix template.

parameters:
  # A required list of dictionaries, one per test job.
  # Each item in the list must contain a "job" and "name" key.
  - name: jobs
    type: object

jobs:
  - ${{ each job in parameters.jobs }}:
      - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
        displayName: ${{ job.name }}
        container: default
        workspace:
          clean: all
        steps:
          - checkout: self
            fetchDepth: $(fetchDepth)
            path: $(checkoutPath)
          - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
            displayName: Run Tests
          - bash: .azure-pipelines/scripts/process-results.sh
            condition: succeededOrFailed()
            displayName: Process Results
          - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Aggregate Coverage Data
          - task: PublishTestResults@2
            condition: eq(variables.haveTestResults, 'true')
            inputs:
              testResultsFiles: "$(outputPath)/junit/*.xml"
            displayName: Publish Test Results
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveBotResults, 'true')
            displayName: Publish Bot Results
            inputs:
              targetPath: "$(outputPath)/bot/"
              artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
          - task: PublishPipelineArtifact@1
            condition: eq(variables.haveCoverageData, 'true')
            displayName: Publish Coverage Data
            inputs:
              targetPath: "$(Agent.TempDirectory)/coverage/"
              artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
ansible_collections/community/docker/.flake8 (new file, +13)

@@ -0,0 +1,13 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

[flake8]
extend-ignore = E203, E402, F401
count = true
# TODO: decrease this to ~10
max-complexity = 60
# black's max-line-length is 89, but it doesn't touch long string literals.
# Since ansible-test's limit is 160, let's use that here.
max-line-length = 160
statistics = true
ansible_collections/community/docker/.git-blame-ignore-revs (new file, +10)

@@ -0,0 +1,10 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Reformat YAML: https://github.com/ansible-collections/community.docker/pull/1071
2487d1a0bf4f2c79d3ab5a9e7d0f969432bf32a2
# Reformat with black and isort
d65d37e9e9a78e03a35643704b413121515ee39c
# Reformat with ruff check --fix instead of isort
712d920941d8e95d2826e0dbdc5f02914671d02a
ansible_collections/community/docker/.github/dependabot.yml (vendored, new file, +15)

@@ -0,0 +1,15 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    groups:
      ci:
        patterns:
          - "*"
ansible_collections/community/docker/.github/patchback.yml (vendored, new file, +9)

@@ -0,0 +1,9 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

backport_branch_prefix: patchback/backports/
backport_label_prefix: backport-
target_branch_prefix: stable-
...

ansible_collections/community/docker/.github/workflows/docker-images.yml (vendored Normal file, 90 lines)
@@ -0,0 +1,90 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Helper Docker images for testing
'on':
  # Run CI against all pushes (direct commits, also merged PRs), Pull Requests
  push:
    branches:
      - main
    paths:
      - .github/workflows/docker-images.yml
      - tests/images/**
  pull_request:
    branches:
      - main
    paths:
      - .github/workflows/docker-images.yml
      - tests/images/**
  # Run CI once per day (at 03:00 UTC)
  schedule:
    - cron: '0 3 * * *'

env:
  CONTAINER_REGISTRY: ghcr.io/ansible-collections

jobs:
  build:
    name: Build image ${{ matrix.name }}:${{ matrix.tag }}
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        include:
          - name: simple-1
            tag: tag
            tag-as-latest: true
          - name: simple-2
            tag: tag
            tag-as-latest: true
          - name: healthcheck
            tag: check
            tag-as-latest: true

    steps:
      - name: Check out repository
        uses: actions/checkout@v6
        with:
          persist-credentials: false

      - name: Install dependencies
        run: |
          sudo apt-get install podman buildah

      - name: Set up Go 1.22
        uses: actions/setup-go@v6
        with:
          go-version: '1.22'
          cache: false  # true (default) results in warnings since we don't use Go modules

      - name: Build ${{ matrix.name }} image
        run: |
          ./build.sh "${CONTAINER_REGISTRY}/${{ matrix.name }}:${{ matrix.tag }}"
        working-directory: tests/images/${{ matrix.name }}

      - name: Tag image as latest
        if: matrix.tag-as-latest && matrix.tag != 'latest'
        run: |
          podman tag "${CONTAINER_REGISTRY}/${{ matrix.name }}:${{ matrix.tag }}" "${CONTAINER_REGISTRY}/${{ matrix.name }}:latest"

      - name: Publish container image ${{ env.CONTAINER_REGISTRY }}/${{ matrix.name }}:${{ matrix.tag }}
        if: github.event_name != 'pull_request'
        uses: redhat-actions/push-to-registry@v2
        with:
          registry: ${{ env.CONTAINER_REGISTRY }}
          image: ${{ matrix.name }}
          tags: ${{ matrix.tag }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Publish container image ${{ env.CONTAINER_REGISTRY }}/${{ matrix.name }}:latest
        if: github.event_name != 'pull_request' && matrix.tag-as-latest && matrix.tag != 'latest'
        uses: redhat-actions/push-to-registry@v2
        with:
          registry: ${{ env.CONTAINER_REGISTRY }}
          image: ${{ matrix.name }}
          tags: latest
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
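
The build and tag steps above can be reproduced locally with the same commands the job runs; a minimal sketch, assuming podman and buildah are installed and using the simple-1 matrix entry as the example:

    # Build one helper image exactly as the 'Build ... image' step does
    cd tests/images/simple-1
    ./build.sh "ghcr.io/ansible-collections/simple-1:tag"

    # Mirror the 'Tag image as latest' step
    podman tag "ghcr.io/ansible-collections/simple-1:tag" "ghcr.io/ansible-collections/simple-1:latest"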

ansible_collections/community/docker/.github/workflows/docs-pr.yml (vendored Normal file, 96 lines)
@@ -0,0 +1,96 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Collection Docs
concurrency:
  group: docs-pr-${{ github.head_ref }}
  cancel-in-progress: true
'on':
  pull_request_target:
    types: [opened, synchronize, reopened, closed]

env:
  GHP_BASE_URL: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}

jobs:
  build-docs:
    permissions:
      contents: read
    name: Build Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-pr.yml@main
    with:
      collection-name: community.docker
      init-lenient: false
      init-fail-on-error: true
      squash-hierarchy: true
      init-project: Community.Docker Collection
      init-copyright: Community.Docker Contributors
      init-title: Community.Docker Collection Documentation
      init-html-short-title: Community.Docker Collection Docs
      init-extra-html-theme-options: |
        documentation_home_url=https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/branch/main/
      render-file-line: '> * `$<status>` [$<path_tail>](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/pr/${{ github.event.number }}/$<path_tail>)'
      extra-collections: community.library_inventory_filtering_v1

  publish-docs-gh-pages:
    # for now we won't run this on forks
    if: github.repository == 'ansible-collections/community.docker'
    permissions:
      contents: write
      pages: write
      id-token: write
    needs: [build-docs]
    name: Publish Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main
    with:
      artifact-name: ${{ needs.build-docs.outputs.artifact-name }}
      action: ${{ (github.event.action == 'closed' || needs.build-docs.outputs.changed != 'true') && 'teardown' || 'publish' }}
      publish-gh-pages-branch: true
    secrets:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  comment:
    permissions:
      pull-requests: write
    runs-on: ubuntu-latest
    needs: [build-docs, publish-docs-gh-pages]
    name: PR comments
    steps:
      - name: PR comment
        uses: ansible-community/github-docs-build/actions/ansible-docs-build-comment@main
        with:
          body-includes: '## Docs Build'
          reactions: heart
          action: ${{ needs.build-docs.outputs.changed != 'true' && 'remove' || '' }}
          on-closed-body: |
            ## Docs Build 📝

            This PR is closed and any previously published docsite has been unpublished.
          on-merged-body: |
            ## Docs Build 📝

            Thank you for contribution!✨

            This PR has been merged and the docs are now incorporated into `main`:
            ${{ env.GHP_BASE_URL }}/branch/main
          body: |
            ## Docs Build 📝

            Thank you for contribution!✨

            The docs for **this PR** have been published here:
            ${{ env.GHP_BASE_URL }}/pr/${{ github.event.number }}

            You can compare to the docs for the `main` branch here:
            ${{ env.GHP_BASE_URL }}/branch/main

            The docsite for **this PR** is also available for download as an artifact from this run:
            ${{ needs.build-docs.outputs.artifact-url }}

            File changes:

            ${{ needs.build-docs.outputs.diff-files-rendered }}

            ${{ needs.build-docs.outputs.diff-rendered }}
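
One detail worth noting in the action: line of publish-docs-gh-pages: GitHub Actions expressions have no ternary operator, so cond && 'teardown' || 'publish' emulates one. The idiom is safe here because 'teardown' is a non-empty, truthy string; if the middle operand could evaluate to an empty string, the || fallback would fire even when the condition holds.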

ansible_collections/community/docker/.github/workflows/docs-push.yml (vendored Normal file, 56 lines)
@@ -0,0 +1,56 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: Collection Docs
concurrency:
  group: docs-push-${{ github.sha }}
  cancel-in-progress: true
'on':
  push:
    branches:
      - main
      - stable-*
    tags:
      - '*'
  # Run CI once per day (at 09:00 UTC)
  schedule:
    - cron: '0 9 * * *'
  # Allow manual trigger (for newer antsibull-docs, sphinx-ansible-theme, ... versions)
  workflow_dispatch:

jobs:
  build-docs:
    permissions:
      contents: read
    name: Build Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-push.yml@main
    with:
      collection-name: community.docker
      init-lenient: false
      init-fail-on-error: true
      squash-hierarchy: true
      init-project: Community.Docker Collection
      init-copyright: Community.Docker Contributors
      init-title: Community.Docker Collection Documentation
      init-html-short-title: Community.Docker Collection Docs
      init-extra-html-theme-options: |
        documentation_home_url=https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/branch/main/
      extra-collections: community.library_inventory_filtering_v1

  publish-docs-gh-pages:
    # for now we won't run this on forks
    if: github.repository == 'ansible-collections/community.docker'
    permissions:
      contents: write
      pages: write
      id-token: write
    needs: [build-docs]
    name: Publish Ansible Docs
    uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main
    with:
      artifact-name: ${{ needs.build-docs.outputs.artifact-name }}
      publish-gh-pages-branch: true
    secrets:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

ansible_collections/community/docker/.github/workflows/nox.yml (vendored Normal file, 35 lines)
@@ -0,0 +1,35 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

name: nox
'on':
  push:
    branches:
      - main
      - stable-*
  pull_request:
  # Run CI once per day (at 09:00 UTC)
  schedule:
    - cron: '0 9 * * *'
  workflow_dispatch:

jobs:
  nox:
    uses: ansible-community/antsibull-nox/.github/workflows/reusable-nox-run.yml@main
    with:
      session-name: Run extra sanity tests
      change-detection-in-prs: true

  ansible-test:
    uses: ansible-community/antsibull-nox/.github/workflows/reusable-nox-matrix.yml@main
    with:
      change-detection-in-prs: true
      upload-codecov: true
      upload-codecov-pr: false
      upload-codecov-push: false
      upload-codecov-schedule: true
      max-ansible-core: "2.17"
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
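
Both jobs delegate to reusable workflows from antsibull-nox, which drives standard nox sessions, so the same checks can be run locally; a minimal sketch, assuming a virtualenv (the exact session names are defined by the collection's nox configuration, not listed here):

    pip install nox antsibull-nox   # assumption: not already installed
    nox --list                      # discover the sessions available for this collection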

ansible_collections/community/docker/.mypy.ini (Normal file, 27 lines)
@@ -0,0 +1,27 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

[mypy]
check_untyped_defs = True
disallow_untyped_defs = True

# strict = True -- only try to enable once everything (including dependencies!) is typed
strict_equality = True
strict_bytes = True

warn_redundant_casts = True
# warn_return_any = True
warn_unreachable = True

[mypy-ansible.*]
# ansible-core has partial typing information
follow_untyped_imports = True

[mypy-docker.*]
# Docker SDK for Python has partial typing information
follow_untyped_imports = True

[mypy-jsondiff.*]
# jsondiff has no typing information
ignore_missing_imports = True
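
mypy discovers .mypy.ini on its own when run from the directory that contains it, so no extra flags are needed locally; a minimal sketch (the plugins/ target is an assumption about where the typed code lives):

    pip install mypy      # assumption: not already installed
    mypy plugins/         # applies [mypy] plus the per-package overrides above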

ansible_collections/community/docker/.pylintrc (Normal file, 598 lines)
@@ -0,0 +1,598 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

[MAIN]

# Clear in-memory caches upon conclusion of linting. Useful if running pylint
# in a server-like mode.
clear-cache-post-run=no

# Load and enable all available extensions. Use --list-extensions to see a list
# all available extensions.
#enable-all-extensions=

# Specify a score threshold under which the program will exit with error.
fail-under=10

# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=0

# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.7

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no

# In verbose mode, extra non-checker-related info will be displayed.
#verbose=


[BASIC]

# Naming style matching correct argument names.
argument-naming-style=snake_case

# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=

# Naming style matching correct attribute names.
attr-naming-style=snake_case

# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=

# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
          bar,
          baz,
          toto,
          tutu,
          tata

# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=

# Naming style matching correct class attribute names.
class-attribute-naming-style=any

# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=

# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE

# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=

# Naming style matching correct class names.
class-naming-style=PascalCase

# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=

# Naming style matching correct constant names.
const-naming-style=UPPER_CASE

# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1

# Naming style matching correct function names.
function-naming-style=snake_case

# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=

# Good variable names which should always be accepted, separated by a comma.
good-names=i,
           j,
           k,
           ex,
           Run,
           _

# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=

# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no

# Naming style matching correct inline iteration names.
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=

# Naming style matching correct method names.
method-naming-style=snake_case

# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=

# Naming style matching correct module names.
module-naming-style=snake_case

# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty

# Regular expression matching correct type alias names. If left empty, type
# alias names will be checked with the set naming style.
#typealias-rgx=

# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=

# Naming style matching correct variable names.
variable-naming-style=snake_case

# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=


[CLASSES]

# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
                      __new__,
                      setUp,
                      asyncSetUp,
                      __post_init__

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs


[DESIGN]

# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=

# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=

# Maximum number of arguments for function / method.
max-args=5

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5

# Maximum number of branch for function / method body.
max-branches=12

# Maximum number of locals for function / method body.
max-locals=15

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of positional arguments for function / method.
max-positional-arguments=5

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of return / yield for function / method body.
max-returns=6

# Maximum number of statements in function / method body.
max-statements=50

# Minimum number of public methods for a class (see R0903).
min-public-methods=2


[EXCEPTIONS]

# Exceptions that will emit a warning when caught.
overgeneral-exceptions=builtins.BaseException,builtins.Exception


[FORMAT]

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Maximum number of characters on a single line.
max-line-length=160

# Maximum number of lines in a module.
max-module-lines=1000

# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no


[IMPORTS]

# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=

# Allow explicit reexports by alias from a package __init__.
allow-reexport-from-package=no

# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no

# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=

# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=

# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=

# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant

# Couples of modules and preferred modules, separated by a comma.
preferred-modules=


[LOGGING]

# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old

# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging


[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
           CONTROL_FLOW,
           INFERENCE,
           INFERENCE_FAILURE,
           UNDEFINED

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
        bad-inline-option,
        deprecated-pragma,
        duplicate-code,
        file-ignored,
        import-outside-toplevel,
        missing-class-docstring,
        missing-function-docstring,
        missing-module-docstring,
        locally-disabled,
        suppressed-message,
        use-implicit-booleaness-not-comparison,
        use-implicit-booleaness-not-comparison-to-string,
        use-implicit-booleaness-not-comparison-to-zero,
        superfluous-parens,
        too-few-public-methods,
        too-many-ancestors,
        too-many-arguments,
        too-many-boolean-expressions,
        too-many-branches,
        too-many-function-args,
        too-many-instance-attributes,
        too-many-lines,
        too-many-locals,
        too-many-nested-blocks,
        too-many-positional-arguments,
        too-many-public-methods,
        too-many-return-statements,
        too-many-statements,
        ungrouped-imports,
        useless-parent-delegation,
        wrong-import-order,
        wrong-import-position,
        # To clean up:
        fixme,
        import-error, # TODO figure out why pylint cannot find the module
        no-name-in-module, # TODO figure out why pylint cannot find the module
        protected-access,
        subprocess-popen-preexec-fn,
        unexpected-keyword-arg,
        unused-argument,
        # Cannot remove yet due to inadequacy of rules
        inconsistent-return-statements, # doesn't notice that fail_json() does not return
        # Buggy impementation in pylint:
        relative-beyond-top-level, # TODO

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=


[METHOD_ARGS]

# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
      XXX,
      TODO

# Regular expression of note tags to take in consideration.
notes-rgx=


[REFACTORING]

# Maximum number of nested blocks for function / method body
max-nested-blocks=5

# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error

# Let 'consider-using-join' be raised when the separator to join on would be
# non-empty (resulting in expected fixes of the type: ``"- " + " -
# ".join(items)``)
suggest-join-with-non-empty-separator=yes


[REPORTS]

# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=

# Set the output format. Available formats are: text, parseable, colorized,
# json2 (improved json format), json (old json format) and msvs (visual
# studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=

# Tells whether to display a full report or only the messages.
reports=no

# Activate the evaluation score.
score=yes


[SIMILARITIES]

# Comments are removed from the similarity computation
ignore-comments=yes

# Docstrings are removed from the similarity computation
ignore-docstrings=yes

# Imports are removed from the similarity computation
ignore-imports=yes

# Signatures are removed from the similarity computation
ignore-signatures=yes

# Minimum lines number of a similarity.
min-similarity-lines=4


[SPELLING]

# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4

# Spelling dictionary name. No available dictionaries : You need to install
# both the python package and the system dependency for enchant to work.
spelling-dict=

# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no


[STRING]

# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no

# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no


[TYPECHECK]

# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=

# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes

# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes

# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
                          not-async-context-manager,
                          not-context-manager,
                          attribute-defined-outside-init

# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace

# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes

# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1

# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1

# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin

# List of decorators that change the signature of a decorated function.
signature-mutators=


[VARIABLES]

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=

# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes

# List of names allowed to shadow builtins
allowed-redefined-builtins=

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
          _cb

# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_

# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_

# Tells whether we should check for unused import in __init__ files.
init-import=no

# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
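
Like the mypy configuration, this file is auto-discovered: pylint looks for .pylintrc in the current directory. A minimal sketch of a local run (the target path is an assumption):

    pip install pylint
    pylint plugins/       # the [MESSAGES CONTROL] disable list above applies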

ansible_collections/community/docker/.yamllint (Normal file, 53 lines)
@@ -0,0 +1,53 @@
---
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

extends: default

ignore: |
  /changelogs/

rules:
  line-length:
    max: 300
    level: error
  document-start:
    present: true
  document-end: false
  truthy:
    level: error
    allowed-values:
      - 'true'
      - 'false'
  indentation:
    spaces: 2
    indent-sequences: true
  key-duplicates: enable
  trailing-spaces: enable
  new-line-at-end-of-file: disable
  hyphens:
    max-spaces-after: 1
  empty-lines:
    max: 2
    max-start: 0
    max-end: 0
  commas:
    max-spaces-before: 0
    min-spaces-after: 1
    max-spaces-after: 1
  colons:
    max-spaces-before: 0
    max-spaces-after: 1
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 0
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  octal-values:
    forbid-implicit-octal: true
    forbid-explicit-octal: true
  comments:
    min-spaces-from-content: 1
  comments-indentation: false
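
yamllint likewise finds .yamllint in the working directory automatically, so linting the collection's YAML is a one-liner; a minimal sketch:

    pip install yamllint
    yamllint .            # uses the rules above; /changelogs/ is ignored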

ansible_collections/community/docker/.yamllint-docs (Normal file, 54 lines)
@@ -0,0 +1,54 @@
---
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

extends: default

ignore: |
  /changelogs/

rules:
  line-length:
    max: 160
    level: error
  document-start:
    present: false
  document-end:
    present: false
  truthy:
    level: error
    allowed-values:
      - 'true'
      - 'false'
  indentation:
    spaces: 2
    indent-sequences: true
  key-duplicates: enable
  trailing-spaces: enable
  new-line-at-end-of-file: disable
  hyphens:
    max-spaces-after: 1
  empty-lines:
    max: 2
    max-start: 0
    max-end: 0
  commas:
    max-spaces-before: 0
    min-spaces-after: 1
    max-spaces-after: 1
  colons:
    max-spaces-before: 0
    max-spaces-after: 1
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 0
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  octal-values:
    forbid-implicit-octal: true
    forbid-explicit-octal: true
  comments:
    min-spaces-from-content: 1
  comments-indentation: false

ansible_collections/community/docker/.yamllint-examples (Normal file, 54 lines)
@@ -0,0 +1,54 @@
---
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

extends: default

ignore: |
  /changelogs/

rules:
  line-length:
    max: 160
    level: error
  document-start:
    present: true
  document-end:
    present: false
  truthy:
    level: error
    allowed-values:
      - 'true'
      - 'false'
  indentation:
    spaces: 2
    indent-sequences: true
  key-duplicates: enable
  trailing-spaces: enable
  new-line-at-end-of-file: disable
  hyphens:
    max-spaces-after: 1
  empty-lines:
    max: 2
    max-start: 0
    max-end: 0
  commas:
    max-spaces-before: 0
    min-spaces-after: 1
    max-spaces-after: 1
  colons:
    max-spaces-before: 0
    max-spaces-after: 1
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 0
  braces:
    min-spaces-inside: 0
    max-spaces-inside: 1
  octal-values:
    forbid-implicit-octal: true
    forbid-explicit-octal: true
  comments:
    min-spaces-from-content: 1
  comments-indentation: false
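
The -docs and -examples variants differ from .yamllint only in a stricter line-length limit (160 instead of 300) and in their document-start/document-end requirements, matching the tighter conventions for documentation YAML and EXAMPLES blocks. Since yamllint only auto-discovers .yamllint, these variants have to be selected explicitly with -c; presumably the test tooling does this, but manually it would look like:

    yamllint -c .yamllint-docs <files>      # <files> left unspecified; the real caller is the sanity tooling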

ansible_collections/community/docker/CHANGELOG.md (Normal file, 2123 lines)
File diff suppressed because it is too large.

ansible_collections/community/docker/CHANGELOG.md.license (Normal file, 3 lines)
@@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project

ansible_collections/community/docker/CHANGELOG.rst (Normal file, 1785 lines)
File diff suppressed because it is too large.

ansible_collections/community/docker/CHANGELOG.rst.license (Normal file, 3 lines)
@@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project

ansible_collections/community/docker/COPYING (Normal file, 674 lines)
@@ -0,0 +1,674 @@
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7. This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy. This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged. This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source. This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge. You need not require recipients to copy the
    Corresponding Source along with the object code. If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source. Regardless of what server hosts the
|
Corresponding Source, you remain obligated to ensure that it is
|
||||||
|
available for as long as needed to satisfy these requirements.
|
||||||
|
|
||||||
|
e) Convey the object code using peer-to-peer transmission, provided
|
||||||
|
you inform other peers where the object code and Corresponding
|
||||||
|
Source of the work are being offered to the general public at no
|
||||||
|
charge under subsection 6d.
|
||||||
|
|
||||||
|
A separable portion of the object code, whose source code is excluded
|
||||||
|
from the Corresponding Source as a System Library, need not be
|
||||||
|
included in conveying the object code work.
|
||||||
|
|
||||||
|
A "User Product" is either (1) a "consumer product", which means any
|
||||||
|
tangible personal property which is normally used for personal, family,
|
||||||
|
or household purposes, or (2) anything designed or sold for incorporation
|
||||||
|
into a dwelling. In determining whether a product is a consumer product,
|
||||||
|
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||||
|
product received by a particular user, "normally used" refers to a
|
||||||
|
typical or common use of that class of product, regardless of the status
|
||||||
|
of the particular user or of the way in which the particular user
|
||||||
|
actually uses, or expects or is expected to use, the product. A product
|
||||||
|
is a consumer product regardless of whether the product has substantial
|
||||||
|
commercial, industrial or non-consumer uses, unless such uses represent
|
||||||
|
the only significant mode of use of the product.
|
||||||
|
|
||||||
|
"Installation Information" for a User Product means any methods,
|
||||||
|
procedures, authorization keys, or other information required to install
|
||||||
|
and execute modified versions of a covered work in that User Product from
|
||||||
|
a modified version of its Corresponding Source. The information must
|
||||||
|
suffice to ensure that the continued functioning of the modified object
|
||||||
|
code is in no case prevented or interfered with solely because
|
||||||
|
modification has been made.
|
||||||
|
|
||||||
|
If you convey an object code work under this section in, or with, or
|
||||||
|
specifically for use in, a User Product, and the conveying occurs as
|
||||||
|
part of a transaction in which the right of possession and use of the
|
||||||
|
User Product is transferred to the recipient in perpetuity or for a
|
||||||
|
fixed term (regardless of how the transaction is characterized), the
|
||||||
|
Corresponding Source conveyed under this section must be accompanied
|
||||||
|
by the Installation Information. But this requirement does not apply
|
||||||
|
if neither you nor any third party retains the ability to install
|
||||||
|
modified object code on the User Product (for example, the work has
|
||||||
|
been installed in ROM).
|
||||||
|
|
||||||
|
The requirement to provide Installation Information does not include a
|
||||||
|
requirement to continue to provide support service, warranty, or updates
|
||||||
|
for a work that has been modified or installed by the recipient, or for
|
||||||
|
the User Product in which it has been modified or installed. Access to a
|
||||||
|
network may be denied when the modification itself materially and
|
||||||
|
adversely affects the operation of the network or violates the rules and
|
||||||
|
protocols for communication across the network.
|
||||||
|
|
||||||
|
Corresponding Source conveyed, and Installation Information provided,
|
||||||
|
in accord with this section must be in a format that is publicly
|
||||||
|
documented (and with an implementation available to the public in
|
||||||
|
source code form), and must require no special password or key for
|
||||||
|
unpacking, reading or copying.
|
||||||
|
|
||||||
|
7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program
into proprietary programs.  If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
6564 ansible_collections/community/docker/FILES.json (Normal file)
File diff suppressed because it is too large

191 ansible_collections/community/docker/LICENSES/Apache-2.0.txt (Normal file)
@@ -0,0 +1,191 @@

Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

    (a) You must give any other recipients of the Work or
    Derivative Works a copy of this License; and

    (b) You must cause any modified files to carry prominent notices
    stating that You changed the files; and

    (c) You must retain, in the Source form of any Derivative Works
    that You distribute, all copyright, patent, trademark, and
    attribution notices from the Source form of the Work,
    excluding those notices that do not pertain to any part of
    the Derivative Works; and

    (d) If the Work includes a "NOTICE" text file as part of its
    distribution, then any Derivative Works that You distribute must
    include a readable copy of the attribution notices contained
    within such NOTICE file, excluding those notices that do not
    pertain to any part of the Derivative Works, in at least one
    of the following places: within a NOTICE text file distributed
    as part of the Derivative Works; within the Source form or
    documentation, if provided along with the Derivative Works; or,
    within a display generated by the Derivative Works, if and
    wherever such third-party notices normally appear. The contents
    of the NOTICE file are for informational purposes only and
    do not modify the License. You may add Your own attribution
    notices within Derivative Works that You distribute, alongside
    or as an addendum to the NOTICE text from the Work, provided
    that such additional attribution notices cannot be construed
    as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1 ansible_collections/community/docker/LICENSES/GPL-3.0-or-later.txt
@@ -0,0 +1 @@
../COPYING

35 ansible_collections/community/docker/MANIFEST.json (Normal file)
@@ -0,0 +1,35 @@
{
  "collection_info": {
    "namespace": "community",
    "name": "docker",
    "version": "5.0.5",
    "authors": [
      "Ansible Docker Working Group"
    ],
    "readme": "README.md",
    "tags": [
      "docker"
    ],
    "description": "Modules and plugins for working with Docker",
    "license": [
      "GPL-3.0-or-later",
      "Apache-2.0"
    ],
    "license_file": null,
    "dependencies": {
      "community.library_inventory_filtering_v1": ">=1.0.0"
    },
    "repository": "https://github.com/ansible-collections/community.docker",
    "documentation": "https://docs.ansible.com/ansible/latest/collections/community/docker/",
    "homepage": "https://github.com/ansible-collections/community.docker",
    "issues": "https://github.com/ansible-collections/community.docker/issues"
  },
  "file_manifest_file": {
    "name": "FILES.json",
    "ftype": "file",
    "chksum_type": "sha256",
    "chksum_sha256": "1aa5979880106388e42eb161e2d88931ab8a128b982afd506536bed31a71c176",
    "format": 1
  },
  "format": 1
}

164 ansible_collections/community/docker/README.md (Normal file)
@@ -0,0 +1,164 @@
<!--
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-->

# Docker Community Collection

[Documentation](https://docs.ansible.com/ansible/devel/collections/community/docker/)
[CI (Azure Pipelines)](https://dev.azure.com/ansible/community.docker/_build?definitionId=25)
[CI (GitHub Actions)](https://github.com/ansible-collections/community.docker/actions)
[Code coverage](https://codecov.io/gh/ansible-collections/community.docker)
[REUSE status](https://api.reuse.software/info/github.com/ansible-collections/community.docker)

This repo contains the `community.docker` Ansible Collection. The collection includes many modules and plugins to work with Docker.

Please note that this collection does **not** support Windows targets. The connection plugins included in this collection support Windows targets on a best-effort basis, but we are not testing this in CI.

## Code of Conduct

We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.

If you encounter abusive behavior that violates the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.

## Communication

* Join the Ansible forum:
  * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. Please add appropriate tags if you start new discussions, for example the `docker`, `docker-compose`, or `docker-swarm` tags.
  * [Posts tagged with 'docker'](https://forum.ansible.com/tag/docker): subscribe to participate in Docker related conversations.
  * [Posts tagged with 'docker-compose'](https://forum.ansible.com/tag/docker-compose): subscribe to participate in Docker Compose related conversations.
  * [Posts tagged with 'docker-swarm'](https://forum.ansible.com/tag/docker-swarm): subscribe to participate in Docker Swarm related conversations.
  * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts.
  * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events.
* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes.

For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).

## Tested with Ansible

Tested with the current ansible-core 2.17, ansible-core 2.18, and ansible-core 2.19 releases, and the current development version of ansible-core. Ansible/ansible-base versions before 2.17.0 are not supported.

## External requirements

Some modules and plugins require the Docker CLI or other external programs. Some require the [Docker SDK for Python](https://pypi.org/project/docker/) and some use [requests](https://pypi.org/project/requests/) to communicate directly with the Docker daemon API. All modules and plugins require Python 2.7 or later. Python 2.6 is no longer supported; use community.docker 2.x.y if you need to use Python 2.6.

Installing the Docker SDK for Python also installs the requirements for the modules and plugins that use `requests`. If you want to install the Python libraries directly instead of the SDK, you need the following ones:

- [requests](https://pypi.org/project/requests/);
- [pywin32](https://pypi.org/project/pywin32/) when using named pipes on Windows with the Windows 32 API;
- [paramiko](https://pypi.org/project/paramiko/) when using SSH to connect to the Docker daemon with `use_ssh_client=false`;
- [pyOpenSSL](https://pypi.org/project/pyOpenSSL/) when using TLS to connect to the Docker daemon;
- [backports.ssl_match_hostname](https://pypi.org/project/backports.ssl_match_hostname/) when using TLS to connect to the Docker daemon on Python 2.

If you have Docker SDK for Python < 2.0.0 installed ([docker-py](https://pypi.org/project/docker-py/)), you can still use it for modules that support it, though we recommend uninstalling it and then installing [docker](https://pypi.org/project/docker/), the Docker SDK for Python >= 2.0.0. Note that both libraries cannot be installed at the same time. If you accidentally installed them simultaneously, you have to uninstall *both* before re-installing one of them.

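As a minimal sketch of the above (this example is not part of the upstream README, and the `docker_hosts` group name is made up), you could preinstall the SDK on the managed hosts with `ansible.builtin.pip` before running any SDK-based module:

```yaml
---
# Hypothetical prep play: ensure the Docker SDK for Python (>= 2.0.0,
# package name "docker") is present on the managed hosts. Installing it
# also pulls in "requests", which the API-based modules use.
- name: Ensure the Docker SDK for Python is installed
  hosts: docker_hosts  # made-up group name
  become: true
  tasks:
    - name: Install the docker Python package
      ansible.builtin.pip:
        name: docker>=2.0.0
        state: present
```
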
## Collection Documentation

Browsing the [**latest** collection documentation](https://docs.ansible.com/ansible/latest/collections/community/docker) will show docs for the _latest version released in the Ansible package_, not the latest version of the collection released on Galaxy.

Browsing the [**devel** collection documentation](https://docs.ansible.com/ansible/devel/collections/community/docker) shows docs for the _latest version released on Galaxy_.

We also separately publish [**latest commit** collection documentation](https://ansible-collections.github.io/community.docker/branch/main/) which shows docs for the _latest commit in the `main` branch_.

If you use the Ansible package and do not update collections independently, use **latest**. If you install or update this collection directly from Galaxy, use **devel**. If you are looking to contribute, use **latest commit**.

## Included content

* Connection plugins:
  - community.docker.docker: use Docker containers as remotes using the Docker CLI program
  - community.docker.docker_api: use Docker containers as remotes using the Docker API
  - community.docker.nsenter: execute commands on the host running the controller container
* Inventory plugins:
  - community.docker.docker_containers: dynamic inventory plugin for Docker containers
  - community.docker.docker_machine: collect Docker machines as inventory
  - community.docker.docker_swarm: collect Docker Swarm nodes as inventory
* Modules:
  * Docker:
    - community.docker.docker_container: manage Docker containers
    - community.docker.docker_container_copy_into: copy a file into a Docker container
    - community.docker.docker_container_exec: run commands in Docker containers
    - community.docker.docker_container_info: retrieve information on Docker containers
    - community.docker.docker_host_info: retrieve information on the Docker daemon
    - community.docker.docker_image: manage Docker images
    - community.docker.docker_image_build: build Docker images using Docker buildx
    - community.docker.docker_image_export: export (archive) Docker images
    - community.docker.docker_image_info: retrieve information on Docker images
    - community.docker.docker_image_load: load Docker images from archives
    - community.docker.docker_image_pull: pull Docker images from registries
    - community.docker.docker_image_push: push Docker images to registries
    - community.docker.docker_image_remove: remove Docker images
    - community.docker.docker_image_tag: tag Docker images with new names and/or tags
    - community.docker.docker_login: log in and out to/from registries
    - community.docker.docker_network: manage Docker networks
    - community.docker.docker_network_info: retrieve information on Docker networks
    - community.docker.docker_plugin: manage Docker plugins
    - community.docker.docker_prune: prune Docker containers, images, networks, volumes, and build data
    - community.docker.docker_volume: manage Docker volumes
    - community.docker.docker_volume_info: retrieve information on Docker volumes
  * Docker Compose:
    - community.docker.docker_compose_v2: manage Docker Compose files (Docker compose CLI plugin)
    - community.docker.docker_compose_v2_exec: run command in a container of a Compose service
    - community.docker.docker_compose_v2_pull: pull a Docker compose project
    - community.docker.docker_compose_v2_run: run command in a new container of a Compose service
  * Docker Swarm:
    - community.docker.docker_config: manage configurations
    - community.docker.docker_node: manage Docker Swarm nodes
    - community.docker.docker_node_info: retrieve information on Docker Swarm nodes
    - community.docker.docker_secret: manage secrets
    - community.docker.docker_swarm: manage Docker Swarm
    - community.docker.docker_swarm_info: retrieve information on Docker Swarm
    - community.docker.docker_swarm_service: manage Docker Swarm services
    - community.docker.docker_swarm_service_info: retrieve information on Docker Swarm services
  * Docker Stack:
    - community.docker.docker_stack: manage Docker Stacks
    - community.docker.docker_stack_info: retrieve information on Docker Stacks
    - community.docker.docker_stack_task_info: retrieve information on tasks in Docker Stacks
  * Other:
    - current_container_facts: return facts about whether the module runs in a Docker container

## Using this collection

Before using the Docker community collection, you need to install the collection with the `ansible-galaxy` CLI:

    ansible-galaxy collection install community.docker

You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:

```yaml
collections:
  - name: community.docker
```

See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.

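As a quick illustration (this example is not from the upstream README; the container name, image, and ports are made up), a play using the collection's `docker_container` module could look like this:

```yaml
---
# Hypothetical example play: ensure an nginx container is running on the
# local Docker daemon and publish its port 80 on the host's port 8080.
- name: Manage a container with community.docker
  hosts: localhost
  tasks:
    - name: Ensure the web container is running
      community.docker.docker_container:
        name: web            # made-up container name
        image: nginx:stable  # made-up image tag
        state: started
        published_ports:
          - "8080:80"
```

Running it with `ansible-playbook` after the `ansible-galaxy collection install` step above is enough; no further setup is needed beyond the Python requirements listed earlier.
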
## Contributing to this collection

If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths) directories, and work on it there.

You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).

## Release notes

See the [changelog](https://github.com/ansible-collections/community.docker/tree/main/CHANGELOG.md).

## More information

- [Ansible Collection overview](https://github.com/ansible-collections/overview)
- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
- [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/master/collection_requirements.rst)
- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
- [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420)
- [Changes impacting Contributors](https://github.com/ansible-collections/overview/issues/45)

## Licensing

This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.

See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.docker/blob/main/COPYING) for the full text.

Parts of the collection are licensed under the [Apache 2.0 license](https://github.com/ansible-collections/community.docker/blob/main/LICENSES/Apache-2.0.txt). This mostly applies to files vendored from the [Docker SDK for Python](https://github.com/docker/docker-py/).

All files have a machine-readable `SPDX-License-Identifier:` comment denoting their respective license(s), or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/).

11 ansible_collections/community/docker/REUSE.toml (Normal file)
@@ -0,0 +1,11 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

version = 1

[[annotations]]
path = "changelogs/fragments/**"
precedence = "aggregate"
SPDX-FileCopyrightText = "Ansible Project"
SPDX-License-Identifier = "GPL-3.0-or-later"

266 ansible_collections/community/docker/antsibull-nox.toml (Normal file)
@@ -0,0 +1,266 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

[collection_sources]
"ansible.posix" = "git+https://github.com/ansible-collections/ansible.posix.git,main"
"community.general" = "git+https://github.com/ansible-collections/community.general.git,main"
"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main"
"community.library_inventory_filtering_v1" = "git+https://github.com/ansible-collections/community.library_inventory_filtering.git,stable-1"

[vcs]
vcs = "git"
development_branch = "main"
stable_branches = [ "stable-*" ]

[sessions]

[sessions.lint]
run_isort = false
run_black = true
run_ruff_autofix = true
ruff_autofix_config = "ruff.toml"
ruff_autofix_select = [
    "I",
    "RUF022",
]
run_ruff_check = true
ruff_check_config = "ruff.toml"
run_flake8 = true
flake8_config = ".flake8"
run_pylint = true
pylint_rcfile = ".pylintrc"
run_yamllint = true
yamllint_config = ".yamllint"
yamllint_config_plugins = ".yamllint-docs"
yamllint_config_plugins_examples = ".yamllint-examples"
run_mypy = true
mypy_ansible_core_package = "ansible-core>=2.19.0"
mypy_config = ".mypy.ini"
mypy_extra_deps = [
    "docker",
    "paramiko",
    "urllib3",
    "requests",
    "types-mock",
    "types-paramiko",
    "types-pywin32",
    "types-PyYAML",
    "types-requests",
]

[sessions.docs_check]
validate_collection_refs = "all"
codeblocks_restrict_types = [
    "ansible-output",
    "console",
    "yaml",
    "yaml+jinja",
]
codeblocks_restrict_type_exact_case = true
codeblocks_allow_without_type = false
codeblocks_allow_literal_blocks = false

[sessions.license_check]

[sessions.extra_checks]
run_no_unwanted_files = true
no_unwanted_files_module_extensions = [".py"]
no_unwanted_files_yaml_extensions = [".yml"]
run_action_groups = true
run_no_trailing_whitespace = true
run_avoid_characters = true

[[sessions.extra_checks.action_groups_config]]
name = "docker"
pattern = "^.*$"
exclusions = [
    "current_container_facts",
    "docker_context_info",
]
doc_fragment = "community.docker._attributes.actiongroup_docker"

[[sessions.extra_checks.avoid_character_group]]
name = "tab"
regex = "\\x09"
skip_directories = [
    "tests/images/",
]

[sessions.build_import_check]
run_galaxy_importer = true

[sessions.ansible_test_sanity]
include_devel = true

[sessions.ansible_test_units]
include_devel = true

[sessions.ansible_test_integration]
session_name_template = "ansible-test-integration-{ansible_core}{dash_docker_short}{dash_remote}{dash_python_version}{dash_target_dashized}"
display_name_template = "main+Ⓐ{ansible_core}{plus_docker_short}{plus_remote}{plus_py_python_version}{plus_target}{plus_force_docker_sdk_for_python_dev}{plus_force_docker_sdk_for_python_pypi}"
description_template = "Run main integration tests with ansible-core {ansible_core}{comma_docker_short}{comma_remote}{comma_py_python_version}{comma_target}{comma_force_docker_sdk_for_python_dev}{comma_force_docker_sdk_for_python_pypi}"

[sessions.ansible_test_integration.ansible_vars]
force_docker_sdk_for_python_dev = { type = "value", value = false, template_value = "" }
|
||||||
|
force_docker_sdk_for_python_pypi = { type = "value", value = false, template_value = "" }
|
||||||
|
|
||||||
|
##################################################################################################
|
||||||
|
|
||||||
|
# Ansible-core 2.17:
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups]]
|
||||||
|
session_name = "ansible-test-integration-2.17"
|
||||||
|
description = "Meta session for running all ansible-test-integration-2.17-* sessions."
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "2.17"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
docker = [ "fedora39", "ubuntu2004", "alpine319" ]
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "2.17"
|
||||||
|
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
|
||||||
|
remote = [ "rhel/9.3" ]
|
||||||
|
|
||||||
|
# Ansible-core 2.18:
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups]]
|
||||||
|
session_name = "ansible-test-integration-2.18"
|
||||||
|
description = "Meta session for running all ansible-test-integration-2.18-* sessions."
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "2.18"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
docker = [ "fedora40", "ubuntu2204", "alpine320" ]
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "2.18"
|
||||||
|
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
|
||||||
|
remote = [ "rhel/9.4" ]
|
||||||
|
|
||||||
|
# Ansible-core 2.19:
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups]]
|
||||||
|
session_name = "ansible-test-integration-2.19"
|
||||||
|
description = "Meta session for running all ansible-test-integration-2.19-* sessions."
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "2.19"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
docker = [ "fedora41", "alpine321" ]
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "2.19"
|
||||||
|
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
|
||||||
|
remote = [ "rhel/9.5", "ubuntu/22.04" ]
|
||||||
|
|
||||||
|
# Ansible-core 2.20:
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups]]
|
||||||
|
session_name = "ansible-test-integration-2.20"
|
||||||
|
description = "Meta session for running all ansible-test-integration-2.20-* sessions."
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "2.20"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
docker = [ "fedora42", "alpine322" ]
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "2.20"
|
||||||
|
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
|
||||||
|
remote = [ "rhel/9.6" ]
|
||||||
|
|
||||||
|
# Ansible-core devel:
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups]]
|
||||||
|
session_name = "ansible-test-integration-devel"
|
||||||
|
description = "Meta session for running all ansible-test-integration-devel-* sessions."
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "devel"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
docker = [ "fedora42", "ubuntu2204", "ubuntu2404", "alpine322" ]
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "devel"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
python_version = "3.9"
|
||||||
|
docker = "quay.io/ansible-community/test-image:debian-bullseye"
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "devel"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
python_version = "3.11"
|
||||||
|
docker = "quay.io/ansible-community/test-image:debian-bookworm"
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "devel"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
python_version = "3.13"
|
||||||
|
docker = "quay.io/ansible-community/test-image:debian-13-trixie"
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "devel"
|
||||||
|
target = [ "azp/4/", "azp/5/" ]
|
||||||
|
python_version = "3.13"
|
||||||
|
docker = "quay.io/ansible-community/test-image:archlinux"
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "devel"
|
||||||
|
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
|
||||||
|
remote = [ "rhel/9.6" ]
|
||||||
|
ansible_vars = { force_docker_sdk_for_python_dev = { type = "value", value = true, template_value = "sdk-dev-latest" } }
|
||||||
|
|
||||||
|
[[sessions.ansible_test_integration.groups.sessions]]
|
||||||
|
ansible_core = "devel"
|
||||||
|
target = [ "azp/1/", "azp/2/", "azp/3/", "azp/4/", "azp/5/" ]
|
||||||
|
remote = [
|
||||||
|
"rhel/10.0",
|
||||||
|
# For some reason, Ubuntu 24.04 is *extremely* slower than RHEL 9.6
|
||||||
|
# "ubuntu/24.04",
|
||||||
|
]
|
||||||
|
|
||||||
|
##################################################################################################
|
||||||
|
|
||||||
|
[sessions.ansible_lint]
|
||||||
|
|
||||||
|
[[sessions.ee_check.execution_environments]]
|
||||||
|
name = "devel-ubi-9"
|
||||||
|
description = "ansible-core devel @ RHEL UBI 9"
|
||||||
|
test_playbooks = ["tests/ee/all.yml"]
|
||||||
|
config.images.base_image.name = "docker.io/redhat/ubi9:latest"
|
||||||
|
config.dependencies.ansible_core.package_pip = "https://github.com/ansible/ansible/archive/devel.tar.gz"
|
||||||
|
config.dependencies.ansible_runner.package_pip = "ansible-runner"
|
||||||
|
config.dependencies.python_interpreter.package_system = "python3.12 python3.12-pip python3.12-wheel python3.12-cryptography"
|
||||||
|
config.dependencies.python_interpreter.python_path = "/usr/bin/python3.12"
|
||||||
|
runtime_environment = {"ANSIBLE_PRIVATE_ROLE_VARS" = "true"}
|
||||||
|
runtime_container_options = [
|
||||||
|
# Mount Docker socket into the container so we can talk to Docker outside the container
|
||||||
|
"-v",
|
||||||
|
"/var/run/docker.sock:/var/run/docker.sock",
|
||||||
|
# Need to be root so we can access /var/run/docker.sock, which usually isn't accessible by the user,
|
||||||
|
# but only by the group the user is in (but that group membership isn't there in the container)
|
||||||
|
"--user",
|
||||||
|
"0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[sessions.ee_check.execution_environments]]
|
||||||
|
name = "2.17-rocky-9"
|
||||||
|
description = "ansible-core 2.17 @ Rocky Linux 9"
|
||||||
|
test_playbooks = ["tests/ee/all.yml"]
|
||||||
|
config.images.base_image.name = "quay.io/rockylinux/rockylinux:9"
|
||||||
|
config.dependencies.ansible_core.package_pip = "https://github.com/ansible/ansible/archive/stable-2.17.tar.gz"
|
||||||
|
config.dependencies.ansible_runner.package_pip = "ansible-runner"
|
||||||
|
config.dependencies.python_interpreter.package_system = "python3.11 python3.11-pip python3.11-wheel python3.11-cryptography"
|
||||||
|
config.dependencies.python_interpreter.python_path = "/usr/bin/python3.11"
|
||||||
|
runtime_environment = {"ANSIBLE_PRIVATE_ROLE_VARS" = "true"}
|
||||||
|
runtime_container_options = [
|
||||||
|
# Mount Docker socket into the container so we can talk to Docker outside the container
|
||||||
|
"-v",
|
||||||
|
"/var/run/docker.sock:/var/run/docker.sock",
|
||||||
|
# Need to be root so we can access /var/run/docker.sock, which usually isn't accessible by the user,
|
||||||
|
# but only by the group the user is in (but that group membership isn't there in the container)
|
||||||
|
"--user",
|
||||||
|
"0",
|
||||||
|
]
|
||||||
ansible_collections/community/docker/changelogs/changelog.yaml (new file, 2359 lines)
(File diff suppressed because it is too large.)

@@ -0,0 +1,3 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
SPDX-FileCopyrightText: Ansible Project
ansible_collections/community/docker/changelogs/config.yaml (new file, 43 lines)
@@ -0,0 +1,43 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
ignore_other_fragment_extensions: true
keep_fragments: false
mention_ancestor: true
new_plugins_after_name: removed_features
notesdir: fragments
output_formats:
  - md
  - rst
prelude_section_name: release_summary
prelude_section_title: Release Summary
sections:
  - - major_changes
    - Major Changes
  - - minor_changes
    - Minor Changes
  - - breaking_changes
    - Breaking Changes / Porting Guide
  - - deprecated_features
    - Deprecated Features
  - - removed_features
    - Removed Features (previously deprecated)
  - - security_fixes
    - Security Fixes
  - - bugfixes
    - Bugfixes
  - - known_issues
    - Known Issues
title: Docker Community Collection
trivial_section_name: trivial
use_fqcn: true
add_plugin_period: true
changelog_nice_yaml: true
changelog_sort: version
vcs: auto
ansible_collections/community/docker/docs/docsite/config.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# The following `.. envvar::` directives are defined in the extra docsite docs:
envvar_directives:
  - DOCKER_HOST
  - DOCKER_API_VERSION
  - DOCKER_TIMEOUT
  - DOCKER_CERT_PATH
  - DOCKER_SSL_VERSION
  - DOCKER_TLS
  - DOCKER_TLS_HOSTNAME
  - DOCKER_TLS_VERIFY

changelog:
  write_changelog: true
@@ -0,0 +1,9 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

sections:
  - title: Scenario Guide
    toctree:
      - scenario_guide
ansible_collections/community/docker/docs/docsite/links.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

edit_on_github:
  repository: ansible-collections/community.docker
  branch: main
  path_prefix: ''

extra_links:
  - description: Ask for help (Docker)
    url: https://forum.ansible.com/tags/c/help/6/none/docker
  - description: Ask for help (Docker Compose)
    url: https://forum.ansible.com/tags/c/help/6/none/docker-compose
  - description: Ask for help (Docker Swarm)
    url: https://forum.ansible.com/tags/c/help/6/none/docker-swarm
  - description: Submit a bug report
    url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md
  - description: Request a feature
    url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=feature_request.md

communication:
  matrix_rooms:
    - topic: General usage and support questions
      room: '#users:ansible.im'
  irc_channels:
    - topic: General usage and support questions
      network: Libera
      channel: '#ansible'
  forums:
    - topic: "Ansible Forum: General usage and support questions"
      # The following URL directly points to the "Get Help" section
      url: https://forum.ansible.com/c/help/6/none
    - topic: "Ansible Forum: Discussions about Docker"
      # The following URL directly points to the "docker" tag
      url: https://forum.ansible.com/tag/docker
    - topic: "Ansible Forum: Discussions about Docker Compose"
      # The following URL directly points to the "docker-compose" tag
      url: https://forum.ansible.com/tag/docker-compose
    - topic: "Ansible Forum: Discussions about Docker Swarm"
      # The following URL directly points to the "docker-swarm" tag
      url: https://forum.ansible.com/tag/docker-swarm
@@ -0,0 +1,330 @@
..
  Copyright (c) Ansible Project
  GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
  SPDX-License-Identifier: GPL-3.0-or-later

.. _ansible_collections.community.docker.docsite.scenario_guide:

Docker Guide
============

The `community.docker collection <https://galaxy.ansible.com/ui/repo/published/community/docker/>`_ offers several modules and plugins for orchestrating Docker containers and Docker Swarm.

.. contents::
   :local:
   :depth: 1


Requirements
------------

Most of the modules and plugins in community.docker require the `Docker SDK for Python <https://docker-py.readthedocs.io/en/stable/>`_. The SDK needs to be installed on the machines where the modules and plugins are executed, and for the Python version(s) with which the modules and plugins are executed. You can use the :ansplugin:`community.general.python_requirements_info module <community.general.python_requirements_info#module>` to make sure that the Docker SDK for Python is installed on the correct machine and for the Python version used by Ansible.
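For example, the following task is a minimal sketch (the registered variable name is merely illustrative) of checking whether the SDK is available to the Python interpreter used on the target:

.. code-block:: yaml

    - name: Check that the Docker SDK for Python is available
      community.general.python_requirements_info:
        dependencies:
          - docker
      register: docker_sdk_check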
Note that plugins (inventory plugins and connection plugins) are always executed in the context of Ansible itself. If you use a plugin that requires the Docker SDK for Python, you need to install it on the machine running ``ansible`` or ``ansible-playbook`` and for the same Python interpreter used by Ansible. To see which Python is used, run ``ansible --version``.

You can install the Docker SDK for Python for Python 3.6 or later as follows:

.. code-block:: console

    $ pip install docker

For Python 2.7, you need to use a version between 2.0.0 and 4.4.4, since the Python package for Docker removed support for Python 2.7 in 5.0.0. You can install that specific version of the Docker SDK for Python as follows:

.. code-block:: console

    $ pip install 'docker==4.4.4'

Note that the Docker SDK for Python was called ``docker-py`` on PyPI before version 2.0.0. Please avoid installing this very old version, and make sure you do not install both ``docker`` and ``docker-py``: installing both results in a broken installation. If this happens, Ansible will detect it and inform you about it, and you must then uninstall both packages and reinstall the correct version. If in doubt, always install ``docker`` and never ``docker-py``.


Connecting to the Docker API
----------------------------
You can connect to a local or remote API using parameters passed to each task or by setting environment variables. The order of precedence is command line parameters and then environment variables. If neither a command line option nor an environment variable is found, Ansible uses the default value provided under `Parameters`_.


Parameters
..........

Most plugins and modules can be configured with the following parameters:

docker_host
    The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix:///var/run/docker.sock``. To connect to a remote host, provide the TCP connection string (for example: ``tcp://192.0.2.23:2376``). If TLS is used to encrypt the connection to the API, then the module will automatically replace ``tcp`` in the connection URL with ``https``.

api_version
    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by the installed Docker SDK for Python.

timeout
    The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.

tls
    Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Defaults to ``false``.

validate_certs
    Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. Defaults to ``false``.

ca_path
    Use a CA certificate when performing server verification by providing the path to a CA certificate file.

cert_path
    Path to the client's TLS certificate file.

key_path
    Path to the client's TLS key file.

tls_hostname
    When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults to ``localhost``.

ssl_version
    Provide a valid SSL version number. The default value is determined by the Docker SDK for Python.

    This option is not available for the CLI-based plugins. It is mainly needed for legacy systems and should be avoided.
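As a minimal sketch (the daemon address and certificate path are placeholders), these parameters can be passed directly to an individual task:

.. code-block:: yaml

    - name: Inspect a container on a remote TLS-protected daemon
      community.docker.docker_container_info:
        name: my-container
        docker_host: tcp://192.0.2.23:2376
        tls: true
        validate_certs: true
        ca_path: /path/to/cacert.pem
      register: result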
Module default group
....................

To avoid having to specify common parameters for all the modules in every task, you can use the ``community.docker.docker`` :ref:`module defaults group <module_defaults_groups>`, or its short name ``docker``.

.. note::

   Module default groups only work for modules, not for plugins (connection and inventory plugins).

The following example shows how the module default group can be used in a playbook:

.. code-block:: yaml+jinja

    ---
    - name: Pull image and start the container
      hosts: localhost
      gather_facts: false
      module_defaults:
        group/community.docker.docker:
          # Select Docker Daemon on other host
          docker_host: tcp://192.0.2.23:2376
          # Configure TLS
          tls: true
          validate_certs: true
          tls_hostname: docker.example.com
          ca_path: /path/to/cacert.pem
          # Increase timeout
          timeout: 120
      tasks:
        - name: Pull image
          community.docker.docker_image_pull:
            name: python
            tag: 3.12

        - name: Start container
          community.docker.docker_container:
            cleanup: true
            command: python --version
            detach: false
            image: python:3.12
            name: my-python-container
            output_logs: true
          register: output

        - name: Show output
          ansible.builtin.debug:
            msg: "{{ output.container.Output }}"

Here the two ``community.docker`` tasks will use the options set for the module defaults group.
Environment variables
.....................

You can also control how the plugins and modules connect to the Docker API by setting the following environment variables.

For plugins, they have to be set for the environment Ansible itself runs in. For modules, they have to be set for the environment the modules are executed in. For modules running on remote machines, the environment variables have to be set on that machine for the user the modules are executed as.

.. envvar:: DOCKER_HOST

    The URL or Unix socket path used to connect to the Docker API.

.. envvar:: DOCKER_API_VERSION

    The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by the Docker SDK for Python.

.. envvar:: DOCKER_TIMEOUT

    The maximum amount of time in seconds to wait on a response from the API.

.. envvar:: DOCKER_CERT_PATH

    Path to the directory containing the client certificate, client key, and CA certificate.

.. envvar:: DOCKER_SSL_VERSION

    Provide a valid SSL version number.

.. envvar:: DOCKER_TLS

    Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.

.. envvar:: DOCKER_TLS_HOSTNAME

    When verifying the authenticity of the Docker Host, use this hostname to compare to the host's certificate.

.. envvar:: DOCKER_TLS_VERIFY

    Secure the connection to the API by using TLS and verifying the authenticity of the Docker Host.
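One hedged way to supply these variables to a module task is Ansible's ``environment`` keyword, as in this sketch (the daemon address is a placeholder):

.. code-block:: yaml

    - name: Retrieve daemon information using environment variables
      community.docker.docker_host_info:
      environment:
        DOCKER_HOST: tcp://192.0.2.23:2375
        DOCKER_TIMEOUT: "120"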
Plain Docker daemon: images, networks, volumes, and containers
--------------------------------------------------------------

For working with a plain Docker daemon, that is, without Swarm, there are connection plugins, an inventory plugin, and several modules available (see the inventory sketch after this list):

docker connection plugin
    The :ansplugin:`community.docker.docker connection plugin <community.docker.docker#connection>` uses the Docker CLI utility to connect to Docker containers and execute modules in them. It essentially wraps ``docker exec`` and ``docker cp``. This connection plugin is supported by the :ansplugin:`ansible.posix.synchronize module <ansible.posix.synchronize#module>`.

docker_api connection plugin
    The :ansplugin:`community.docker.docker_api connection plugin <community.docker.docker_api#connection>` talks directly to the Docker daemon to connect to Docker containers and execute modules in them.

docker_containers inventory plugin
    The :ansplugin:`community.docker.docker_containers inventory plugin <community.docker.docker_containers#inventory>` allows you to dynamically add Docker containers from a Docker Daemon to your Ansible inventory. See :ref:`dynamic_inventory` for details on dynamic inventories.

    The `docker inventory script <https://github.com/ansible-community/contrib-scripts/blob/main/inventory/docker.py>`_ is deprecated. Please use the inventory plugin instead. The inventory plugin has several compatibility options. If you need to collect Docker containers from multiple Docker daemons, you need to add every Docker daemon as an individual inventory source.

docker_host_info module
    The :ansplugin:`community.docker.docker_host_info module <community.docker.docker_host_info#module>` allows you to retrieve information on a Docker daemon, such as all containers, images, volumes, networks, and so on.

docker_login module
    The :ansplugin:`community.docker.docker_login module <community.docker.docker_login#module>` allows you to log in and out of a remote registry, such as Docker Hub or a private registry. It provides similar functionality to the ``docker login`` and ``docker logout`` CLI commands.

docker_prune module
    The :ansplugin:`community.docker.docker_prune module <community.docker.docker_prune#module>` allows you to prune containers, images, volumes, and so on that are no longer needed. It provides similar functionality to the ``docker prune`` CLI command.

docker_image module
    The :ansplugin:`community.docker.docker_image module <community.docker.docker_image#module>` provides full control over images, including: build, pull, push, tag, and remove.

docker_image_build
    The :ansplugin:`community.docker.docker_image_build module <community.docker.docker_image_build#module>` allows you to build a Docker image using Docker buildx.

docker_image_export module
    The :ansplugin:`community.docker.docker_image_export module <community.docker.docker_image_export#module>` allows you to export (archive) images.

docker_image_info module
    The :ansplugin:`community.docker.docker_image_info module <community.docker.docker_image_info#module>` allows you to list and inspect images.

docker_image_load
    The :ansplugin:`community.docker.docker_image_load module <community.docker.docker_image_load#module>` allows you to import one or multiple images from tarballs.

docker_image_pull
    The :ansplugin:`community.docker.docker_image_pull module <community.docker.docker_image_pull#module>` allows you to pull a Docker image from a registry.

docker_image_push
    The :ansplugin:`community.docker.docker_image_push module <community.docker.docker_image_push#module>` allows you to push a Docker image to a registry.

docker_image_remove
    The :ansplugin:`community.docker.docker_image_remove module <community.docker.docker_image_remove#module>` allows you to remove and/or untag a Docker image from the Docker daemon.

docker_image_tag
    The :ansplugin:`community.docker.docker_image_tag module <community.docker.docker_image_tag#module>` allows you to tag a Docker image with additional names and/or tags.

docker_network module
    The :ansplugin:`community.docker.docker_network module <community.docker.docker_network#module>` provides full control over Docker networks.

docker_network_info module
    The :ansplugin:`community.docker.docker_network_info module <community.docker.docker_network_info#module>` allows you to inspect Docker networks.

docker_volume_info module
    The :ansplugin:`community.docker.docker_volume_info module <community.docker.docker_volume_info#module>` allows you to inspect Docker volumes.

docker_volume module
    The :ansplugin:`community.docker.docker_volume module <community.docker.docker_volume#module>` provides full control over Docker volumes.

docker_container module
    The :ansplugin:`community.docker.docker_container module <community.docker.docker_container#module>` manages the container lifecycle by providing the ability to create, update, stop, start, and destroy a Docker container.

docker_container_copy_into
    The :ansplugin:`community.docker.docker_container_copy_into module <community.docker.docker_container_copy_into#module>` allows you to copy files from the control node into a container.

docker_container_exec
    The :ansplugin:`community.docker.docker_container_exec module <community.docker.docker_container_exec#module>` allows you to execute commands in a running container.

docker_container_info module
    The :ansplugin:`community.docker.docker_container_info module <community.docker.docker_container_info#module>` allows you to inspect a Docker container.

docker_plugin
    The :ansplugin:`community.docker.docker_plugin module <community.docker.docker_plugin#module>` allows you to manage Docker plugins.
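The following static inventory is a minimal sketch of how the CLI-based connection plugin is typically wired up (the container name ``my-container`` is a placeholder for an existing container):

.. code-block:: yaml

    all:
      hosts:
        my-container:
          ansible_connection: community.docker.docker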
Docker Compose
--------------

Docker Compose v2
.................

There are several modules for working with Docker Compose projects:

community.docker.docker_compose_v2
    The :ansplugin:`community.docker.docker_compose_v2 module <community.docker.docker_compose_v2#module>` allows you to use your existing Docker Compose files to orchestrate containers on a single Docker daemon or on Swarm.

community.docker.docker_compose_v2_exec
    The :ansplugin:`community.docker.docker_compose_v2_exec module <community.docker.docker_compose_v2_exec#module>` allows you to run a command in a container of a Docker Compose project.

community.docker.docker_compose_v2_pull
    The :ansplugin:`community.docker.docker_compose_v2_pull module <community.docker.docker_compose_v2_pull#module>` allows you to pull Docker Compose projects.

community.docker.docker_compose_v2_run
    The :ansplugin:`community.docker.docker_compose_v2_run module <community.docker.docker_compose_v2_run#module>` allows you to run a command in a new container of a Docker Compose project.

These modules use the Docker CLI "compose" plugin (``docker compose``), and thus need access to the Docker CLI tool. No further requirements besides the CLI tool and its Docker Compose plugin are needed.
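A minimal sketch of bringing up a project (the project path is a placeholder):

.. code-block:: yaml

    - name: Bring up a Docker Compose project
      community.docker.docker_compose_v2:
        project_src: /path/to/project
        state: present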
Docker Machine
--------------

The :ansplugin:`community.docker.docker_machine inventory plugin <community.docker.docker_machine#inventory>` allows you to dynamically add Docker Machine hosts to your Ansible inventory.


Docker Swarm stack
------------------

The :ansplugin:`community.docker.docker_stack module <community.docker.docker_stack#module>` allows you to control Docker Swarm stacks. Information on Swarm stacks can be retrieved by the :ansplugin:`community.docker.docker_stack_info module <community.docker.docker_stack_info#module>`, and information on Swarm stack tasks can be retrieved by the :ansplugin:`community.docker.docker_stack_task_info module <community.docker.docker_stack_task_info#module>`.
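A minimal sketch of deploying a stack from a Compose file (the stack name and file path are placeholders):

.. code-block:: yaml

    - name: Deploy a stack to the Swarm
      community.docker.docker_stack:
        name: mystack
        state: present
        compose:
          - /path/to/docker-compose.yml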
Docker Swarm
------------

The community.docker collection provides multiple plugins and modules for managing Docker Swarms.

Swarm management
................

One inventory plugin and several modules are provided to manage Docker Swarms (see the sketch after this list):

docker_swarm inventory plugin
    The :ansplugin:`community.docker.docker_swarm inventory plugin <community.docker.docker_swarm#inventory>` allows you to dynamically add all Docker Swarm nodes to your Ansible inventory.

docker_swarm module
    The :ansplugin:`community.docker.docker_swarm module <community.docker.docker_swarm#module>` allows you to globally configure Docker Swarm manager nodes to join and leave swarms, and to change the Docker Swarm configuration.

docker_swarm_info module
    The :ansplugin:`community.docker.docker_swarm_info module <community.docker.docker_swarm_info#module>` allows you to retrieve information on Docker Swarm.

docker_node module
    The :ansplugin:`community.docker.docker_node module <community.docker.docker_node#module>` allows you to manage Docker Swarm nodes.

docker_node_info module
    The :ansplugin:`community.docker.docker_node_info module <community.docker.docker_node_info#module>` allows you to retrieve information on Docker Swarm nodes.
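A minimal sketch of initializing a new swarm on the current host (the advertise address is a placeholder):

.. code-block:: yaml

    - name: Initialize a new swarm
      community.docker.docker_swarm:
        state: present
        advertise_addr: 192.0.2.10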
Configuration management
........................

The community.docker collection offers modules to manage Docker Swarm configurations and secrets:

docker_config module
    The :ansplugin:`community.docker.docker_config module <community.docker.docker_config#module>` allows you to create and modify Docker Swarm configs.

docker_secret module
    The :ansplugin:`community.docker.docker_secret module <community.docker.docker_secret#module>` allows you to create and modify Docker Swarm secrets.
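A minimal sketch of creating a secret (the secret name and the variable holding its value are placeholders):

.. code-block:: yaml+jinja

    - name: Create or update a Swarm secret
      community.docker.docker_secret:
        name: my_secret
        data: "{{ my_secret_value }}"
        state: present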
Swarm services
..............

Docker Swarm services can be created and updated with the :ansplugin:`community.docker.docker_swarm_service module <community.docker.docker_swarm_service#module>`, and information on them can be queried with the :ansplugin:`community.docker.docker_swarm_service_info module <community.docker.docker_swarm_service_info#module>`.
ansible_collections/community/docker/meta/ee-bindep.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@ -0,0 +1,16 @@
|
||||||
|
# Copyright (c) Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
|
||||||
|
docker
|
||||||
|
urllib3
|
||||||
|
requests
|
||||||
|
paramiko
|
||||||
|
pyyaml
|
||||||
|
|
||||||
|
# We assume that EEs are not based on Windows, and have Python >= 3.5.
|
||||||
|
# (ansible-builder does not support conditionals, it will simply add
|
||||||
|
# the following unconditionally to the requirements)
|
||||||
|
#
|
||||||
|
# pywin32 ; sys_platform == 'win32'
|
||||||
|
# backports.ssl-match-hostname ; python_version < '3.5'
|
||||||
|
@@ -0,0 +1,9 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

version: 1
dependencies:
  python: meta/ee-requirements.txt
  system: meta/ee-bindep.txt
ansible_collections/community/docker/meta/runtime.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

requires_ansible: '>=2.17.0'
action_groups:
  docker:
    - docker_compose_v2
    - docker_compose_v2_exec
    - docker_compose_v2_pull
    - docker_compose_v2_run
    - docker_config
    - docker_container
    - docker_container_copy_into
    - docker_container_exec
    - docker_container_info
    - docker_host_info
    - docker_image
    - docker_image_build
    - docker_image_export
    - docker_image_info
    - docker_image_load
    - docker_image_pull
    - docker_image_push
    - docker_image_remove
    - docker_image_tag
    - docker_login
    - docker_network
    - docker_network_info
    - docker_node
    - docker_node_info
    - docker_plugin
    - docker_prune
    - docker_secret
    - docker_stack
    - docker_stack_info
    - docker_stack_task_info
    - docker_swarm
    - docker_swarm_info
    - docker_swarm_service
    - docker_swarm_service_info
    - docker_volume
    - docker_volume_info

plugin_routing:
  modules:
    docker_compose:
      tombstone:
        removal_version: 4.0.0
        warning_text: This module uses docker-compose v1, which is End of Life since July 2022. Please migrate to community.docker.docker_compose_v2.
ansible_collections/community/docker/noxfile.py (new file, 26 lines)
@@ -0,0 +1,26 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2025 Felix Fontein <felix@fontein.de>

# /// script
# dependencies = ["nox>=2025.02.09", "antsibull-nox"]
# ///

import sys

import nox

try:
    import antsibull_nox
except ImportError:
    print("You need to install antsibull-nox in the same Python environment as nox.")
    sys.exit(1)


antsibull_nox.load_antsibull_nox_toml()


# Allow running the noxfile with `python noxfile.py`, `pipx run noxfile.py`, or similar.
# Requires nox >= 2025.02.09.
if __name__ == "__main__":
    nox.main()
@@ -0,0 +1,49 @@
# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

import base64
import typing as t

from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash

from ansible_collections.community.docker.plugins.module_utils._scramble import (
    unscramble,
)


class ActionModule(ActionBase):
    # Set to True when transferring files to the remote
    TRANSFERS_FILES = False

    def run(
        self, tmp: str | None = None, task_vars: dict[str, t.Any] | None = None
    ) -> dict[str, t.Any]:
        self._supports_check_mode = True
        self._supports_async = True

        result = super().run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # pylint: disable-next=no-member
        max_file_size_for_diff: int = C.MAX_FILE_SIZE_FOR_DIFF  # type: ignore
        self._task.args["_max_file_size_for_diff"] = max_file_size_for_diff

        result = merge_hash(
            result,
            self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val),
        )

        if "diff" in result and result["diff"].get("scrambled_diff"):
            # Scrambling is not done for security, but to avoid no_log screwing up the diff
            diff = result["diff"]
            key = base64.b64decode(diff.pop("scrambled_diff"))
            for k in ("before", "after"):
                if k in diff:
                    diff[k] = unscramble(diff[k], key)

        return result
|
|
@ -0,0 +1,623 @@
|
||||||
|
# Based on the chroot connection plugin by Maykel Moya
|
||||||
|
#
|
||||||
|
# (c) 2014, Lorin Hochstein
|
||||||
|
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
|
||||||
|
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||||
|
# Copyright (c) 2017 Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
DOCUMENTATION = r"""
|
||||||
|
author:
|
||||||
|
- Lorin Hochestein (!UNKNOWN)
|
||||||
|
- Leendert Brouwer (!UNKNOWN)
|
||||||
|
name: docker
|
||||||
|
short_description: Run tasks in docker containers
|
||||||
|
description:
|
||||||
|
- Run commands or put/fetch files to an existing docker container.
|
||||||
|
- Uses the Docker CLI to execute commands in the container. If you prefer to directly connect to the Docker daemon, use
|
||||||
|
the P(community.docker.docker_api#connection) connection plugin.
|
||||||
|
options:
|
||||||
|
remote_addr:
|
||||||
|
description:
|
||||||
|
- The name of the container you want to access.
|
||||||
|
default: inventory_hostname
|
||||||
|
vars:
|
||||||
|
- name: inventory_hostname
|
||||||
|
- name: ansible_host
|
||||||
|
- name: ansible_docker_host
|
||||||
|
remote_user:
|
||||||
|
description:
|
||||||
|
- The user to execute as inside the container.
|
||||||
|
- If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
|
||||||
|
vars:
|
||||||
|
- name: ansible_user
|
||||||
|
- name: ansible_docker_user
|
||||||
|
ini:
|
||||||
|
- section: defaults
|
||||||
|
key: remote_user
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_REMOTE_USER
|
||||||
|
cli:
|
||||||
|
- name: user
|
||||||
|
keyword:
|
||||||
|
- name: remote_user
|
||||||
|
docker_extra_args:
|
||||||
|
description:
|
||||||
|
- Extra arguments to pass to the docker command line.
|
||||||
|
default: ''
|
||||||
|
vars:
|
||||||
|
- name: ansible_docker_extra_args
|
||||||
|
ini:
|
||||||
|
- section: docker_connection
|
||||||
|
key: extra_cli_args
|
||||||
|
container_timeout:
|
||||||
|
default: 10
|
||||||
|
description:
|
||||||
|
- Controls how long we can wait to access reading output from the container once execution started.
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_TIMEOUT
|
||||||
|
- name: ANSIBLE_DOCKER_TIMEOUT
|
||||||
|
version_added: 2.2.0
|
||||||
|
ini:
|
||||||
|
- key: timeout
|
||||||
|
section: defaults
|
||||||
|
- key: timeout
|
||||||
|
section: docker_connection
|
||||||
|
version_added: 2.2.0
|
||||||
|
vars:
|
||||||
|
- name: ansible_docker_timeout
|
||||||
|
version_added: 2.2.0
|
||||||
|
cli:
|
||||||
|
- name: timeout
|
||||||
|
type: integer
|
||||||
|
extra_env:
|
||||||
|
description:
|
||||||
|
- Provide extra environment variables to set when running commands in the Docker container.
|
||||||
|
- This option can currently only be provided as Ansible variables due to limitations of ansible-core's configuration
|
||||||
|
manager.
|
||||||
|
vars:
|
||||||
|
- name: ansible_docker_extra_env
|
||||||
|
type: dict
|
||||||
|
version_added: 3.12.0
|
||||||
|
working_dir:
|
||||||
|
description:
|
||||||
|
- The directory inside the container to run commands in.
|
||||||
|
- Requires Docker CLI version 18.06 or later.
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_DOCKER_WORKING_DIR
|
||||||
|
ini:
|
||||||
|
- key: working_dir
|
||||||
|
section: docker_connection
|
||||||
|
vars:
|
||||||
|
- name: ansible_docker_working_dir
|
||||||
|
type: string
|
||||||
|
version_added: 3.12.0
|
||||||
|
privileged:
|
||||||
|
description:
|
||||||
|
- Whether commands should be run with extended privileges.
|
||||||
|
- B(Note) that this allows command to potentially break out of the container. Use with care!
|
||||||
|
env:
|
||||||
|
- name: ANSIBLE_DOCKER_PRIVILEGED
|
||||||
|
ini:
|
||||||
|
- key: privileged
|
||||||
|
section: docker_connection
|
||||||
|
vars:
|
||||||
|
- name: ansible_docker_privileged
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
version_added: 3.12.0
|
||||||
|
"""
|
||||||
|
|
||||||
|
import fcntl
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import re
|
||||||
|
import selectors
|
||||||
|
import subprocess
|
||||||
|
import typing as t
|
||||||
|
from shlex import quote
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleFileNotFound
|
||||||
|
from ansible.module_utils.common.process import get_bin_path
|
||||||
|
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
||||||
|
from ansible.plugins.connection import BUFSIZE, ConnectionBase
|
||||||
|
from ansible.utils.display import Display
|
||||||
|
|
||||||
|
from ansible_collections.community.docker.plugins.module_utils._version import (
|
||||||
|
LooseVersion,
|
||||||
|
)
|
||||||
|
|
||||||
|
display = Display()
|
||||||
|
|
||||||
|
|
||||||
|
class Connection(ConnectionBase):
|
||||||
|
"""Local docker based connections"""
|
||||||
|
|
||||||
|
transport = "community.docker.docker"
|
||||||
|
has_pipelining = True
|
||||||
|
|
||||||
|
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
|
||||||
|
super().__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
# Note: docker supports running as non-root in some configurations.
|
||||||
|
# (For instance, setting the UNIX socket file to be readable and
|
||||||
|
# writable by a specific UNIX group and then putting users into that
|
||||||
|
# group). Therefore we do not check that the user is root when using
|
||||||
|
# this connection. But if the user is getting a permission denied
|
||||||
|
# error it probably means that docker on their system is only
|
||||||
|
# configured to be connected to by root and they are not running as
|
||||||
|
# root.
|
||||||
|
|
||||||
|
self._docker_args: list[bytes | str] = []
|
||||||
|
self._container_user_cache: dict[str, str | None] = {}
|
||||||
|
self._version: str | None = None
|
||||||
|
self.remote_user: str | None = None
|
||||||
|
self.timeout: int | float | None = None
|
||||||
|
|
||||||
|
# Windows uses Powershell modules
|
||||||
|
if getattr(self._shell, "_IS_WINDOWS", False):
|
||||||
|
self.module_implementation_preferences = (".ps1", ".exe", "")
|
||||||
|
|
||||||
|
if "docker_command" in kwargs:
|
||||||
|
self.docker_cmd = kwargs["docker_command"]
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
self.docker_cmd = get_bin_path("docker")
|
||||||
|
except ValueError as exc:
|
||||||
|
raise AnsibleError("docker command not found in PATH") from exc
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _sanitize_version(version: str) -> str:
|
||||||
|
version = re.sub("[^0-9a-zA-Z.]", "", version)
|
||||||
|
version = re.sub("^v", "", version)
|
||||||
|
return version
|
||||||
|
|
||||||
|
def _old_docker_version(self) -> tuple[list[str], str, bytes, int]:
|
||||||
|
cmd_args = self._docker_args
|
||||||
|
|
||||||
|
old_version_subcommand = ["version"]
|
||||||
|
|
||||||
|
old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
|
||||||
|
with subprocess.Popen(
|
||||||
|
old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
|
||||||
|
) as p:
|
||||||
|
cmd_output, err = p.communicate()
|
||||||
|
|
||||||
|
return old_docker_cmd, to_text(cmd_output), err, p.returncode
|
||||||
|
|
||||||
|
def _new_docker_version(self) -> tuple[list[str], str, bytes, int]:
|
||||||
|
# no result yet, must be newer Docker version
|
||||||
|
cmd_args = self._docker_args
|
||||||
|
|
||||||
|
new_version_subcommand = ["version", "--format", "'{{.Server.Version}}'"]
|
||||||
|
|
||||||
|
new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
|
||||||
|
with subprocess.Popen(
|
||||||
|
new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
|
||||||
|
) as p:
|
||||||
|
cmd_output, err = p.communicate()
|
||||||
|
return new_docker_cmd, to_text(cmd_output), err, p.returncode
|
||||||
|
|
||||||
|
def _get_docker_version(self) -> str:
|
||||||
|
cmd, cmd_output, err, returncode = self._old_docker_version()
|
||||||
|
if returncode == 0:
|
||||||
|
for line in to_text(cmd_output, errors="surrogate_or_strict").split("\n"):
|
||||||
|
if line.startswith("Server version:"): # old docker versions
|
||||||
|
return self._sanitize_version(line.split()[2])
|
||||||
|
|
||||||
|
cmd, cmd_output, err, returncode = self._new_docker_version()
|
||||||
|
if returncode:
|
||||||
|
raise AnsibleError(
|
||||||
|
f"Docker version check ({to_text(cmd)}) failed: {to_text(err)}"
|
||||||
|
)
|
||||||
|
|
||||||
|
return self._sanitize_version(to_text(cmd_output, errors="surrogate_or_strict"))
|
||||||
|
|
||||||
|
def _get_docker_remote_user(self) -> str | None:
|
||||||
|
"""Get the default user configured in the docker container"""
|
||||||
|
container = self.get_option("remote_addr")
|
||||||
|
if container in self._container_user_cache:
|
||||||
|
return self._container_user_cache[container]
|
||||||
|
with subprocess.Popen(
|
||||||
|
[self.docker_cmd, "inspect", "--format", "{{.Config.User}}", container],
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.PIPE,
|
||||||
|
) as p:
|
||||||
|
out_b, err_b = p.communicate()
|
||||||
|
out = to_text(out_b, errors="surrogate_or_strict")
|
||||||
|
|
||||||
|
if p.returncode != 0:
|
||||||
|
display.warning(
|
||||||
|
f"unable to retrieve default user from docker container: {out} {to_text(err_b)}"
|
||||||
|
)
|
||||||
|
self._container_user_cache[container] = None
|
||||||
|
return None
|
||||||
|
|
||||||
|
# The default exec user is root, unless it was changed in the Dockerfile with USER
|
||||||
|
user = out.strip() or "root"
|
||||||
|
self._container_user_cache[container] = user
|
||||||
|
return user
|
||||||
|
|
||||||
|
def _build_exec_cmd(self, cmd: list[bytes | str]) -> list[bytes | str]:
|
||||||
|
"""Build the local docker exec command to run cmd on remote_host
|
||||||
|
|
||||||
|
If remote_user is available and is supported by the docker
|
||||||
|
version we are using, it will be provided to docker exec.
|
||||||
|
"""
|
||||||
|
|
||||||
|
local_cmd = [self.docker_cmd]
|
||||||
|
|
||||||
|
if self._docker_args:
|
||||||
|
local_cmd += self._docker_args
|
||||||
|
|
||||||
|
local_cmd += [b"exec"]
|
||||||
|
|
||||||
|
if self.remote_user is not None:
|
||||||
|
local_cmd += [b"-u", self.remote_user]
|
||||||
|
|
||||||
|
if self.get_option("extra_env"):
|
||||||
|
for k, v in self.get_option("extra_env").items():
|
||||||
|
for val, what in ((k, "Key"), (v, "Value")):
|
||||||
|
if not isinstance(val, str):
|
||||||
|
raise AnsibleConnectionFailure(
|
||||||
|
f"Non-string {what.lower()} found for extra_env option. Ambiguous env options must be "
|
||||||
|
"wrapped in quotes to avoid them being interpreted when directly specified "
|
||||||
|
"in YAML, or explicitly converted to strings when the option is templated. "
|
||||||
|
f"{what}: {val!r}"
|
||||||
|
)
|
||||||
|
local_cmd += [
|
||||||
|
b"-e",
|
||||||
|
b"%s=%s"
|
||||||
|
% (
|
||||||
|
to_bytes(k, errors="surrogate_or_strict"),
|
||||||
|
to_bytes(v, errors="surrogate_or_strict"),
|
||||||
|
),
|
||||||
|
]
|
||||||
|
|
||||||
|
if self.get_option("working_dir") is not None:
|
||||||
|
local_cmd += [
|
||||||
|
b"-w",
|
||||||
|
to_bytes(self.get_option("working_dir"), errors="surrogate_or_strict"),
|
||||||
|
]
|
||||||
|
if self.docker_version != "dev" and LooseVersion(
|
||||||
|
self.docker_version
|
||||||
|
) < LooseVersion("18.06"):
|
||||||
|
# https://github.com/docker/cli/pull/732, first appeared in release 18.06.0
|
||||||
|
raise AnsibleConnectionFailure(
|
||||||
|
f"Providing the working directory requires Docker CLI version 18.06 or newer. You have Docker CLI version {self.docker_version}."
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.get_option("privileged"):
|
||||||
|
local_cmd += [b"--privileged"]
|
||||||
|
|
||||||
|
# -i is needed to keep stdin open which allows pipelining to work
|
||||||
|
local_cmd += [b"-i", self.get_option("remote_addr")] + cmd

        return local_cmd

    def _set_docker_args(self) -> None:
        # TODO: this is mostly for backwards compatibility, play_context is used as fallback for older versions
        # docker arguments
        del self._docker_args[:]
        extra_args = self.get_option("docker_extra_args") or getattr(
            self._play_context, "docker_extra_args", ""
        )
        if extra_args:
            self._docker_args += extra_args.split(" ")

    def _set_conn_data(self) -> None:
        """Initialize data for the connection; this cannot happen in __init__ alone since not all data is ready at that point"""

        self._set_docker_args()

        self.remote_user = self.get_option("remote_user")
        if self.remote_user is None and self._play_context.remote_user is not None:
            self.remote_user = self._play_context.remote_user

        # timeout: if the option still has its default value but the play context
        # specifies a different timeout, fall back to the play context value (backwards compat)
        self.timeout = self.get_option("container_timeout")
        if self.timeout == 10 and self.timeout != self._play_context.timeout:
            self.timeout = self._play_context.timeout

    @property
    def docker_version(self) -> str:
        if not self._version:
            self._set_docker_args()

            self._version = self._get_docker_version()
            if self._version == "dev":
                display.warning(
                    'Docker version number is "dev". Will assume latest version.'
                )
            if self._version != "dev" and LooseVersion(self._version) < LooseVersion(
                "1.3"
            ):
                raise AnsibleError(
                    "docker connection type requires docker 1.3 or higher"
                )
        return self._version

    def _get_actual_user(self) -> str | None:
        if self.remote_user is not None:
            # An explicit user is provided
            if self.docker_version == "dev" or LooseVersion(
                self.docker_version
            ) >= LooseVersion("1.7"):
                # Support for specifying the exec user was added in docker 1.7
                return self.remote_user
            self.remote_user = None
            actual_user = self._get_docker_remote_user()
            if actual_user != self.get_option("remote_user"):
                display.warning(
                    f"docker {self.docker_version} does not support remote_user, using container default: {actual_user or '?'}"
                )
            return actual_user
        if self._display.verbosity > 2:
            # Since we are not setting the actual_user, look it up so we have it for logging later
            # Only do this if display verbosity is high enough that we'll need the value
            # This saves overhead from calling into docker when we do not need to.
            return self._get_docker_remote_user()
        return None

    def _connect(self) -> t.Self:
        """Connect to the container. Nothing to do"""
        super()._connect()  # type: ignore[safe-super]
        if not self._connected:
            self._set_conn_data()
            actual_user = self._get_actual_user()
            display.vvv(
                f"ESTABLISH DOCKER CONNECTION FOR USER: {actual_user or '?'}",
                host=self.get_option("remote_addr"),
            )
            self._connected = True
        return self

    def exec_command(
        self, cmd: str, in_data: bytes | None = None, sudoable: bool = False
    ) -> tuple[int, bytes, bytes]:
        """Run a command on the docker host"""

        self._set_conn_data()

        super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

        local_cmd = self._build_exec_cmd([self._play_context.executable, "-c", cmd])

        display.vvv(f"EXEC {to_text(local_cmd)}", host=self.get_option("remote_addr"))
        display.debug("opening command with Popen()")

        local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd]

        with subprocess.Popen(
            local_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ) as p:
            assert p.stdin is not None
            assert p.stdout is not None
            assert p.stderr is not None
            display.debug("done running command with Popen()")

            if self.become and self.become.expect_prompt() and sudoable:
                fcntl.fcntl(
                    p.stdout,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK,
                )
                fcntl.fcntl(
                    p.stderr,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK,
                )
                selector = selectors.DefaultSelector()
                selector.register(p.stdout, selectors.EVENT_READ)
                selector.register(p.stderr, selectors.EVENT_READ)

                become_output = b""
                try:
                    while not self.become.check_success(
                        become_output
                    ) and not self.become.check_password_prompt(become_output):
                        events = selector.select(self.timeout)
                        if not events:
                            stdout, stderr = p.communicate()
                            raise AnsibleError(
                                "timeout waiting for privilege escalation password prompt:\n"
                                + to_text(become_output)
                            )

                        chunks = b""
                        for key, dummy_event in events:
                            if key.fileobj == p.stdout:
                                chunk = p.stdout.read()
                                if chunk:
                                    chunks += chunk
                            elif key.fileobj == p.stderr:
                                chunk = p.stderr.read()
                                if chunk:
                                    chunks += chunk

                        if not chunks:
                            stdout, stderr = p.communicate()
                            raise AnsibleError(
                                "privilege output closed while waiting for password prompt:\n"
                                + to_text(become_output)
                            )
                        become_output += chunks
                finally:
                    selector.close()

                if not self.become.check_success(become_output):
                    become_pass = self.become.get_option(
                        "become_pass", playcontext=self._play_context
                    )
                    p.stdin.write(
                        to_bytes(become_pass, errors="surrogate_or_strict") + b"\n"
                    )
                fcntl.fcntl(
                    p.stdout,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK,
                )
                fcntl.fcntl(
                    p.stderr,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK,
                )

            display.debug("getting output with communicate()")
            stdout, stderr = p.communicate(in_data)
            display.debug("done communicating")

        display.debug("done with docker.exec_command()")
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path: str) -> str:
        """Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we are not guaranteed that a home dir will
        exist in any given chroot. So for now we are choosing "/" instead.
        This also happens to be the former default.

        Can revisit using $HOME instead if it is a problem
        """
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath

            return ntpath.normpath(remote_path)
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)
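    # Illustrative behavior (not from the original source): "foo/bar.txt" becomes
    # "/foo/bar.txt", while absolute paths are only normalized.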

    def put_file(self, in_path: str, out_path: str) -> None:
        """Transfer a file from local to docker container"""
        self._set_conn_data()
        super().put_file(in_path, out_path)  # type: ignore[safe-super]
        display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option("remote_addr"))

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors="surrogate_or_strict")):
            raise AnsibleFileNotFound(
                f"file or module does not exist: {to_text(in_path)}"
            )

        out_path = quote(out_path)
        # Older docker does not have native support for copying files into
        # running containers, so we use docker exec to implement this
        # Although docker version 1.8 and later provide support, the
        # owner and group of the files are always set to root
        with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
            if not os.fstat(in_file.fileno()).st_size:
                count = " count=0"
            else:
                count = ""
            args = self._build_exec_cmd(
                [
                    self._play_context.executable,
                    "-c",
                    f"dd of={out_path} bs={BUFSIZE}{count}",
                ]
            )
            args = [to_bytes(i, errors="surrogate_or_strict") for i in args]
            try:
                # pylint: disable-next=consider-using-with
                p = subprocess.Popen(
                    args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except OSError as exc:
                raise AnsibleError(
                    "docker connection requires dd command in the container to put files"
                ) from exc
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError(
                    f"failed to transfer file {to_text(in_path)} to {to_text(out_path)}:\n{to_text(stdout)}\n{to_text(stderr)}"
                )
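        # Illustrative (assuming BUFSIZE, defined earlier in this file, is 65536):
        # the in-container command is roughly /bin/sh -c 'dd of=/path/to/out bs=65536',
        # with the local file piped to its stdin.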

    def fetch_file(self, in_path: str, out_path: str) -> None:
        """Fetch a file from container to local."""
        self._set_conn_data()
        super().fetch_file(in_path, out_path)  # type: ignore[safe-super]
        display.vvv(
            f"FETCH {in_path} TO {out_path}", host=self.get_option("remote_addr")
        )

        in_path = self._prefix_login_path(in_path)
        # out_path is the final file path, but docker takes a directory, not a
        # file path
        out_dir = os.path.dirname(out_path)

        args = [
            self.docker_cmd,
            "cp",
            f"{self.get_option('remote_addr')}:{in_path}",
            out_dir,
        ]
        args = [to_bytes(i, errors="surrogate_or_strict") for i in args]

        with subprocess.Popen(
            args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ) as p:
            p.communicate()

        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath

            actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
        else:
            actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

        if p.returncode != 0:
            # Older docker does not have native support for fetching files via the `cp` command
            # If `cp` fails, try to use `dd` instead
            args = self._build_exec_cmd(
                [
                    self._play_context.executable,
                    "-c",
                    f"dd if={in_path} bs={BUFSIZE}",
                ]
            )
            args = [to_bytes(i, errors="surrogate_or_strict") for i in args]
            with open(
                to_bytes(actual_out_path, errors="surrogate_or_strict"), "wb"
            ) as out_file:
                try:
                    # pylint: disable-next=consider-using-with
                    pp = subprocess.Popen(
                        args,
                        stdin=subprocess.PIPE,
                        stdout=out_file,
                        stderr=subprocess.PIPE,
                    )
                except OSError as exc:
                    raise AnsibleError(
                        "docker connection requires dd command in the container to fetch files"
                    ) from exc
                stdout, stderr = pp.communicate()

                if pp.returncode != 0:
                    raise AnsibleError(
                        f"failed to fetch file {in_path} to {out_path}:\n{stdout!r}\n{stderr!r}"
                    )

        # Rename if needed
        if actual_out_path != out_path:
            os.rename(
                to_bytes(actual_out_path, errors="strict"),
                to_bytes(out_path, errors="strict"),
            )

    def close(self) -> None:
        """Terminate the connection. Nothing to do for Docker"""
        super().close()  # type: ignore[safe-super]
        self._connected = False

    def reset(self) -> None:
        # Clear container user cache
        self._container_user_cache = {}

@@ -0,0 +1,479 @@
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
author:
  - Felix Fontein (@felixfontein)
name: docker_api
short_description: Run tasks in docker containers
version_added: 1.1.0
description:
  - Run commands or put/fetch files to an existing docker container.
  - Uses the L(requests library,https://pypi.org/project/requests/) to interact directly with the Docker daemon instead of
    using the Docker CLI. Use the P(community.docker.docker#connection) connection plugin if you want to use the Docker CLI.
notes:
  - Does B(not work with TCP TLS sockets)! This is caused by the inability to send C(close_notify) without closing the connection
    with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._docker.var_names
options:
  remote_user:
    type: str
    description:
      - The user to execute as inside the container.
    vars:
      - name: ansible_user
      - name: ansible_docker_user
    ini:
      - section: defaults
        key: remote_user
    env:
      - name: ANSIBLE_REMOTE_USER
    cli:
      - name: user
    keyword:
      - name: remote_user
  remote_addr:
    type: str
    description:
      - The name of the container you want to access.
    default: inventory_hostname
    vars:
      - name: inventory_hostname
      - name: ansible_host
      - name: ansible_docker_host
  container_timeout:
    default: 10
    description:
      - Controls how long we can wait for output from the container once execution of a command has started.
    env:
      - name: ANSIBLE_TIMEOUT
      - name: ANSIBLE_DOCKER_TIMEOUT
        version_added: 2.2.0
    ini:
      - key: timeout
        section: defaults
      - key: timeout
        section: docker_connection
        version_added: 2.2.0
    vars:
      - name: ansible_docker_timeout
        version_added: 2.2.0
    cli:
      - name: timeout
    type: integer
  extra_env:
    description:
      - Provide extra environment variables to set when running commands in the Docker container.
      - This option can currently only be provided as Ansible variables due to limitations of ansible-core's configuration
        manager.
    vars:
      - name: ansible_docker_extra_env
    type: dict
    version_added: 3.12.0
  working_dir:
    description:
      - The directory inside the container to run commands in.
      - Requires Docker API version 1.35 or later.
    env:
      - name: ANSIBLE_DOCKER_WORKING_DIR
    ini:
      - key: working_dir
        section: docker_connection
    vars:
      - name: ansible_docker_working_dir
    type: string
    version_added: 3.12.0
  privileged:
    description:
      - Whether commands should be run with extended privileges.
      - B(Note) that this allows commands to potentially break out of the container. Use with care!
    env:
      - name: ANSIBLE_DOCKER_PRIVILEGED
    ini:
      - key: privileged
        section: docker_connection
    vars:
      - name: ansible_docker_privileged
    type: boolean
    default: false
    version_added: 3.12.0
"""

import os
import os.path
import typing as t

from ansible.errors import AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    DockerException,
    NotFound,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._copy import (
    DockerFileCopyError,
    DockerFileNotFound,
    fetch_file,
    put_file,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)
from ansible_collections.community.docker.plugins.plugin_utils._common_api import (
    AnsibleDockerClient,
)
from ansible_collections.community.docker.plugins.plugin_utils._socket_handler import (
    DockerSocketHandler,
)

if t.TYPE_CHECKING:
    from collections.abc import Callable

_T = t.TypeVar("_T")


MIN_DOCKER_API = None


display = Display()


class Connection(ConnectionBase):
    """Local docker based connections"""

    transport = "community.docker.docker_api"
    has_pipelining = True

    def _call_client(
        self,
        f: Callable[[AnsibleDockerClient], _T],
        not_found_can_be_resource: bool = False,
    ) -> _T:
        if self.client is None:
            raise AssertionError("Client must be present")
        remote_addr = self.get_option("remote_addr")
        try:
            return f(self.client)
        except NotFound as e:
            if not_found_can_be_resource:
                raise AnsibleConnectionFailure(
                    f'Could not find container "{remote_addr}" or resource in it ({e})'
                ) from e
            raise AnsibleConnectionFailure(
                f'Could not find container "{remote_addr}" ({e})'
            ) from e
        except APIError as e:
            if e.response is not None and e.response.status_code == 409:
                raise AnsibleConnectionFailure(
                    f'The container "{remote_addr}" has been paused ({e})'
                ) from e
            self.client.fail(
                f'An unexpected Docker error occurred for container "{remote_addr}": {e}'
            )
        except DockerException as e:
            self.client.fail(
                f'An unexpected Docker error occurred for container "{remote_addr}": {e}'
            )
        except RequestException as e:
            self.client.fail(
                f'An unexpected requests error occurred for container "{remote_addr}" when trying to talk to the Docker daemon: {e}'
            )

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)

        self.client: AnsibleDockerClient | None = None
        self.ids: dict[str | None, tuple[int, int]] = {}

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = (".ps1", ".exe", "")

        self.actual_user: str | None = None

    def _connect(self) -> Connection:
        """Connect to the container. Nothing to do"""
        super()._connect()  # type: ignore[safe-super]
        if not self._connected:
            self.actual_user = self.get_option("remote_user")
            display.vvv(
                f"ESTABLISH DOCKER CONNECTION FOR USER: {self.actual_user or '?'}",
                host=self.get_option("remote_addr"),
            )
            if self.client is None:
                self.client = AnsibleDockerClient(
                    self, min_docker_api_version=MIN_DOCKER_API
                )
            self._connected = True

        if self.actual_user is None and display.verbosity > 2:
            # Since we are not setting the actual_user, look it up so we have it for logging later
            # Only do this if display verbosity is high enough that we'll need the value
            # This saves overhead from calling into docker when we do not need to
            display.vvv("Trying to determine actual user")
            result = self._call_client(
                lambda client: client.get_json(
                    "/containers/{0}/json", self.get_option("remote_addr")
                )
            )
            if result.get("Config"):
                self.actual_user = result["Config"].get("User")
                if self.actual_user is not None:
                    display.vvv(f"Actual user is '{self.actual_user}'")

        return self

    def exec_command(
        self, cmd: str, in_data: bytes | None = None, sudoable: bool = False
    ) -> tuple[int, bytes, bytes]:
        """Run a command on the docker host"""

        super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

        if self.client is None:
            raise AssertionError("Client must be present")

        command = [self._play_context.executable, "-c", cmd]

        do_become = self.become and self.become.expect_prompt() and sudoable

        stdin_part = (
            f", with stdin ({len(in_data)} bytes)" if in_data is not None else ""
        )
        become_part = ", with become prompt" if do_become else ""
        display.vvv(
            f"EXEC {to_text(command)}{stdin_part}{become_part}",
            host=self.get_option("remote_addr"),
        )

        need_stdin = bool((in_data is not None) or do_become)

        data = {
            "Container": self.get_option("remote_addr"),
            "User": self.get_option("remote_user") or "",
            "Privileged": self.get_option("privileged"),
            "Tty": False,
            "AttachStdin": need_stdin,
            "AttachStdout": True,
            "AttachStderr": True,
            "Cmd": command,
        }

        if "detachKeys" in self.client._general_configs:
            data["detachKeys"] = self.client._general_configs["detachKeys"]

        if self.get_option("extra_env"):
            data["Env"] = []
            for k, v in self.get_option("extra_env").items():
                for val, what in ((k, "Key"), (v, "Value")):
                    if not isinstance(val, str):
                        raise AnsibleConnectionFailure(
                            f"Non-string {what.lower()} found for extra_env option. Ambiguous env options must be "
                            "wrapped in quotes to avoid them being interpreted when directly specified "
                            "in YAML, or explicitly converted to strings when the option is templated. "
                            f"{what}: {val!r}"
                        )
                data["Env"].append(f"{k}={v}")

        if self.get_option("working_dir") is not None:
            data["WorkingDir"] = self.get_option("working_dir")
            if self.client.docker_api_version < LooseVersion("1.35"):
                raise AnsibleConnectionFailure(
                    "Providing the working directory requires Docker API version 1.35 or newer."
                    f" The Docker daemon the connection is using has API version {self.client.docker_api_version_str}."
                )

        exec_data = self._call_client(
            lambda client: client.post_json_to_json(
                "/containers/{0}/exec", self.get_option("remote_addr"), data=data
            )
        )
        exec_id = exec_data["Id"]
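        # The exec instance was created via POST /containers/{id}/exec above; below
        # it is started with POST /exec/{id}/start, and its exit code is read from
        # GET /exec/{id}/json once the command finishes.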

        data = {"Tty": False, "Detach": False}
        if need_stdin:
            exec_socket = self._call_client(
                lambda client: client.post_json_to_stream_socket(
                    "/exec/{0}/start", exec_id, data=data
                )
            )
            try:
                with DockerSocketHandler(
                    display, exec_socket, container=self.get_option("remote_addr")
                ) as exec_socket_handler:
                    if do_become:
                        assert self.become is not None

                        become_output = [b""]

                        def append_become_output(stream_id: int, data: bytes) -> None:
                            become_output[0] += data

                        exec_socket_handler.set_block_done_callback(
                            append_become_output
                        )

                        while not self.become.check_success(
                            become_output[0]
                        ) and not self.become.check_password_prompt(become_output[0]):
                            if not exec_socket_handler.select(
                                self.get_option("container_timeout")
                            ):
                                stdout, stderr = exec_socket_handler.consume()
                                raise AnsibleConnectionFailure(
                                    "timeout waiting for privilege escalation password prompt:\n"
                                    + to_text(become_output[0])
                                )

                            if exec_socket_handler.is_eof():
                                raise AnsibleConnectionFailure(
                                    "privilege output closed while waiting for password prompt:\n"
                                    + to_text(become_output[0])
                                )

                        if not self.become.check_success(become_output[0]):
                            become_pass = self.become.get_option(
                                "become_pass", playcontext=self._play_context
                            )
                            exec_socket_handler.write(
                                to_bytes(become_pass, errors="surrogate_or_strict")
                                + b"\n"
                            )

                    if in_data is not None:
                        exec_socket_handler.write(in_data)

                    stdout, stderr = exec_socket_handler.consume()
            finally:
                exec_socket.close()
        else:
            stdout, stderr = self._call_client(
                lambda client: client.post_json_to_stream(
                    "/exec/{0}/start",
                    exec_id,
                    stream=False,
                    demux=True,
                    tty=False,
                    data=data,
                )
            )

        result = self._call_client(
            lambda client: client.get_json("/exec/{0}/json", exec_id)
        )

        return result.get("ExitCode") or 0, stdout or b"", stderr or b""

    def _prefix_login_path(self, remote_path: str) -> str:
        """Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we are not guaranteed that a home dir will
        exist in any given chroot. So for now we are choosing "/" instead.
        This also happens to be the former default.

        Can revisit using $HOME instead if it is a problem
        """
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath

            return ntpath.normpath(remote_path)
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path: str, out_path: str) -> None:
        """Transfer a file from local to docker container"""
        super().put_file(in_path, out_path)  # type: ignore[safe-super]
        display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option("remote_addr"))

        if self.client is None:
            raise AssertionError("Client must be present")

        out_path = self._prefix_login_path(out_path)

        if self.actual_user not in self.ids:
            dummy, ids, dummy2 = self.exec_command("id -u && id -g")
            remote_addr = self.get_option("remote_addr")
            try:
                b_user_id, b_group_id = ids.splitlines()
                user_id, group_id = int(b_user_id), int(b_group_id)
                self.ids[self.actual_user] = user_id, group_id
                display.vvvv(
                    f'PUT: Determined uid={user_id} and gid={group_id} for user "{self.actual_user}"',
                    host=remote_addr,
                )
            except Exception as e:
                raise AnsibleConnectionFailure(
                    f'Error while determining user and group ID of current user in container "{remote_addr}": {e}\nGot value: {ids!r}'
                ) from e

        user_id, group_id = self.ids[self.actual_user]
        try:
            self._call_client(
                lambda client: put_file(
                    client,
                    container=self.get_option("remote_addr"),
                    in_path=in_path,
                    out_path=out_path,
                    user_id=user_id,
                    group_id=group_id,
                    user_name=self.actual_user,
                    follow_links=True,
                ),
                not_found_can_be_resource=True,
            )
        except DockerFileNotFound as exc:
            raise AnsibleFileNotFound(to_text(exc)) from exc
        except DockerFileCopyError as exc:
            raise AnsibleConnectionFailure(to_text(exc)) from exc

    def fetch_file(self, in_path: str, out_path: str) -> None:
        """Fetch a file from container to local."""
        super().fetch_file(in_path, out_path)  # type: ignore[safe-super]
        display.vvv(
            f"FETCH {in_path} TO {out_path}", host=self.get_option("remote_addr")
        )

        if self.client is None:
            raise AssertionError("Client must be present")

        in_path = self._prefix_login_path(in_path)

        try:
            self._call_client(
                lambda client: fetch_file(
                    client,
                    container=self.get_option("remote_addr"),
                    in_path=in_path,
                    out_path=out_path,
                    follow_links=True,
                    log=lambda msg: display.vvvv(
                        msg, host=self.get_option("remote_addr")
                    ),
                ),
                not_found_can_be_resource=True,
            )
        except DockerFileNotFound as exc:
            raise AnsibleFileNotFound(to_text(exc)) from exc
        except DockerFileCopyError as exc:
            raise AnsibleConnectionFailure(to_text(exc)) from exc

    def close(self) -> None:
        """Terminate the connection. Nothing to do for Docker"""
        super().close()  # type: ignore[safe-super]
        self._connected = False

    def reset(self) -> None:
        self.ids.clear()

@@ -0,0 +1,286 @@
# Copyright (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
# Based on Ansible local connection plugin by:
# Copyright (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
# Copyright (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
name: nsenter
short_description: execute on host running controller container
version_added: 1.9.0
description:
  - This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container host instead
    of in the container itself.
  - This is useful for running Ansible in a pull model, while still keeping the Ansible control node containerized.
  - It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the namespaces
    of the provided PID (default PID 1, or init/systemd).
author: Jeff Goldschrafe (@jgoldschrafe)
options:
  nsenter_pid:
    description:
      - PID to attach with using nsenter.
      - The default should be fine unless you are attaching as a non-root user.
    type: int
    default: 1
    vars:
      - name: ansible_nsenter_pid
    env:
      - name: ANSIBLE_NSENTER_PID
    ini:
      - section: nsenter_connection
        key: nsenter_pid
notes:
  - The remote user is ignored; this plugin always runs as root.
  - "This plugin requires the Ansible controller container to be launched in the following way: (1) The container image contains
    the C(nsenter) program; (2) The container is launched in privileged mode; (3) The container is launched in the host's
    PID namespace (C(--pid host))."
"""

import fcntl
import os
import pty
import selectors
import shlex
import subprocess
import typing as t

import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath

display = Display()


class Connection(ConnectionBase):
    """Connections to a container host using nsenter"""

    transport = "community.docker.nsenter"
    has_pipelining = False

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)
        self.cwd = None
        self._nsenter_pid = None

    def _connect(self) -> t.Self:
        self._nsenter_pid = self.get_option("nsenter_pid")

        # Because nsenter requires very high privileges, our remote user
        # is always assumed to be root.
        self._play_context.remote_user = "root"

        if not self._connected:
            display.vvv(
                f"ESTABLISH NSENTER CONNECTION FOR USER: {self._play_context.remote_user}",
                host=self._play_context.remote_addr,
            )
            self._connected = True
        return self

    def exec_command(
        self, cmd: str, in_data: bytes | None = None, sudoable: bool = True
    ) -> tuple[int, bytes, bytes]:
        super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

        display.debug("in nsenter.exec_command()")

        # pylint: disable-next=no-member
        def_executable: str | None = C.DEFAULT_EXECUTABLE  # type: ignore[attr-defined]
        executable = def_executable.split()[0] if def_executable else None

        if not os.path.exists(to_bytes(executable, errors="surrogate_or_strict")):
            raise AnsibleError(
                f"failed to find the executable specified {executable}."
                " Please verify if the executable exists and re-try."
            )

        # Rewrite the provided command to prefix it with nsenter
        nsenter_cmd_parts = [
            "nsenter",
            "--ipc",
            "--mount",
            "--net",
            "--pid",
            "--uts",
            "--preserve-credentials",
            f"--target={self._nsenter_pid}",
            "--",
        ]
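        # Illustrative resulting invocation (with the default nsenter_pid of 1):
        #   nsenter --ipc --mount --net --pid --uts --preserve-credentials --target=1 -- <cmd>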

        cmd_parts = nsenter_cmd_parts + [cmd]
        cmd_b = to_bytes(" ".join(cmd_parts))

        display.vvv(f"EXEC {to_text(cmd_b)}", host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        master = None
        stdin = subprocess.PIPE

        # This plugin does not support pipelining. This diverges from the behavior of
        # the core "local" connection plugin that this one derives from.
        if sudoable and self.become and self.become.expect_prompt():
            # Create a pty if sudoable for privilege escalation that needs it.
            # Falls back to using a standard pipe if this fails, which may
            # cause the command to fail in certain situations where we are escalating
            # privileges or the command otherwise needs a pty.
            try:
                master, stdin = pty.openpty()
            except (IOError, OSError) as e:
                display.debug(f"Unable to open pty: {e}")

        with subprocess.Popen(
            cmd_b,
            shell=True,
            executable=executable,
            cwd=self.cwd,
            stdin=stdin,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ) as p:
            assert p.stderr is not None
            assert p.stdin is not None
            assert p.stdout is not None
            # if we created a master, we can close the other half of the pty now, otherwise master is stdin
            if master is not None:
                os.close(stdin)

            display.debug("done running command with Popen()")

            if self.become and self.become.expect_prompt() and sudoable:
                fcntl.fcntl(
                    p.stdout,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK,
                )
                fcntl.fcntl(
                    p.stderr,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK,
                )
                selector = selectors.DefaultSelector()
                selector.register(p.stdout, selectors.EVENT_READ)
                selector.register(p.stderr, selectors.EVENT_READ)

                become_output = b""
                try:
                    while not self.become.check_success(
                        become_output
                    ) and not self.become.check_password_prompt(become_output):
                        events = selector.select(self._play_context.timeout)
                        if not events:
                            stdout, stderr = p.communicate()
                            raise AnsibleError(
                                "timeout waiting for privilege escalation password prompt:\n"
                                + to_text(become_output)
                            )

                        chunks = b""
                        for key, dummy_event in events:
                            if key.fileobj == p.stdout:
                                chunk = p.stdout.read()
                                if chunk:
                                    chunks += chunk
                            elif key.fileobj == p.stderr:
                                chunk = p.stderr.read()
                                if chunk:
                                    chunks += chunk

                        if not chunks:
                            stdout, stderr = p.communicate()
                            raise AnsibleError(
                                "privilege output closed while waiting for password prompt:\n"
                                + to_text(become_output)
                            )
                        become_output += chunks
                finally:
                    selector.close()

                if not self.become.check_success(become_output):
                    become_pass = self.become.get_option(
                        "become_pass", playcontext=self._play_context
                    )
                    if master is None:
                        p.stdin.write(
                            to_bytes(become_pass, errors="surrogate_or_strict") + b"\n"
                        )
                    else:
                        os.write(
                            master,
                            to_bytes(become_pass, errors="surrogate_or_strict") + b"\n",
                        )

                fcntl.fcntl(
                    p.stdout,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK,
                )
                fcntl.fcntl(
                    p.stderr,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK,
                )

            display.debug("getting output with communicate()")
            stdout, stderr = p.communicate(in_data)
            display.debug("done communicating")

            # finally, close the other half of the pty, if it was created
            if master:
                os.close(master)

        display.debug("done with nsenter.exec_command()")
        return (p.returncode, stdout, stderr)

    def put_file(self, in_path: str, out_path: str) -> None:
        super().put_file(in_path, out_path)  # type: ignore[safe-super]

        in_path = unfrackpath(in_path, basedir=self.cwd)
        out_path = unfrackpath(out_path, basedir=self.cwd)

        display.vvv(f"PUT {in_path} to {out_path}", host=self._play_context.remote_addr)
        try:
            with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
                in_data = in_file.read()
                rc, dummy_out, err = self.exec_command(
                    cmd=f"tee {shlex.quote(out_path)}", in_data=in_data
                )
                if rc != 0:
                    raise AnsibleError(
                        f"failed to transfer file to {out_path}: {to_text(err)}"
                    )
        except IOError as e:
            raise AnsibleError(f"failed to transfer file to {out_path}: {e}") from e

    def fetch_file(self, in_path: str, out_path: str) -> None:
        super().fetch_file(in_path, out_path)  # type: ignore[safe-super]

        in_path = unfrackpath(in_path, basedir=self.cwd)
        out_path = unfrackpath(out_path, basedir=self.cwd)

        try:
            rc, out, err = self.exec_command(cmd=f"cat {shlex.quote(in_path)}")
            display.vvv(
                f"FETCH {in_path} TO {out_path}", host=self._play_context.remote_addr
            )
            if rc != 0:
                raise AnsibleError(
                    f"failed to transfer file from {in_path}: {to_text(err)}"
                )
            with open(
                to_bytes(out_path, errors="surrogate_or_strict"), "wb"
            ) as out_file:
                out_file.write(out)
        except IOError as e:
            raise AnsibleError(
                f"failed to transfer file to {to_text(out_path)}: {e}"
            ) from e

    def close(self) -> None:
        """terminate the connection; nothing to do here"""
        self._connected = False

@@ -0,0 +1,110 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


class ModuleDocFragment:

    # Standard documentation fragment
    DOCUMENTATION = r"""
options: {}
attributes:
  check_mode:
    description: Can run in C(check_mode) and return changed status prediction without modifying target.
  diff_mode:
    description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
  idempotent:
    description:
      - When run twice in a row outside check mode, with the same arguments, the second invocation indicates no change.
      - This assumes that the system controlled/queried by the module has not changed in a relevant way.
"""

    # Should be used together with the standard fragment
    IDEMPOTENT_NOT_MODIFY_STATE = r"""
options: {}
attributes:
  idempotent:
    support: full
    details:
      - This action does not modify state.
"""

    # Should be used together with the standard fragment
    INFO_MODULE = r"""
options: {}
attributes:
  check_mode:
    support: full
    details:
      - This action does not modify state.
  diff_mode:
    support: N/A
    details:
      - This action does not modify state.
"""

    ACTIONGROUP_DOCKER = r"""
options: {}
attributes:
  action_group:
    description: Use C(group/docker) or C(group/community.docker.docker) in C(module_defaults) to set defaults for this module.
    support: full
    membership:
      - community.docker.docker
      - docker
"""

    CONN = r"""
options: {}
attributes:
  become:
    description: Is usable alongside C(become) keywords.
  connection:
    description: Uses the target's configured connection information to execute code on it.
  delegation:
    description: Can be used in conjunction with C(delegate_to) and related keywords.
"""

    FACTS = r"""
options: {}
attributes:
  facts:
    description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
"""

    # Should be used together with the standard fragment and the FACTS fragment
    FACTS_MODULE = r"""
options: {}
attributes:
  check_mode:
    support: full
    details:
      - This action does not modify state.
  diff_mode:
    support: N/A
    details:
      - This action does not modify state.
  facts:
    support: full
"""

    FILES = r"""
options: {}
attributes:
  safe_file_operations:
    description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
"""

    FLOW = r"""
options: {}
attributes:
  action:
    description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
  async:
    description: Supports being used with the C(async) keyword.
"""

@@ -0,0 +1,82 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


class ModuleDocFragment:

    # Docker doc fragment
    DOCUMENTATION = r"""
options:
  project_src:
    description:
      - Path to a directory containing a Compose file (C(compose.yml), C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)).
      - If O(files) is provided, will look for these files in this directory instead.
      - Mutually exclusive with O(definition). One of O(project_src) and O(definition) must be provided.
    type: path
  project_name:
    description:
      - Provide a project name. If not provided, the project name is taken from the basename of O(project_src).
      - Required when O(definition) is provided.
    type: str
  files:
    description:
      - List of Compose file names relative to O(project_src) to be used instead of the main Compose file (C(compose.yml),
        C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)).
      - Files are loaded and merged in the order given.
      - Mutually exclusive with O(definition).
    type: list
    elements: path
    version_added: 3.7.0
  definition:
    description:
      - Compose file describing one or more services, networks and volumes.
      - Mutually exclusive with O(project_src) and O(files). One of O(project_src) and O(definition) must be provided.
      - If provided, PyYAML must be available to this module, and O(project_name) must be specified.
      - Note that a temporary directory will be created and deleted afterwards when using this option.
    type: dict
    version_added: 3.9.0
  env_files:
    description:
      - By default environment files are loaded from a C(.env) file located directly under the O(project_src) directory.
      - O(env_files) can be used to specify the path of one or multiple custom environment files instead.
      - The path is relative to the O(project_src) directory.
    type: list
    elements: path
  profiles:
    description:
      - List of profiles to enable when starting services.
      - Equivalent to C(docker compose --profile).
    type: list
    elements: str
  check_files_existing:
    description:
      - If set to V(false), the module will not check whether one of the files C(compose.yaml), C(compose.yml), C(docker-compose.yaml),
        or C(docker-compose.yml) exists in O(project_src) if O(files) is not provided.
      - This can be useful if environment files with C(COMPOSE_FILE) are used to configure a different filename. The module
        currently does not check for C(COMPOSE_FILE) in environment files or the current environment.
    type: bool
    default: true
    version_added: 3.9.0
requirements:
  - "PyYAML if O(definition) is used"
notes:
  - |-
    The Docker compose CLI plugin has no stable output format (see for example U(https://github.com/docker/compose/issues/10872)),
    and for the main operations also no machine friendly output format. The module tries to accommodate this with various
    version-dependent behavior adjustments and with testing older and newer versions of the Docker compose CLI plugin.
    Currently the module is tested with multiple plugin versions between 2.18.1 and 2.23.3. The exact list of plugin versions
    will change over time. New releases of the Docker compose CLI plugin can break this module at any time.
"""

    # The following needs to be kept in sync with the compose_v2 module utils
    MINIMUM_VERSION = r"""
options: {}
requirements:
  - "Docker CLI with Docker compose plugin 2.18.0 or later"
"""

@@ -0,0 +1,389 @@
|
||||||
|
# Copyright (c) Ansible Project
|
||||||
|
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||||
|
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
|
||||||
|
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||||
|
# Do not use this from other collections or standalone plugins/modules!
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleDocFragment:
|
||||||
|
|
||||||
|
# Docker doc fragment
|
||||||
|
DOCUMENTATION = r"""
|
||||||
|
options:
|
||||||
|
docker_host:
|
||||||
|
description:
|
||||||
|
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the TCP connection
|
||||||
|
string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, the module will automatically
|
||||||
|
replace C(tcp) in the connection URL with C(https).
|
||||||
|
- If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used instead.
|
||||||
|
If the environment variable is not set, the default value will be used.
|
||||||
|
type: str
|
||||||
|
default: unix:///var/run/docker.sock
|
||||||
|
aliases:
|
||||||
|
- docker_url
|
||||||
|
tls_hostname:
|
||||||
|
description:
|
||||||
|
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
|
||||||
|
- If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will be used instead.
|
||||||
|
If the environment variable is not set, the default value will be used.
|
||||||
|
- Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0.
|
||||||
|
- B(Note:) this option is no longer supported for Docker SDK for Python 7.0.0+. Specifying it with Docker SDK for Python
|
||||||
|
7.0.0 or newer will lead to an error.
|
||||||
|
type: str
|
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: int
    default: 60
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(ca.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
        as an alias and can still be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(cert.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(key.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Note that
        if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used instead. If
        the environment variable is not set, the default value will be used.
    type: bool
    default: false
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
      - Requires Docker SDK for Python 4.4.0 or newer.
    type: bool
    default: false
    version_added: 1.5.0
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  debug:
    description:
      - Debug mode.
    type: bool
    default: false

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. You can define
    E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH), E(DOCKER_TLS), E(DOCKER_TLS_VERIFY)
    and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped with the product that sets up the environment.
    It will set these variables for you. See U(https://docs.docker.com/machine/reference/env/) for more details.
  - When connecting to the Docker daemon with TLS, you might need to install additional Python packages. For the Docker SDK
    for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
  - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
    In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified, and
    use C($DOCKER_CONFIG/config.json) otherwise.
"""

# For plugins: allow defining common options with Ansible variables

VAR_NAMES = r"""
options:
  docker_host:
    vars:
      - name: ansible_docker_docker_host
  tls_hostname:
    vars:
      - name: ansible_docker_tls_hostname
  api_version:
    vars:
      - name: ansible_docker_api_version
  timeout:
    vars:
      - name: ansible_docker_timeout
  ca_path:
    vars:
      - name: ansible_docker_ca_cert
      - name: ansible_docker_ca_path
        version_added: 3.6.0
  client_cert:
    vars:
      - name: ansible_docker_client_cert
  client_key:
    vars:
      - name: ansible_docker_client_key
  tls:
    vars:
      - name: ansible_docker_tls
  validate_certs:
    vars:
      - name: ansible_docker_validate_certs
"""

# Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.

DOCKER_PY_2_DOCUMENTATION = r"""
options: {}
notes:
  - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon.
requirements:
  - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
    Python module has been superseded by L(docker,https://pypi.org/project/docker/)
    (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
    This module does B(not) work with docker-py."
"""


# Docker doc fragment when using the vendored API access code
API_DOCUMENTATION = r"""
options:
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
        TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
        the module will automatically replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: str
    default: unix:///var/run/docker.sock
    aliases:
      - docker_url
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will
        be used instead. If the environment variable is not set, the default value will be used.
      - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by this collection and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: int
    default: 60
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has
        been added as an alias and can still be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
        server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
    type: bool
    default: false
    version_added: 1.5.0
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  debug:
    description:
      - Debug mode.
    type: bool
    default: false

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
    You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH),
    E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
    with the product that sets up the environment. It will set these variables for you. See
    U(https://docs.docker.com/machine/reference/env/) for more details.
  # - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
  #   In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified,
  #   and use C($DOCKER_CONFIG/config.json) otherwise.
  - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon. It uses code derived from the Docker SDK for Python that is included in this
    collection.
requirements:
  - requests
  - pywin32 (when using named pipes on Windows 32)
  - paramiko (when using SSH with O(use_ssh_client=false))
  - pyOpenSSL (when using TLS)
"""


# Docker doc fragment when using the Docker CLI
CLI_DOCUMENTATION = r"""
options:
  docker_cli:
    description:
      - Path to the Docker CLI. If not provided, will search for Docker CLI on the E(PATH).
    type: path
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
        TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
        the module will automatically replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used
        instead. If the environment variable is not set, the default value will be used.
      - Mutually exclusive with O(cli_context). If neither O(docker_host) nor O(cli_context) are provided, the
        value V(unix:///var/run/docker.sock) is used.
    type: str
    aliases:
      - docker_url
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will
        be used instead. If the environment variable is not set, the default value will be used.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by this collection and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
        server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  # debug:
  #   description:
  #     - Debug mode
  #   type: bool
  #   default: false
  cli_context:
    description:
      - The Docker CLI context to use.
      - Mutually exclusive with O(docker_host).
    type: str

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
    You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH),
    E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
    with the product that sets up the environment. It will set these variables for you. See
    U(https://docs.docker.com/machine/reference/env/) for more details.
  - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon. It directly calls the Docker CLI program.
"""

@ -0,0 +1,423 @@
# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
# For the parts taken from the docker inventory script:
# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
name: docker_containers
short_description: Ansible dynamic inventory plugin for Docker containers
version_added: 1.1.0
author:
  - Felix Fontein (@felixfontein)
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.docker._docker.api_documentation
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Reads inventories from the Docker API.
  - Uses a YAML configuration file that ends with V(docker.(yml|yaml\)).
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker.yml) or V(docker.yaml). Other filenames will
    not be accepted.
options:
  plugin:
    description:
      - The name of this plugin. It should always be set to V(community.docker.docker_containers) for this plugin to recognize
        it as its own.
    type: str
    required: true
    choices: [community.docker.docker_containers]

  connection_type:
    description:
      - Which connection type to use to connect to the containers.
      - One way to connect to containers is to use SSH (V(ssh)). For this, the options O(default_ip) and O(private_ssh_port)
        are used. This requires that an SSH daemon is running inside the containers.
      - Alternatively, V(docker-cli) selects the P(community.docker.docker#connection) connection plugin, and V(docker-api)
        (default) selects the P(community.docker.docker_api#connection) connection plugin.
      - When V(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin to the connection
        plugin. This can be controlled with O(configure_docker_daemon).
      - Note that the P(community.docker.docker_api#connection) connection plugin does B(not work with TCP TLS sockets)!
        See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
    type: str
    default: docker-api
    choices:
      - ssh
      - docker-cli
      - docker-api

  configure_docker_daemon:
    description:
      - Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
      - Only used when O(connection_type=docker-api).
    type: bool
    default: true
    version_added: 1.8.0

  verbose_output:
    description:
      - Toggle to (not) include all available inspection metadata.
      - Note that all top-level keys will be transformed to the format C(docker_xxx). For example, C(HostConfig) is converted
        to C(docker_hostconfig).
      - If this is V(false), these values can only be used during O(compose), O(groups), and O(keyed_groups).
      - The C(docker) inventory script always added these variables, so for compatibility set this to V(true).
    type: bool
    default: false

  default_ip:
    description:
      - The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
      - Only used if O(connection_type) is V(ssh).
    type: str
    default: 127.0.0.1

  private_ssh_port:
    description:
      - The port containers use for SSH.
      - Only used if O(connection_type) is V(ssh).
    type: int
    default: 22

  add_legacy_groups:
    description:
      - 'Add the same groups as the C(docker) inventory script does. These are the following:'
      - 'C(<container id>): contains the container of this ID.'
      - 'C(<container name>): contains the container that has this name.'
      - 'C(<container short id>): contains the containers that have this short ID (first 13 letters of ID).'
      - 'C(image_<image name>): contains the containers that have the image C(<image name>).'
      - 'C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>).'
      - 'C(service_<service name>): contains the containers that belong to the service C(<service name>).'
      - 'C(<docker_host>): contains the containers which belong to the Docker daemon O(docker_host). Useful if you run this
        plugin against multiple Docker daemons.'
      - 'C(running): contains all containers that are running.'
      - 'C(stopped): contains all containers that are not running.'
      - If this is not set to V(true), you should use keyed groups to add the containers to groups. See the examples for how
        to do that.
    type: bool
    default: false

  filters:
    version_added: 3.5.0
"""

EXAMPLES = """
---
# Minimal example using local Docker daemon
plugin: community.docker.docker_containers
docker_host: unix:///var/run/docker.sock

---
# Minimal example using remote Docker daemon
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2375

---
# Example using remote Docker daemon with unverified TLS
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
tls: true

---
# Example using remote Docker daemon with verified TLS and client certificate verification
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
validate_certs: true
ca_path: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem

---
# Example using constructed features to create groups
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2375
strict: false
keyed_groups:
  # Add containers with primary network foo to a network_foo group
  - prefix: network
    key: 'docker_hostconfig.NetworkMode'
  # Add Linux hosts to an os_linux group
  - prefix: os
    key: docker_platform

---
# Example using SSH connection with an explicit fallback for when port 22 has not been
# exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
plugin: community.docker.docker_containers
connection_type: ssh
compose:
  ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
  ansible_ssh_port: ansible_ssh_port | default(22, true)

---
# Only consider containers which have a label 'foo', or whose name starts with 'a'
plugin: community.docker.docker_containers
filters:
  # Accept all containers which have a label called 'foo'
  - include: >-
      "foo" in docker_config.Labels
  # Next accept all containers whose inventory_hostname starts with 'a'
  - include: >-
      inventory_hostname.startswith("a")
  # Exclude all containers that did not match any of the above filters
  - exclude: true
"""

import re
import typing as t

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
    filter_host,
    parse_filters,
)

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DOCKER_COMMON_ARGS_VARS,
)
from ansible_collections.community.docker.plugins.plugin_utils._common_api import (
    AnsibleDockerClient,
)
from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
    make_unsafe,
)

if t.TYPE_CHECKING:
    from ansible.inventory.data import InventoryData
    from ansible.parsing.dataloader import DataLoader


MIN_DOCKER_API = None


class InventoryModule(BaseInventoryPlugin, Constructable):
    """Host inventory parser for ansible using Docker daemon as source."""

    NAME = "community.docker.docker_containers"

    def _slugify(self, value: str) -> str:
        slug = re.sub(r"[^\w-]", "_", value).lower().lstrip("_")
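        # For example (matching the substitution above): "HostConfig" becomes
        # "docker_hostconfig", and "NetworkSettings" becomes "docker_networksettings".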
        return f"docker_{slug}"

    def _populate(self, client: AnsibleDockerClient) -> None:
        strict = self.get_option("strict")

        ssh_port = self.get_option("private_ssh_port")
        default_ip = self.get_option("default_ip")
        hostname = self.get_option("docker_host")
        verbose_output = self.get_option("verbose_output")
        connection_type = self.get_option("connection_type")
        add_legacy_groups = self.get_option("add_legacy_groups")

        if self.inventory is None:
            raise AssertionError("Inventory must be there")

        try:
            params = {
                "limit": -1,
                "all": 1,
                "size": 0,
                "trunc_cmd": 0,
                "since": None,
                "before": None,
            }
            containers = client.get_json("/containers/json", params=params)
        except APIError as exc:
            raise AnsibleError(f"Error listing containers: {exc}") from exc

        if add_legacy_groups:
            self.inventory.add_group("running")
            self.inventory.add_group("stopped")

        extra_facts = {}
        if self.get_option("configure_docker_daemon"):
            for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
                value = self.get_option(option_name)
                if value is not None:
                    extra_facts[var_name] = value

        filters = parse_filters(self.get_option("filters"))
        for container in containers:
            container_id = container.get("Id")
            short_container_id = container_id[:13]

            try:
                name = container.get("Names", [])[0].lstrip("/")
                full_name = name
            except IndexError:
                name = short_container_id
                full_name = container_id

            facts = {
                "docker_name": make_unsafe(name),
                "docker_short_id": make_unsafe(short_container_id),
            }
            full_facts = {}

            try:
                inspect = client.get_json("/containers/{0}/json", container_id)
            except APIError as exc:
                raise AnsibleError(
                    f"Error inspecting container {name} - {exc}"
                ) from exc

            state = inspect.get("State") or {}
            config = inspect.get("Config") or {}
            labels = config.get("Labels") or {}

            running = state.get("Running")

            groups = []

            # Add container to groups
            image_name = config.get("Image")
            if image_name and add_legacy_groups:
                groups.append(f"image_{image_name}")

            stack_name = labels.get("com.docker.stack.namespace")
            if stack_name:
                full_facts["docker_stack"] = stack_name
                if add_legacy_groups:
                    groups.append(f"stack_{stack_name}")

            service_name = labels.get("com.docker.swarm.service.name")
            if service_name:
                full_facts["docker_service"] = service_name
                if add_legacy_groups:
                    groups.append(f"service_{service_name}")

            ansible_connection = None
            if connection_type == "ssh":
                # Figure out ssh IP and Port
                try:
                    # Lookup the public facing port Nat'ed to ssh port.
                    network_settings = inspect.get("NetworkSettings") or {}
                    port_settings = network_settings.get("Ports") or {}
                    port = port_settings.get(f"{ssh_port}/tcp")[0]  # type: ignore[index]
                except (IndexError, AttributeError, TypeError):
                    port = {}

                try:
                    ip = default_ip if port["HostIp"] == "0.0.0.0" else port["HostIp"]
                except KeyError:
                    ip = ""

                facts.update(
                    {
                        "ansible_ssh_host": ip,
                        "ansible_ssh_port": port.get("HostPort", 0),
                    }
                )
            elif connection_type == "docker-cli":
                facts.update(
                    {
                        "ansible_host": full_name,
                    }
                )
                ansible_connection = "community.docker.docker"
            elif connection_type == "docker-api":
                facts.update(
                    {
                        "ansible_host": full_name,
                    }
                )
                facts.update(extra_facts)
                ansible_connection = "community.docker.docker_api"

            full_facts.update(facts)
            for key, value in inspect.items():
                fact_key = self._slugify(key)
                full_facts[fact_key] = value

            full_facts = make_unsafe(full_facts)

            if ansible_connection:
                for d in (facts, full_facts):
                    if "ansible_connection" not in d:
                        d["ansible_connection"] = ansible_connection

            if not filter_host(self, name, full_facts, filters):
                continue

            if verbose_output:
                facts.update(full_facts)

            self.inventory.add_host(name)
            for group in groups:
                self.inventory.add_group(group)
                self.inventory.add_host(name, group=group)

            for key, value in facts.items():
                self.inventory.set_variable(name, key, value)

            # Use constructed if applicable
            # Composed variables
            self._set_composite_vars(
                self.get_option("compose"), full_facts, name, strict=strict
            )
            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(
                self.get_option("groups"), full_facts, name, strict=strict
            )
            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(
                self.get_option("keyed_groups"), full_facts, name, strict=strict
            )

            # We need to do this last since we also add a group called `name`.
            # When we do this before a set_variable() call, the variables are assigned
            # to the group, and not to the host.
            if add_legacy_groups:
                self.inventory.add_group(container_id)
                self.inventory.add_host(name, group=container_id)
                self.inventory.add_group(name)
                self.inventory.add_host(name, group=name)
                self.inventory.add_group(short_container_id)
                self.inventory.add_host(name, group=short_container_id)
                self.inventory.add_group(hostname)
                self.inventory.add_host(name, group=hostname)

                if running is True:
                    self.inventory.add_host(name, group="running")
                else:
                    self.inventory.add_host(name, group="stopped")

    def verify_file(self, path: str) -> bool:
        """Return the possibility of a file being consumable by this plugin."""
        return super().verify_file(path) and path.endswith(
            ("docker.yaml", "docker.yml")
        )

    def _create_client(self) -> AnsibleDockerClient:
        return AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)

    def parse(
        self,
        inventory: InventoryData,
        loader: DataLoader,
        path: str,
        cache: bool = True,
    ) -> None:
        super().parse(inventory, loader, path, cache)
        self._read_config_data(path)
        client = self._create_client()
        try:
            self._populate(client)
        except DockerException as e:
            raise AnsibleError(f"An unexpected Docker error occurred: {e}") from e
        except RequestException as e:
            raise AnsibleError(
                f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}"
            ) from e

@ -0,0 +1,359 @@
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
name: docker_machine
author: Ximon Eighteen (@ximon18)
short_description: Docker Machine inventory source
requirements:
  - L(Docker Machine,https://docs.docker.com/machine/)
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Get inventory hosts from Docker Machine.
  - Uses a YAML configuration file that ends with V(docker_machine.(yml|yaml\)).
  - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
  - The plugin stores the Docker Machine 'env' output variables in C(dm_) prefixed host variables.
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker_machine.yml) or V(docker_machine.yaml). Other
    filenames will not be accepted.
options:
  plugin:
    description: Token that ensures this is a source file for the C(docker_machine) plugin.
    required: true
    choices: ['docker_machine', 'community.docker.docker_machine']
  daemon_env:
    description:
      - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
      - With V(require) and V(require-silently), fetch them and skip any host for which they cannot be fetched. A warning
        will be issued for any skipped host if the choice is V(require).
      - With V(optional) and V(optional-silently), fetch them and do not skip hosts for which they cannot be fetched. A warning
        will be issued for hosts where they cannot be fetched if the choice is V(optional).
      - With V(skip), do not attempt to fetch the docker daemon connection environment variables.
      - If fetched successfully, the variables will be prefixed with C(dm_) and stored as host variables.
    type: str
    choices:
      - require
      - require-silently
      - optional
      - optional-silently
      - skip
    default: require
  running_required:
    description:
      - When V(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
    type: bool
    default: true
  verbose_output:
    description:
      - When V(true), include all available nodes metadata (for example C(Image), C(Region), C(Size)) as a JSON object named
        C(docker_machine_node_attributes).
    type: bool
    default: true
  filters:
    version_added: 3.5.0
"""

EXAMPLES = """
---
# Minimal example
plugin: community.docker.docker_machine

---
# Example using constructed features to create a group per Docker Machine driver
# (https://docs.docker.com/machine/drivers/), for example:
#   $ docker-machine create --driver digitalocean ... mymachine
#   $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
#   {
#     ...
#     "digitalocean": {
#       "hosts": [
#           "mymachine"
#       ]
#     ...
#   }
plugin: community.docker.docker_machine
strict: false
keyed_groups:
  - separator: ''
    key: docker_machine_node_attributes.DriverName

---
# Example grouping hosts by Docker Machine tag
plugin: community.docker.docker_machine
strict: false
keyed_groups:
  - prefix: tag
    key: 'dm_tags'

---
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
plugin: community.docker.docker_machine
compose:
  ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
"""

import json
import re
import subprocess
import typing as t

from ansible.errors import AnsibleError
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, Constructable
from ansible.utils.display import Display
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
    filter_host,
    parse_filters,
)

from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
    make_unsafe,
)

if t.TYPE_CHECKING:
    from ansible.inventory.data import InventoryData
    from ansible.parsing.dataloader import DataLoader

DaemonEnv = t.Literal[
    "require", "require-silently", "optional", "optional-silently", "skip"
]


display = Display()


class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    """Host inventory parser for ansible using Docker machine as source."""

    NAME = "community.docker.docker_machine"

    docker_machine_path: str | None = None

    def _run_command(self, args: list[str]) -> str:
        if not self.docker_machine_path:
            try:
                self.docker_machine_path = get_bin_path("docker-machine")
            except ValueError as e:
                raise AnsibleError(to_text(e)) from e

        command = [self.docker_machine_path]
        command.extend(args)
        display.debug(f"Executing command {command}")
        try:
            result = subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            display.warning(
                f"Exception {type(e).__name__} caught while executing command {command}, this was the original exception: {e}"
            )
            raise e

        return to_text(result).strip()

    def _get_docker_daemon_variables(self, machine_name: str) -> list[tuple[str, str]]:
        """
        Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
        the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
        """
        try:
            env_lines = self._run_command(
                ["env", "--shell=sh", machine_name]
            ).splitlines()
        except subprocess.CalledProcessError:
            # This can happen when the machine is created but provisioning is incomplete
            return []

        # example output of docker-machine env --shell=sh:
        # export DOCKER_TLS_VERIFY="1"
        # export DOCKER_HOST="tcp://134.209.204.160:2376"
        # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
        # export DOCKER_MACHINE_NAME="routinator"
        # # Run this command to configure your shell:
        # # eval $(docker-machine env --shell=bash routinator)

        # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
        # with the same name and value but with a dm_ name prefix.
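        # For the example output above, the resulting list would be:
        #   [("DOCKER_TLS_VERIFY", "1"),
        #    ("DOCKER_HOST", "tcp://134.209.204.160:2376"),
        #    ("DOCKER_CERT_PATH", "/root/.docker/machine/machines/routinator"),
        #    ("DOCKER_MACHINE_NAME", "routinator")]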
        env_vars = []
        for line in env_lines:
            match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
            if match:
                env_var_name = match.group(1)
                env_var_value = match.group(2)
                env_vars.append((env_var_name, env_var_value))

        return env_vars

    def _get_machine_names(self) -> list[str]:
        # Filter out machines that are not in the Running state, as we probably cannot perform
        # any useful actions with them.
        ls_command = ["ls", "-q"]
        if self.get_option("running_required"):
            ls_command.extend(["--filter", "state=Running"])

        try:
            ls_lines = self._run_command(ls_command)
        except subprocess.CalledProcessError:
            return []

        return ls_lines.splitlines()

    def _inspect_docker_machine_host(self, node: str) -> t.Any | None:
        try:
            inspect_lines = self._run_command(["inspect", node])
        except subprocess.CalledProcessError:
            return None

        return json.loads(inspect_lines)

    def _ip_addr_docker_machine_host(self, node: str) -> t.Any | None:
        try:
            ip_addr = self._run_command(["ip", node])
        except subprocess.CalledProcessError:
            return None

        return ip_addr

    def _should_skip_host(
        self,
        machine_name: str,
        env_var_tuples: list[tuple[str, str]],
        daemon_env: DaemonEnv,
    ) -> bool:
        if not env_var_tuples:
            warning_prefix = f"Unable to fetch Docker daemon env vars from Docker Machine for host {machine_name}"
            if daemon_env in ("require", "require-silently"):
                if daemon_env == "require":
                    display.warning(f"{warning_prefix}: host will be skipped")
                return True
            if daemon_env == "optional":
                display.warning(
                    f"{warning_prefix}: host will lack dm_DOCKER_xxx variables"
                )
            # daemon_env is 'optional-silently'
        return False

    def _populate(self) -> None:
        if self.inventory is None:
            raise AssertionError("Inventory must be there")

        daemon_env: DaemonEnv = self.get_option("daemon_env")
        filters = parse_filters(self.get_option("filters"))
        try:
            for node in self._get_machine_names():
                node_attrs = self._inspect_docker_machine_host(node)
                if not node_attrs:
                    continue

                unsafe_node_attrs = make_unsafe(node_attrs)

                machine_name = unsafe_node_attrs["Driver"]["MachineName"]
                if not filter_host(self, machine_name, unsafe_node_attrs, filters):
                    continue

                # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
                # that could be used to set environment variables to influence a local Docker client:
                if daemon_env == "skip":
                    env_var_tuples = []
                else:
                    env_var_tuples = self._get_docker_daemon_variables(machine_name)
                    if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
                        continue

                # add an entry in the inventory for this host
                self.inventory.add_host(machine_name)

                # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
                # this works around an issue seen with Google Compute Platform where the IP address was not available
                # via the 'inspect' subcommand but was via the 'ip' subcommand.
                if unsafe_node_attrs["Driver"]["IPAddress"]:
                    ip_addr = unsafe_node_attrs["Driver"]["IPAddress"]
                else:
                    ip_addr = self._ip_addr_docker_machine_host(node)

                # set standard Ansible remote host connection settings to details captured from `docker-machine`
                # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
                self.inventory.set_variable(
                    machine_name, "ansible_host", make_unsafe(ip_addr)
                )
                self.inventory.set_variable(
                    machine_name, "ansible_port", unsafe_node_attrs["Driver"]["SSHPort"]
                )
                self.inventory.set_variable(
                    machine_name, "ansible_user", unsafe_node_attrs["Driver"]["SSHUser"]
                )
                self.inventory.set_variable(
                    machine_name,
                    "ansible_ssh_private_key_file",
                    unsafe_node_attrs["Driver"]["SSHKeyPath"],
                )

                # set variables based on Docker Machine tags
                tags = unsafe_node_attrs["Driver"].get("Tags") or ""
                self.inventory.set_variable(machine_name, "dm_tags", make_unsafe(tags))

                # set variables based on Docker Machine env variables
                for kv in env_var_tuples:
                    self.inventory.set_variable(
                        machine_name, f"dm_{kv[0]}", make_unsafe(kv[1])
                    )

                if self.get_option("verbose_output"):
                    self.inventory.set_variable(
                        machine_name,
                        "docker_machine_node_attributes",
                        unsafe_node_attrs,
                    )

                # Use constructed if applicable
                strict = self.get_option("strict")

                # Composed variables
                self._set_composite_vars(
                    self.get_option("compose"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(
                    self.get_option("groups"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(
                    self.get_option("keyed_groups"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

        except Exception as e:
            raise AnsibleError(
                f"Unable to fetch hosts from Docker Machine, this was the original exception: {e}"
            ) from e

    def verify_file(self, path: str) -> bool:
        """Return the possibility of a file being consumable by this plugin."""
        return super().verify_file(path) and path.endswith(
            ("docker_machine.yaml", "docker_machine.yml")
        )

    def parse(
        self,
        inventory: InventoryData,
        loader: DataLoader,
        path: str,
        cache: bool = True,
    ) -> None:
        super().parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()

@ -0,0 +1,338 @@
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
name: docker_swarm
author:
  - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for Docker swarm nodes
requirements:
  - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Reads inventories from the Docker swarm API.
  - Uses a YAML configuration file that ends with V(docker_swarm.(yml|yaml\)).
  - 'The plugin returns the following groups of swarm nodes: C(all) - all hosts; C(workers) - all worker nodes; C(managers) -
    all manager nodes; C(leader) - the swarm leader node; C(nonleaders) - all nodes except the swarm leader.'
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker_swarm.yml) or V(docker_swarm.yaml). Other
    filenames will not be accepted.
options:
  plugin:
    description: The name of this plugin. It should always be set to V(community.docker.docker_swarm) for this plugin to recognize
      it as its own.
    type: str
    required: true
    choices: [docker_swarm, community.docker.docker_swarm]
  docker_host:
    description:
      - Socket of a Docker swarm manager node (C(tcp), C(unix)).
      - Use V(unix:///var/run/docker.sock) to connect through a local socket.
    type: str
    required: true
    aliases: [docker_url]
  verbose_output:
    description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS),
      C(EngineVersion)).
    type: bool
    default: true
  tls:
    description: Connect using TLS without verifying the authenticity of the Docker host server.
    type: bool
    default: false
  validate_certs:
    description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker host server.
    type: bool
    default: false
    aliases: [tls_verify]
  client_key:
    description: Path to the client's TLS key file.
    type: path
    aliases: [tls_client_key, key_path]
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
        as an alias and can still be used.
    type: path
    aliases: [ca_cert, tls_ca_cert, cacert_path]
  client_cert:
    description: Path to the client's TLS certificate file.
    type: path
    aliases: [tls_client_cert, cert_path]
  tls_hostname:
    description: When verifying the authenticity of the Docker host server, provide the expected name of the server.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by Docker SDK for Python.
    type: str
    aliases: [docker_api_version]
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: int
    default: 60
    aliases: [time_out]
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
      - Requires Docker SDK for Python 4.4.0 or newer.
    type: bool
    default: false
    version_added: 1.5.0
  include_host_uri:
    description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the swarm leader
      in the format V(tcp://172.16.0.1:2376). This value may be used without additional modification as value of option O(docker_host)
      in Docker Swarm modules when connecting through the API. The port always defaults to V(2376).
    type: bool
    default: false
  include_host_uri_port:
    description: Override the detected port number included in C(ansible_host_uri).
    type: int
  filters:
    version_added: 3.5.0
"""

EXAMPLES = """
---
# Minimal example using local docker
plugin: community.docker.docker_swarm
docker_host: unix:///var/run/docker.sock

---
# Minimal example using remote docker
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2375

---
# Example using remote docker with unverified TLS
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2376
tls: true

---
# Example using remote docker with verified TLS and client certificate verification
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2376
validate_certs: true
ca_path: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem

---
# Example using constructed features to create groups and set ansible_host
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2375
strict: false
keyed_groups:
  # add for example x86_64 hosts to an arch_x86_64 group
  - prefix: arch
    key: 'Description.Platform.Architecture'
  # add for example linux hosts to an os_linux group
  - prefix: os
    key: 'Description.Platform.OS'
  # create a group per node label
  # for example a node labeled w/ "production" ends up in group "label_production"
  # hint: labels containing special characters will be converted to safe names
  - key: 'Spec.Labels'
    prefix: label
"""
|
||||||
|
|
||||||
|
import typing as t
|
||||||
|
|
||||||
|
from ansible.errors import AnsibleError
|
||||||
|
from ansible.parsing.utils.addresses import parse_address
|
||||||
|
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||||
|
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
|
||||||
|
filter_host,
|
||||||
|
parse_filters,
|
||||||
|
)
|
||||||
|
|
||||||
|
from ansible_collections.community.docker.plugins.module_utils._common import (
|
||||||
|
get_connect_params,
|
||||||
|
)
|
||||||
|
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||||
|
update_tls_hostname,
|
||||||
|
)
|
||||||
|
from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
|
||||||
|
make_unsafe,
|
||||||
|
)
|
||||||
|
|
||||||
|
if t.TYPE_CHECKING:
|
||||||
|
from ansible.inventory.data import InventoryData
|
||||||
|
from ansible.parsing.dataloader import DataLoader
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
import docker
|
||||||
|
|
||||||
|
HAS_DOCKER = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_DOCKER = False
|
||||||
|
|
||||||
|
|
||||||
|
class InventoryModule(BaseInventoryPlugin, Constructable):
    """Host inventory parser for ansible using Docker swarm as source."""

    NAME = "community.docker.docker_swarm"

    def _fail(self, msg: str) -> t.NoReturn:
        raise AnsibleError(msg)

    def _populate(self) -> None:
        if self.inventory is None:
            raise AssertionError("Inventory must be set")

        raw_params = {
            "docker_host": self.get_option("docker_host"),
            "tls": self.get_option("tls"),
            "tls_verify": self.get_option("validate_certs"),
            "key_path": self.get_option("client_key"),
            "cacert_path": self.get_option("ca_path"),
            "cert_path": self.get_option("client_cert"),
            "tls_hostname": self.get_option("tls_hostname"),
            "api_version": self.get_option("api_version"),
            "timeout": self.get_option("timeout"),
            "use_ssh_client": self.get_option("use_ssh_client"),
            "debug": None,
        }
        update_tls_hostname(raw_params)
        connect_params = get_connect_params(raw_params, fail_function=self._fail)
        client = docker.DockerClient(**connect_params)
        self.inventory.add_group("all")
        self.inventory.add_group("manager")
        self.inventory.add_group("worker")
        self.inventory.add_group("leader")
        self.inventory.add_group("nonleaders")

        filters = parse_filters(self.get_option("filters"))

        if self.get_option("include_host_uri"):
            if self.get_option("include_host_uri_port"):
                host_uri_port = str(self.get_option("include_host_uri_port"))
            elif self.get_option("tls") or self.get_option("validate_certs"):
                host_uri_port = "2376"
            else:
                host_uri_port = "2375"

        try:
            nodes = client.nodes.list()
            for node in nodes:
                node_attrs = client.nodes.get(node.id).attrs
                unsafe_node_attrs = make_unsafe(node_attrs)
                if not filter_host(
                    self, unsafe_node_attrs["ID"], unsafe_node_attrs, filters
                ):
                    continue
                self.inventory.add_host(unsafe_node_attrs["ID"])
                self.inventory.add_host(
                    unsafe_node_attrs["ID"], group=unsafe_node_attrs["Spec"]["Role"]
                )
                self.inventory.set_variable(
                    unsafe_node_attrs["ID"],
                    "ansible_host",
                    unsafe_node_attrs["Status"]["Addr"],
                )
                if self.get_option("include_host_uri"):
                    self.inventory.set_variable(
                        unsafe_node_attrs["ID"],
                        "ansible_host_uri",
                        make_unsafe(
                            "tcp://"
                            + unsafe_node_attrs["Status"]["Addr"]
                            + ":"
                            + host_uri_port
                        ),
                    )
                if self.get_option("verbose_output"):
                    self.inventory.set_variable(
                        unsafe_node_attrs["ID"],
                        "docker_swarm_node_attributes",
                        unsafe_node_attrs,
                    )
                if "ManagerStatus" in unsafe_node_attrs:
                    if unsafe_node_attrs["ManagerStatus"].get("Leader"):
                        # This is a workaround for a Docker bug: in some cases the
                        # leader IP is reported as 0.0.0.0.
                        # See moby/moby#35437 for details.
                        swarm_leader_ip = (
                            parse_address(node_attrs["ManagerStatus"]["Addr"])[0]
                            or unsafe_node_attrs["Status"]["Addr"]
                        )
                        if self.get_option("include_host_uri"):
                            self.inventory.set_variable(
                                unsafe_node_attrs["ID"],
                                "ansible_host_uri",
                                make_unsafe(
                                    "tcp://" + swarm_leader_ip + ":" + host_uri_port
                                ),
                            )
                        self.inventory.set_variable(
                            unsafe_node_attrs["ID"],
                            "ansible_host",
                            make_unsafe(swarm_leader_ip),
                        )
                        self.inventory.add_host(unsafe_node_attrs["ID"], group="leader")
                    else:
                        self.inventory.add_host(
                            unsafe_node_attrs["ID"], group="nonleaders"
                        )
                else:
                    self.inventory.add_host(unsafe_node_attrs["ID"], group="nonleaders")
                # Use constructed if applicable
                strict = self.get_option("strict")
                # Composed variables
                self._set_composite_vars(
                    self.get_option("compose"),
                    unsafe_node_attrs,
                    unsafe_node_attrs["ID"],
                    strict=strict,
                )
                # Complex groups based on jinja2 conditionals; hosts that meet the conditional are added to the group
                self._add_host_to_composed_groups(
                    self.get_option("groups"),
                    unsafe_node_attrs,
                    unsafe_node_attrs["ID"],
                    strict=strict,
                )
                # Create groups based on variable values and add the corresponding hosts to them
                self._add_host_to_keyed_groups(
                    self.get_option("keyed_groups"),
                    unsafe_node_attrs,
                    unsafe_node_attrs["ID"],
                    strict=strict,
                )
        except Exception as e:
            raise AnsibleError(
                f"Unable to fetch hosts from Docker swarm API, this was the original exception: {e}"
            ) from e

    def verify_file(self, path: str) -> bool:
        """Return whether the given file can plausibly be consumed by this plugin."""
        return super().verify_file(path) and path.endswith(
            ("docker_swarm.yaml", "docker_swarm.yml")
        )

    def parse(
        self,
        inventory: InventoryData,
        loader: DataLoader,
        path: str,
        cache: bool = True,
    ) -> None:
        if not HAS_DOCKER:
            raise AnsibleError(
                "The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: "
                "https://github.com/docker/docker-py."
            )
        super().parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()

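# Port fallback sketch (illustrative note derived from the branch above, not in
# the original source): with include_host_uri enabled, an explicit
# include_host_uri_port always wins; otherwise the plugin assumes Docker's
# conventional daemon ports:
#
#     include_host_uri_port: 4243     ->  ansible_host_uri = tcp://<addr>:4243
#     tls or validate_certs enabled   ->  ansible_host_uri = tcp://<addr>:2376
#     neither                         ->  ansible_host_uri = tcp://<addr>:2375
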
@@ -0,0 +1,102 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import traceback
import typing as t

REQUESTS_IMPORT_ERROR: str | None  # pylint: disable=invalid-name
try:
    from requests import Session  # noqa: F401, pylint: disable=unused-import
    from requests.adapters import (  # noqa: F401, pylint: disable=unused-import
        HTTPAdapter,
    )
    from requests.exceptions import (  # noqa: F401, pylint: disable=unused-import
        HTTPError,
        InvalidSchema,
    )
except ImportError:
    REQUESTS_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name

    class Session:  # type: ignore
        __attrs__: list[t.Never] = []

    class HTTPAdapter:  # type: ignore
        __attrs__: list[t.Never] = []

    class HTTPError(Exception):  # type: ignore
        pass

    class InvalidSchema(Exception):  # type: ignore
        pass

else:
    REQUESTS_IMPORT_ERROR = None  # pylint: disable=invalid-name


URLLIB3_IMPORT_ERROR: str | None = None  # pylint: disable=invalid-name
try:
    from requests.packages import urllib3  # pylint: disable=unused-import

    from requests.packages.urllib3 import (  # type: ignore # pylint: disable=unused-import # isort: skip
        connection as urllib3_connection,
    )
except ImportError:
    try:
        import urllib3  # pylint: disable=unused-import
        from urllib3 import (
            connection as urllib3_connection,  # pylint: disable=unused-import
        )
    except ImportError:
        URLLIB3_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name

        class _HTTPConnectionPool:
            pass

        class _HTTPConnection:
            pass

        class FakeURLLIB3:
            def __init__(self) -> None:
                self._collections = self
                self.poolmanager = self
                self.connection = self
                self.connectionpool = self

                self.RecentlyUsedContainer = object()  # pylint: disable=invalid-name
                self.PoolManager = object()  # pylint: disable=invalid-name
                self.match_hostname = object()
                self.HTTPConnectionPool = (  # pylint: disable=invalid-name
                    _HTTPConnectionPool
                )

        class FakeURLLIB3Connection:
            def __init__(self) -> None:
                self.HTTPConnection = _HTTPConnection  # pylint: disable=invalid-name

        urllib3 = FakeURLLIB3()
        urllib3_connection = FakeURLLIB3Connection()


def fail_on_missing_imports() -> None:
    if REQUESTS_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException  # pylint: disable=cyclic-import

        raise MissingRequirementException(
            "You have to install requests", "requests", REQUESTS_IMPORT_ERROR
        )
    if URLLIB3_IMPORT_ERROR is not None:
        from .errors import MissingRequirementException  # pylint: disable=cyclic-import

        raise MissingRequirementException(
            "You have to install urllib3", "urllib3", URLLIB3_IMPORT_ERROR
        )
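
# Usage sketch (illustrative, not in the original file): callers are expected to
# invoke the guard before touching the shims, so the fake fallbacks above never
# leak into real work:
#
#     fail_on_missing_imports()  # raises MissingRequirementException if deps are absent
#     session = Session()        # safe here: the real requests.Session is available
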
File diff suppressed because it is too large

@@ -0,0 +1,406 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import base64
import json
import logging
import typing as t

from . import errors
from .credentials.errors import CredentialsNotFound, StoreError
from .credentials.store import Store
from .utils import config

if t.TYPE_CHECKING:
    from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
        APIClient,
    )


INDEX_NAME = "docker.io"
INDEX_URL = f"https://index.{INDEX_NAME}/v1/"
TOKEN_USERNAME = "<token>"

log = logging.getLogger(__name__)


def resolve_repository_name(repo_name: str) -> tuple[str, str]:
    if "://" in repo_name:
        raise errors.InvalidRepository(
            f"Repository name cannot contain a scheme ({repo_name})"
        )

    index_name, remote_name = split_repo_name(repo_name)
    if index_name[0] == "-" or index_name[-1] == "-":
        raise errors.InvalidRepository(
            f"Invalid index name ({index_name}). Cannot begin or end with a hyphen."
        )
    return resolve_index_name(index_name), remote_name


def resolve_index_name(index_name: str) -> str:
    index_name = convert_to_hostname(index_name)
    if index_name == "index." + INDEX_NAME:
        index_name = INDEX_NAME
    return index_name


def get_config_header(client: APIClient, registry: str) -> bytes | None:
    log.debug("Looking for auth config")
    if not client._auth_configs or client._auth_configs.is_empty:
        log.debug("No auth config in memory - loading from filesystem")
        client._auth_configs = load_config(credstore_env=client.credstore_env)
    authcfg = resolve_authconfig(
        client._auth_configs, registry, credstore_env=client.credstore_env
    )
    # Do not fail here if no authentication exists for this
    # specific registry as we can have a readonly pull. Just
    # put the header if we can.
    if authcfg:
        log.debug("Found auth config")
        # auth_config needs to be a dict in the format used by
        # auth.py: username, password, serveraddress, email
        return encode_header(authcfg)
    log.debug("No auth config found")
    return None


def split_repo_name(repo_name: str) -> tuple[str, str]:
    parts = repo_name.split("/", 1)
    if len(parts) == 1 or (
        "." not in parts[0] and ":" not in parts[0] and parts[0] != "localhost"
    ):
        # This is a docker index repo (ex: username/foobar or ubuntu)
        return INDEX_NAME, repo_name
    return tuple(parts)  # type: ignore


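# Illustrative behaviour (doctest-style sketch, not in the original file),
# following the splitting rules implemented above:
#
#     >>> split_repo_name("ubuntu")
#     ('docker.io', 'ubuntu')
#     >>> split_repo_name("quay.io/coreos/etcd")
#     ('quay.io', 'coreos/etcd')
#     >>> resolve_repository_name("index.docker.io/library/busybox")
#     ('docker.io', 'library/busybox')
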
def get_credential_store(
    authconfig: dict[str, t.Any] | AuthConfig, registry: str
) -> str | None:
    if not isinstance(authconfig, AuthConfig):
        authconfig = AuthConfig(authconfig)
    return authconfig.get_credential_store(registry)


class AuthConfig(dict):
    def __init__(
        self, dct: dict[str, t.Any], credstore_env: dict[str, str] | None = None
    ):
        if "auths" not in dct:
            dct["auths"] = {}
        self.update(dct)
        self._credstore_env = credstore_env
        self._stores: dict[str, Store] = {}

    @classmethod
    def parse_auth(
        cls, entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
    ) -> dict[str, dict[str, t.Any]]:
        """
        Parses authentication entries

        Args:
            entries: Dict of authentication entries.
            raise_on_error: If set to true, an invalid format will raise
                InvalidConfigFile

        Returns:
            Authentication registry.
        """

        conf: dict[str, dict[str, t.Any]] = {}
        for registry, entry in entries.items():
            if not isinstance(entry, dict):
                log.debug("Config entry for key %s is not auth config", registry)  # type: ignore
                # We sometimes fall back to parsing the whole config as if it
                # was the auth config by itself, for legacy purposes. In that
                # case, we fail silently and return an empty conf if any of the
                # keys is not formatted properly.
                if raise_on_error:
                    raise errors.InvalidConfigFile(
                        f"Invalid configuration for registry {registry}"
                    )
                return {}
            if "identitytoken" in entry:
                log.debug("Found an IdentityToken entry for registry %s", registry)
                conf[registry] = {"IdentityToken": entry["identitytoken"]}
                continue  # Other values are irrelevant if we have a token

            if "auth" not in entry:
                # Starting with engine v1.11 (API 1.23), an empty dictionary is
                # a valid value in the auths config.
                # https://github.com/docker/compose/issues/3265
                log.debug(
                    "Auth data for %s is absent. Client might be using a credentials store instead.",
                    registry,
                )
                conf[registry] = {}
                continue

            username, password = decode_auth(entry["auth"])
            log.debug(
                "Found entry (registry=%s, username=%s)", repr(registry), repr(username)
            )

            conf[registry] = {
                "username": username,
                "password": password,
                "email": entry.get("email"),
                "serveraddress": registry,
            }
        return conf

    @classmethod
    def load_config(
        cls,
        config_path: str | None,
        config_dict: dict[str, t.Any] | None,
        credstore_env: dict[str, str] | None = None,
    ) -> t.Self:
        """
        Loads authentication data from a Docker configuration file in the given
        root directory, or, if config_path is passed, uses the given path.
        Lookup priority:
            explicit config_path parameter > DOCKER_CONFIG environment
            variable > ~/.docker/config.json > ~/.dockercfg
        """

        if not config_dict:
            config_file = config.find_config_file(config_path)

            if not config_file:
                return cls({}, credstore_env)
            try:
                with open(config_file, "rt", encoding="utf-8") as f:
                    config_dict = json.load(f)
            except (IOError, KeyError, ValueError) as e:
                # Likely missing new Docker config file, or it is in an
                # unknown format; continue to attempt to read the old location
                # and format.
                log.debug(e)
                return cls(_load_legacy_config(config_file), credstore_env)

        res = {}
        if config_dict.get("auths"):
            log.debug("Found 'auths' section")
            res.update(
                {"auths": cls.parse_auth(config_dict.pop("auths"), raise_on_error=True)}
            )
        if config_dict.get("credsStore"):
            log.debug("Found 'credsStore' section")
            res.update({"credsStore": config_dict.pop("credsStore")})
        if config_dict.get("credHelpers"):
            log.debug("Found 'credHelpers' section")
            res.update({"credHelpers": config_dict.pop("credHelpers")})
        if res:
            return cls(res, credstore_env)

        log.debug(
            "Could not find auth-related section; attempting to interpret "
            "as auth-only file"
        )
        return cls({"auths": cls.parse_auth(config_dict)}, credstore_env)

    @property
    def auths(self) -> dict[str, dict[str, t.Any]]:
        return self.get("auths", {})

    @property
    def creds_store(self) -> str | None:
        return self.get("credsStore", None)

    @property
    def cred_helpers(self) -> dict[str, t.Any]:
        return self.get("credHelpers", {})

    @property
    def is_empty(self) -> bool:
        return not self.auths and not self.creds_store and not self.cred_helpers

    def resolve_authconfig(
        self, registry: str | None = None
    ) -> dict[str, t.Any] | None:
        """
        Returns the authentication data from the given auth configuration for a
        specific registry. As with the Docker client, legacy entries in the
        config with full URLs are stripped down to hostnames before checking
        for a match. Returns None if no match was found.
        """

        if self.creds_store or self.cred_helpers:
            store_name = self.get_credential_store(registry)
            if store_name is not None:
                log.debug('Using credentials store "%s"', store_name)
                cfg = self._resolve_authconfig_credstore(registry, store_name)
                if cfg is not None:
                    return cfg
                log.debug("No entry in credstore - fetching from auth dict")

        # Default to the public index server
        registry = resolve_index_name(registry) if registry else INDEX_NAME
        log.debug("Looking for auth entry for %s", repr(registry))

        if registry in self.auths:
            log.debug("Found %s", repr(registry))
            return self.auths[registry]

        for key, conf in self.auths.items():
            if resolve_index_name(key) == registry:
                log.debug("Found %s", repr(key))
                return conf

        log.debug("No entry found")
        return None

    def _resolve_authconfig_credstore(
        self, registry: str | None, credstore_name: str
    ) -> dict[str, t.Any] | None:
        if not registry or registry == INDEX_NAME:
            # The ecosystem is a little inconsistent between index.docker.io
            # and docker.io - in that case, it seems the full URL is necessary.
            registry = INDEX_URL
        log.debug("Looking for auth entry for %s", repr(registry))
        store = self._get_store_instance(credstore_name)
        try:
            data = store.get(registry)
            res = {
                "ServerAddress": registry,
            }
            if data["Username"] == TOKEN_USERNAME:
                res["IdentityToken"] = data["Secret"]
            else:
                res.update(
                    {
                        "Username": data["Username"],
                        "Password": data["Secret"],
                    }
                )
            return res
        except CredentialsNotFound:
            log.debug("No entry found")
            return None
        except StoreError as e:
            raise errors.DockerException(f"Credentials store error: {e}") from e

    def _get_store_instance(self, name: str) -> Store:
        if name not in self._stores:
            self._stores[name] = Store(name, environment=self._credstore_env)
        return self._stores[name]

    def get_credential_store(self, registry: str | None) -> str | None:
        if not registry or registry == INDEX_NAME:
            registry = INDEX_URL

        return self.cred_helpers.get(registry) or self.creds_store

    def get_all_credentials(self) -> dict[str, dict[str, t.Any] | None]:
        auth_data: dict[str, dict[str, t.Any] | None] = self.auths.copy()  # type: ignore
        if self.creds_store:
            # Retrieve all credentials from the default store
            store = self._get_store_instance(self.creds_store)
            for k in store.list():
                auth_data[k] = self._resolve_authconfig_credstore(k, self.creds_store)
                auth_data[convert_to_hostname(k)] = auth_data[k]

        # credHelpers entries take priority over all others
        for reg, store_name in self.cred_helpers.items():
            auth_data[reg] = self._resolve_authconfig_credstore(reg, store_name)
            auth_data[convert_to_hostname(reg)] = auth_data[reg]

        return auth_data

    def add_auth(self, reg: str, data: dict[str, t.Any]) -> None:
        self["auths"][reg] = data


def resolve_authconfig(
    authconfig: AuthConfig | dict[str, t.Any],
    registry: str | None = None,
    credstore_env: dict[str, str] | None = None,
) -> dict[str, t.Any] | None:
    if not isinstance(authconfig, AuthConfig):
        authconfig = AuthConfig(authconfig, credstore_env)
    return authconfig.resolve_authconfig(registry)


def convert_to_hostname(url: str) -> str:
    return url.replace("http://", "").replace("https://", "").split("/", 1)[0]


def decode_auth(auth: str | bytes) -> tuple[str, str]:
    if isinstance(auth, str):
        auth = auth.encode("ascii")
    s = base64.b64decode(auth)
    login, pwd = s.split(b":", 1)
    return login.decode("utf8"), pwd.decode("utf8")


def encode_header(auth: dict[str, t.Any]) -> bytes:
    auth_json = json.dumps(auth).encode("ascii")
    return base64.urlsafe_b64encode(auth_json)


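# Illustrative behaviour (doctest-style sketch, not in the original file):
# decode_auth() simply base64-decodes a "user:password" pair, and
# convert_to_hostname() strips the scheme and path:
#
#     >>> convert_to_hostname("https://registry.example.com/v2/")
#     'registry.example.com'
#     >>> decode_auth("YWxpY2U6czNjcmV0")  # base64 of b"alice:s3cret"
#     ('alice', 's3cret')
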
def parse_auth(
    entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
) -> dict[str, dict[str, t.Any]]:
    """
    Parses authentication entries

    Args:
        entries: Dict of authentication entries.
        raise_on_error: If set to true, an invalid format will raise
            InvalidConfigFile

    Returns:
        Authentication registry.
    """

    return AuthConfig.parse_auth(entries, raise_on_error)


def load_config(
    config_path: str | None = None,
    config_dict: dict[str, t.Any] | None = None,
    credstore_env: dict[str, str] | None = None,
) -> AuthConfig:
    return AuthConfig.load_config(config_path, config_dict, credstore_env)


def _load_legacy_config(config_file: str) -> dict[str, dict[str, t.Any]]:
    log.debug("Attempting to parse legacy auth file format")
    try:
        data = []
        with open(config_file, "rt", encoding="utf-8") as f:
            for line in f.readlines():
                data.append(line.strip().split(" = ")[1])
            if len(data) < 2:
                # Not enough data
                raise errors.InvalidConfigFile("Invalid or empty configuration file!")

        username, password = decode_auth(data[0])
        return {
            "auths": {
                INDEX_NAME: {
                    "username": username,
                    "password": password,
                    "email": data[1],
                    "serveraddress": INDEX_URL,
                }
            }
        }
    except Exception as e:  # pylint: disable=broad-exception-caught
        log.debug(e)

    log.debug("All parsing attempts failed - returning empty config")
    return {}

@@ -0,0 +1,40 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import sys

MINIMUM_DOCKER_API_VERSION = "1.21"
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = ["memory", "memswap", "cpushares", "cpusetcpus"]

DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = "npipe:////./pipe/docker_engine"

BYTE_UNITS = {"b": 1, "k": 1024, "m": 1024 * 1024, "g": 1024 * 1024 * 1024}

IS_WINDOWS_PLATFORM = sys.platform == "win32"
WINDOWS_LONGPATH_PREFIX = "\\\\?\\"

DEFAULT_USER_AGENT = "ansible-community.docker"
DEFAULT_NUM_POOLS = 25

# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9

DEFAULT_MAX_POOL_SIZE = 10

DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
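
# Scale note (illustrative, not in the original file): BYTE_UNITS expresses
# binary multiples, so for example
#
#     >>> 512 * BYTE_UNITS["m"]  # 512 MiB in bytes
#     536870912
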
@@ -0,0 +1,253 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import os
import typing as t

from .. import errors
from .config import (
    METAFILE,
    get_current_context_name,
    get_meta_dir,
    write_context_name_to_docker_config,
)
from .context import Context

if t.TYPE_CHECKING:
    from ..tls import TLSConfig


def create_default_context() -> Context:
    host = None
    if os.environ.get("DOCKER_HOST"):
        host = os.environ.get("DOCKER_HOST")
    return Context(
        "default", "swarm", host, description="Current DOCKER_HOST based configuration"
    )


class ContextAPI:
    """Context API.
    Contains methods for context management:
    create, list, remove, get, inspect.
    """

    DEFAULT_CONTEXT = None

    @classmethod
    def get_default_context(cls) -> Context:
        context = cls.DEFAULT_CONTEXT
        if context is None:
            context = create_default_context()
            cls.DEFAULT_CONTEXT = context
        return context

    @classmethod
    def create_context(
        cls,
        name: str,
        orchestrator: str | None = None,
        host: str | None = None,
        tls_cfg: TLSConfig | None = None,
        default_namespace: str | None = None,
        skip_tls_verify: bool = False,
    ) -> Context:
        """Creates a new context.
        Returns:
            (Context): a Context object.
        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextAlreadyExists`
                If a context with the name already exists.
            :py:class:`docker.errors.ContextException`
                If name is default.

        Example:

        >>> from docker.context import ContextAPI
        >>> ctx = ContextAPI.create_context(name='test')
        >>> print(ctx.Metadata)
        {
            "Name": "test",
            "Metadata": {},
            "Endpoints": {
                "docker": {
                    "Host": "unix:///var/run/docker.sock",
                    "SkipTLSVerify": false
                }
            }
        }
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            raise errors.ContextException('"default" is a reserved context name')
        ctx = Context.load_context(name)
        if ctx:
            raise errors.ContextAlreadyExists(name)
        endpoint = "docker"
        if orchestrator and orchestrator != "swarm":
            endpoint = orchestrator
        ctx = Context(name, orchestrator)
        ctx.set_endpoint(
            endpoint,
            host,
            tls_cfg,
            skip_tls_verify=skip_tls_verify,
            def_namespace=default_namespace,
        )
        ctx.save()
        return ctx

    @classmethod
    def get_context(cls, name: str | None = None) -> Context | None:
        """Retrieves a context object.
        Args:
            name (str): The name of the context

        Example:

        >>> from docker.context import ContextAPI
        >>> ctx = ContextAPI.get_context(name='test')
        >>> print(ctx.Metadata)
        {
            "Name": "test",
            "Metadata": {},
            "Endpoints": {
                "docker": {
                    "Host": "unix:///var/run/docker.sock",
                    "SkipTLSVerify": false
                }
            }
        }
        """
        if not name:
            name = get_current_context_name()
        if name == "default":
            return cls.get_default_context()
        return Context.load_context(name)

    @classmethod
    def contexts(cls) -> list[Context]:
        """Context list.
        Returns:
            (Context): List of context objects.
        Raises:
            :py:class:`docker.errors.APIError`
                If something goes wrong.
        """
        names = []
        for dirname, dummy, fnames in os.walk(get_meta_dir()):
            for filename in fnames:
                if filename == METAFILE:
                    filepath = os.path.join(dirname, filename)
                    try:
                        with open(filepath, "rt", encoding="utf-8") as f:
                            data = json.load(f)
                            name = data["Name"]
                            if name == "default":
                                raise ValueError('"default" is a reserved context name')
                            names.append(name)
                    except Exception as e:
                        raise errors.ContextException(
                            f"Failed to load metafile {filepath}: {e}"
                        ) from e

        contexts = [cls.get_default_context()]
        for name in names:
            context = Context.load_context(name)
            if not context:
                raise errors.ContextException(f"Context {name} cannot be found")
            contexts.append(context)
        return contexts

    @classmethod
    def get_current_context(cls) -> Context | None:
        """Get current context.
        Returns:
            (Context): current context object.
        """
        return cls.get_context()

    @classmethod
    def set_current_context(cls, name: str = "default") -> None:
        ctx = cls.get_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)

        err = write_context_name_to_docker_config(name)
        if err:
            raise errors.ContextException(f"Failed to set current context: {err}")

    @classmethod
    def remove_context(cls, name: str) -> None:
        """Remove a context. Similar to the ``docker context rm`` command.

        Args:
            name (str): The name of the context

        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.
            :py:class:`docker.errors.ContextException`
                If name is default.

        Example:

        >>> from docker.context import ContextAPI
        >>> ContextAPI.remove_context(name='test')
        >>>
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            raise errors.ContextException('context "default" cannot be removed')
        ctx = Context.load_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)
        if name == get_current_context_name():
            write_context_name_to_docker_config(None)
        ctx.remove()

    @classmethod
    def inspect_context(cls, name: str = "default") -> dict[str, t.Any]:
        """Inspect a context. Similar to the ``docker context inspect`` command.

        Args:
            name (str): The name of the context

        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.

        Example:

        >>> from docker.context import ContextAPI
        >>> ContextAPI.inspect_context(name='test')
        >>>
        """
        if not name:
            raise errors.MissingContextParameter("name")
        if name == "default":
            return cls.get_default_context()()
        ctx = Context.load_context(name)
        if not ctx:
            raise errors.ContextNotFound(name)

        return ctx()
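
# Usage sketch (illustrative, not in the original file): with no name given,
# resolution honours DOCKER_HOST, then DOCKER_CONTEXT, then the currentContext
# key of config.json (see config.py below); on a fresh setup this yields the
# default context:
#
#     >>> ContextAPI.get_context()  # no name: resolve the current context
#     <Context: 'default'>
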
@@ -0,0 +1,107 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import hashlib
import json
import os

from ..constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from ..utils.config import find_config_file, get_default_config_file
from ..utils.utils import parse_host

METAFILE = "meta.json"


def get_current_context_name_with_source() -> tuple[str, str]:
    if os.environ.get("DOCKER_HOST"):
        return "default", "DOCKER_HOST environment variable set"
    if os.environ.get("DOCKER_CONTEXT"):
        return os.environ["DOCKER_CONTEXT"], "DOCKER_CONTEXT environment variable set"
    docker_cfg_path = find_config_file()
    if docker_cfg_path:
        try:
            with open(docker_cfg_path, "rt", encoding="utf-8") as f:
                return (
                    json.load(f).get("currentContext", "default"),
                    f"configuration file {docker_cfg_path}",
                )
        except Exception:  # pylint: disable=broad-exception-caught
            pass
    return "default", "fallback value"


def get_current_context_name() -> str:
    return get_current_context_name_with_source()[0]


def write_context_name_to_docker_config(name: str | None = None) -> Exception | None:
    if name == "default":
        name = None
    docker_cfg_path = find_config_file()
    config = {}
    if docker_cfg_path:
        try:
            with open(docker_cfg_path, "rt", encoding="utf-8") as f:
                config = json.load(f)
        except Exception as e:  # pylint: disable=broad-exception-caught
            return e
    current_context = config.get("currentContext", None)
    if current_context and not name:
        del config["currentContext"]
    elif name:
        config["currentContext"] = name
    else:
        return None
    if not docker_cfg_path:
        docker_cfg_path = get_default_config_file()
    try:
        with open(docker_cfg_path, "wt", encoding="utf-8") as f:
            json.dump(config, f, indent=4)
        return None
    except Exception as e:  # pylint: disable=broad-exception-caught
        return e


def get_context_id(name: str) -> str:
    return hashlib.sha256(name.encode("utf-8")).hexdigest()


def get_context_dir() -> str:
    docker_cfg_path = find_config_file() or get_default_config_file()
    return os.path.join(os.path.dirname(docker_cfg_path), "contexts")


def get_meta_dir(name: str | None = None) -> str:
    meta_dir = os.path.join(get_context_dir(), "meta")
    if name:
        return os.path.join(meta_dir, get_context_id(name))
    return meta_dir


def get_meta_file(name: str) -> str:
    return os.path.join(get_meta_dir(name), METAFILE)


def get_tls_dir(name: str | None = None, endpoint: str = "") -> str:
    context_dir = get_context_dir()
    if name:
        return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
    return os.path.join(context_dir, "tls")


def get_context_host(path: str | None = None, tls: bool = False) -> str:
    host = parse_host(path, IS_WINDOWS_PLATFORM, tls)
    if host == DEFAULT_UNIX_SOCKET and host.startswith("http+"):
        # remove http+ from default docker socket url
        host = host[5:]
    return host
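
# Illustrative behaviour (doctest-style sketch, not in the original file):
# context identifiers are just SHA-256 hex digests of the context name:
#
#     >>> get_context_id("test")
#     '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08'
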
@@ -0,0 +1,286 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import os
import typing as t
from shutil import copyfile, rmtree

from ..errors import ContextException
from ..tls import TLSConfig
from .config import (
    get_context_host,
    get_meta_dir,
    get_meta_file,
    get_tls_dir,
)

IN_MEMORY = "IN MEMORY"


class Context:
    """A context."""

    def __init__(
        self,
        name: str,
        orchestrator: str | None = None,
        host: str | None = None,
        endpoints: dict[str, dict[str, t.Any]] | None = None,
        skip_tls_verify: bool = False,
        tls: bool = False,
        description: str | None = None,
    ) -> None:
        if not name:
            raise ValueError("Name not provided")
        self.name = name
        self.context_type = None
        self.orchestrator = orchestrator
        self.endpoints = {}
        self.tls_cfg: dict[str, TLSConfig] = {}
        self.meta_path = IN_MEMORY
        self.tls_path = IN_MEMORY
        self.description = description

        if not endpoints:
            # set default docker endpoint if no endpoint is set
            default_endpoint = (
                "docker"
                if (not orchestrator or orchestrator == "swarm")
                else orchestrator
            )

            self.endpoints = {
                default_endpoint: {
                    "Host": get_context_host(host, skip_tls_verify or tls),
                    "SkipTLSVerify": skip_tls_verify,
                }
            }
            return

        # check docker endpoints
        for k, v in endpoints.items():
            if not isinstance(v, dict):
                # unknown format
                raise ContextException(
                    f"Unknown endpoint format for context {name}: {v}",
                )

            self.endpoints[k] = v
            if k != "docker":
                continue

            self.endpoints[k]["Host"] = v.get(
                "Host", get_context_host(host, skip_tls_verify or tls)
            )
            self.endpoints[k]["SkipTLSVerify"] = bool(
                v.get("SkipTLSVerify", skip_tls_verify)
            )

    def set_endpoint(
        self,
        name: str = "docker",
        host: str | None = None,
        tls_cfg: TLSConfig | None = None,
        skip_tls_verify: bool = False,
        def_namespace: str | None = None,
    ) -> None:
        self.endpoints[name] = {
            "Host": get_context_host(host, not skip_tls_verify or tls_cfg is not None),
            "SkipTLSVerify": skip_tls_verify,
        }
        if def_namespace:
            self.endpoints[name]["DefaultNamespace"] = def_namespace

        if tls_cfg:
            self.tls_cfg[name] = tls_cfg

    def inspect(self) -> dict[str, t.Any]:
        return self()

    @classmethod
    def load_context(cls, name: str) -> t.Self | None:
        meta = Context._load_meta(name)
        if meta:
            instance = cls(
                meta["Name"],
                orchestrator=meta["Metadata"].get("StackOrchestrator", None),
                endpoints=meta.get("Endpoints", None),
                description=meta["Metadata"].get("Description"),
            )
            instance.context_type = meta["Metadata"].get("Type", None)
            instance._load_certs()
            instance.meta_path = get_meta_dir(name)
            return instance
        return None

    @classmethod
    def _load_meta(cls, name: str) -> dict[str, t.Any] | None:
        meta_file = get_meta_file(name)
        if not os.path.isfile(meta_file):
            return None

        metadata: dict[str, t.Any] = {}
        try:
            with open(meta_file, "rt", encoding="utf-8") as f:
                metadata = json.load(f)
        except (OSError, KeyError, ValueError) as e:
            # unknown format
            raise RuntimeError(
                f"Detected corrupted meta file for context {name}: {e}"
            ) from e

        # for docker endpoints, set defaults for
        # Host and SkipTLSVerify fields
        for k, v in metadata["Endpoints"].items():
            if k != "docker":
                continue
            metadata["Endpoints"][k]["Host"] = v.get(
                "Host", get_context_host(None, False)
            )
            metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
                v.get("SkipTLSVerify", True)
            )

        return metadata

    def _load_certs(self) -> None:
        certs = {}
        tls_dir = get_tls_dir(self.name)
        for endpoint in self.endpoints:
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                continue
            ca_cert = None
            cert = None
            key = None
            for filename in os.listdir(os.path.join(tls_dir, endpoint)):
                if filename.startswith("ca"):
                    ca_cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("cert"):
                    cert = os.path.join(tls_dir, endpoint, filename)
                elif filename.startswith("key"):
                    key = os.path.join(tls_dir, endpoint, filename)
            if all([cert, key]) or ca_cert:
                verify = None
                if endpoint == "docker" and not self.endpoints["docker"].get(
                    "SkipTLSVerify", False
                ):
                    verify = True
                certs[endpoint] = TLSConfig(
                    client_cert=(cert, key) if cert and key else None,
                    ca_cert=ca_cert,
                    verify=verify,
                )
        self.tls_cfg = certs
        self.tls_path = tls_dir

    def save(self) -> None:
        meta_dir = get_meta_dir(self.name)
        if not os.path.isdir(meta_dir):
            os.makedirs(meta_dir)
        with open(get_meta_file(self.name), "wt", encoding="utf-8") as f:
            f.write(json.dumps(self.Metadata))

        tls_dir = get_tls_dir(self.name)
        for endpoint, tls in self.tls_cfg.items():
            if not os.path.isdir(os.path.join(tls_dir, endpoint)):
                os.makedirs(os.path.join(tls_dir, endpoint))

            ca_file = tls.ca_cert
            if ca_file:
                copyfile(
                    ca_file, os.path.join(tls_dir, endpoint, os.path.basename(ca_file))
                )

            if tls.cert:
                cert_file, key_file = tls.cert
                copyfile(
                    cert_file,
                    os.path.join(tls_dir, endpoint, os.path.basename(cert_file)),
                )
                copyfile(
                    key_file,
                    os.path.join(tls_dir, endpoint, os.path.basename(key_file)),
                )

        self.meta_path = get_meta_dir(self.name)
        self.tls_path = get_tls_dir(self.name)

    def remove(self) -> None:
        if os.path.isdir(self.meta_path):
            rmtree(self.meta_path)
        if os.path.isdir(self.tls_path):
            rmtree(self.tls_path)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: '{self.name}'>"

    def __str__(self) -> str:
        return json.dumps(self.__call__(), indent=2)

    def __call__(self) -> dict[str, t.Any]:
        result = self.Metadata
        result.update(self.TLSMaterial)
        result.update(self.Storage)
        return result

    def is_docker_host(self) -> bool:
        return self.context_type is None

    @property
    def Name(self) -> str:  # pylint: disable=invalid-name
        return self.name

    @property
    def Host(self) -> str | None:  # pylint: disable=invalid-name
        if not self.orchestrator or self.orchestrator == "swarm":
            endpoint = self.endpoints.get("docker", None)
            if endpoint:
                return endpoint.get("Host", None)  # type: ignore
            return None

        return self.endpoints[self.orchestrator].get("Host", None)  # type: ignore

    @property
    def Orchestrator(self) -> str | None:  # pylint: disable=invalid-name
        return self.orchestrator

    @property
    def Metadata(self) -> dict[str, t.Any]:  # pylint: disable=invalid-name
        meta: dict[str, t.Any] = {}
        if self.orchestrator:
            meta = {"StackOrchestrator": self.orchestrator}
        return {"Name": self.name, "Metadata": meta, "Endpoints": self.endpoints}

    @property
    def TLSConfig(self) -> TLSConfig | None:  # pylint: disable=invalid-name
        key = self.orchestrator
        if not key or key == "swarm":
            key = "docker"
        if key in self.tls_cfg:
            return self.tls_cfg[key]
        return None

    @property
    def TLSMaterial(self) -> dict[str, t.Any]:  # pylint: disable=invalid-name
        certs: dict[str, t.Any] = {}
        for endpoint, tls in self.tls_cfg.items():
            paths = [tls.ca_cert, *tls.cert] if tls.cert else [tls.ca_cert]
            certs[endpoint] = [
                os.path.basename(path) if path else None for path in paths
            ]
        return {"TLSMaterial": certs}

    @property
    def Storage(self) -> dict[str, t.Any]:  # pylint: disable=invalid-name
        return {"Storage": {"MetadataPath": self.meta_path, "TLSPath": self.tls_path}}
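
# Usage sketch (illustrative, not in the original file; assumes a Linux host so
# that parse_host() falls back to the default Unix socket):
#
#     >>> Context("example").Host
#     'unix:///var/run/docker.sock'
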
@@ -0,0 +1,17 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

PROGRAM_PREFIX = "docker-credential-"
DEFAULT_LINUX_STORE = "secretservice"
DEFAULT_OSX_STORE = "osxkeychain"
DEFAULT_WIN32_STORE = "wincred"

@@ -0,0 +1,38 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

if t.TYPE_CHECKING:
    from subprocess import CalledProcessError


class StoreError(RuntimeError):
    pass


class CredentialsNotFound(StoreError):
    pass


class InitializationError(StoreError):
    pass


def process_store_error(cpe: CalledProcessError, program: str) -> StoreError:
    message = cpe.output.decode("utf-8")
    if "credentials not found in native keychain" in message:
        return CredentialsNotFound(f"No matching credentials in {program}")
    return StoreError(
        f'Credentials store {program} exited with "{message.strip()}".'
    )

@@ -0,0 +1,102 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import errno
import json
import subprocess
import typing as t

from . import constants, errors
from .utils import create_environment_dict, find_executable


class Store:
    def __init__(self, program: str, environment: dict[str, str] | None = None) -> None:
        """Create a store object that acts as an interface to
        perform the basic operations for storing, retrieving
        and erasing credentials using `program`.
        """
        self.program = constants.PROGRAM_PREFIX + program
        self.exe = find_executable(self.program)
        self.environment = environment
        if self.exe is None:
            raise errors.InitializationError(
                f"{self.program} not installed or not available in PATH"
            )

    def get(self, server: str | bytes) -> dict[str, t.Any]:
        """Retrieve credentials for `server`. If no credentials are found,
        a `StoreError` will be raised.
        """
        if not isinstance(server, bytes):
            server = server.encode("utf-8")
        data = self._execute("get", server)
        result = json.loads(data.decode("utf-8"))

        # docker-credential-pass will return an object for nonexistent servers
        # whereas other helpers will exit with returncode != 0. For
        # consistency, if no significant data is returned,
        # raise CredentialsNotFound
        if result["Username"] == "" and result["Secret"] == "":
            raise errors.CredentialsNotFound(
                f"No matching credentials in {self.program}"
            )

        return result

    def store(self, server: str, username: str, secret: str) -> bytes:
        """Store credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        data_input = json.dumps(
            {"ServerURL": server, "Username": username, "Secret": secret}
        ).encode("utf-8")
        return self._execute("store", data_input)

    def erase(self, server: str | bytes) -> None:
        """Erase credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        if not isinstance(server, bytes):
            server = server.encode("utf-8")
        self._execute("erase", server)

    def list(self) -> t.Any:
        """List stored credentials. Requires v0.4.0+ of the helper."""
        data = self._execute("list", None)
        return json.loads(data.decode("utf-8"))

    def _execute(self, subcmd: str, data_input: bytes | None) -> bytes:
        if self.exe is None:
            raise errors.StoreError(
                f"{self.program} not installed or not available in PATH"
            )
        output = None
        env = create_environment_dict(self.environment)
        try:
            output = subprocess.check_output(
                [self.exe, subcmd],
                input=data_input,
                env=env,
            )
        except subprocess.CalledProcessError as e:
            raise errors.process_store_error(e, self.program) from e
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise errors.StoreError(
                    f"{self.program} not installed or not available in PATH"
                ) from e
            raise errors.StoreError(
                f'Unexpected OS error "{e.strerror}", errno={e.errno}'
            ) from e
        return output
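A hedged usage sketch for Store, assuming constants.PROGRAM_PREFIX is the usual "docker-credential-" prefix and that a docker-credential-pass helper (an illustrative choice) is on PATH:

# Usage sketch; "pass", the registry URL, and the credentials are illustrative.
store = Store("pass")  # resolved to docker-credential-pass via find_executable()
store.store(server="https://index.docker.io/v1/", username="alice", secret="s3cret")
creds = store.get("https://index.docker.io/v1/")
print(creds["Username"])
store.erase("https://index.docker.io/v1/")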
@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import os
from shutil import which


def find_executable(executable: str, path: str | None = None) -> str | None:
    """
    As distutils.spawn.find_executable, but on Windows, look up
    every extension declared in PATHEXT instead of just `.exe`
    """
    # shutil.which() already uses PATHEXT on Windows, so on
    # Python 3 we can simply use shutil.which() in all cases.
    # (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
    return which(executable, path=path)


def create_environment_dict(overrides: dict[str, str] | None) -> dict[str, str]:
    """
    Create and return a copy of os.environ with the specified overrides
    """
    result = os.environ.copy()
    result.update(overrides or {})
    return result
@@ -0,0 +1,244 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from ansible.module_utils.common.text.converters import to_text

from ._import_helper import HTTPError as _HTTPError

if t.TYPE_CHECKING:
    from requests import Response


class DockerException(Exception):
    """
    A base class from which all other exceptions inherit.

    If you want to catch all errors that the Docker SDK might raise,
    catch this base exception.
    """


def create_api_error_from_http_exception(e: _HTTPError) -> t.NoReturn:
    """
    Create a suitable APIError from requests.exceptions.HTTPError.
    """
    response = e.response
    try:
        explanation = response.json()["message"]
    except ValueError:
        explanation = to_text((response.content or "").strip())
    cls = APIError
    if response.status_code == 404:
        if explanation and (
            "No such image" in str(explanation)
            or "not found: does not exist or no pull access" in str(explanation)
            or "repository does not exist" in str(explanation)
        ):
            cls = ImageNotFound
        else:
            cls = NotFound
    raise cls(e, response=response, explanation=explanation) from e


class APIError(_HTTPError, DockerException):
    """
    An HTTP error from the API.
    """

    def __init__(
        self,
        message: str | Exception,
        response: Response | None = None,
        explanation: str | None = None,
    ) -> None:
        # requests 1.2 supports response as a keyword argument, but
        # requests 1.1 does not
        super().__init__(message)
        self.response = response
        self.explanation = explanation or ""

    def __str__(self) -> str:
        message = super().__str__()

        if self.is_client_error():
            message = f"{self.response.status_code} Client Error for {self.response.url}: {self.response.reason}"

        elif self.is_server_error():
            message = f"{self.response.status_code} Server Error for {self.response.url}: {self.response.reason}"

        if self.explanation:
            message = f'{message} ("{self.explanation}")'

        return message

    @property
    def status_code(self) -> int | None:
        if self.response is not None:
            return self.response.status_code
        return None

    def is_error(self) -> bool:
        return self.is_client_error() or self.is_server_error()

    def is_client_error(self) -> bool:
        if self.status_code is None:
            return False
        return 400 <= self.status_code < 500

    def is_server_error(self) -> bool:
        if self.status_code is None:
            return False
        return 500 <= self.status_code < 600


class NotFound(APIError):
    pass


class ImageNotFound(NotFound):
    pass


class InvalidVersion(DockerException):
    pass


class InvalidRepository(DockerException):
    pass


class InvalidConfigFile(DockerException):
    pass


class InvalidArgument(DockerException):
    pass


class DeprecatedMethod(DockerException):
    pass


class TLSParameterError(DockerException):
    def __init__(self, msg: str) -> None:
        self.msg = msg

    def __str__(self) -> str:
        return self.msg + (
            ". TLS configurations should map the Docker CLI "
            "client configurations. See "
            "https://docs.docker.com/engine/articles/https/ "
            "for API details."
        )


class NullResource(DockerException, ValueError):
    pass


class ContainerError(DockerException):
    """
    Represents a container that has exited with a non-zero exit code.
    """

    def __init__(
        self,
        container: str,
        exit_status: int,
        command: list[str],
        image: str,
        stderr: str | None,
    ):
        self.container = container
        self.exit_status = exit_status
        self.command = command
        self.image = image
        self.stderr = stderr

        err = f": {stderr}" if stderr is not None else ""
        msg = f"Command '{command}' in image '{image}' returned non-zero exit status {exit_status}{err}"

        super().__init__(msg)


class StreamParseError(RuntimeError):
    def __init__(self, reason: Exception) -> None:
        self.msg = reason


class BuildError(DockerException):
    def __init__(self, reason: str, build_log: str) -> None:
        super().__init__(reason)
        self.msg = reason
        self.build_log = build_log


class ImageLoadError(DockerException):
    pass


def create_unexpected_kwargs_error(name: str, kwargs: dict[str, t.Any]) -> TypeError:
    quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
    text = [f"{name}() "]
    if len(quoted_kwargs) == 1:
        text.append("got an unexpected keyword argument ")
    else:
        text.append("got unexpected keyword arguments ")
    text.append(", ".join(quoted_kwargs))
    return TypeError("".join(text))


class MissingContextParameter(DockerException):
    def __init__(self, param: str) -> None:
        self.param = param

    def __str__(self) -> str:
        return f"missing parameter: {self.param}"


class ContextAlreadyExists(DockerException):
    def __init__(self, name: str) -> None:
        self.name = name

    def __str__(self) -> str:
        return f"context {self.name} already exists"


class ContextException(DockerException):
    def __init__(self, msg: str) -> None:
        self.msg = msg

    def __str__(self) -> str:
        return self.msg


class ContextNotFound(DockerException):
    def __init__(self, name: str) -> None:
        self.name = name

    def __str__(self) -> str:
        return f"context '{self.name}' not found"


class MissingRequirementException(DockerException):
    def __init__(
        self, msg: str, requirement: str, import_exception: ImportError | str
    ) -> None:
        self.msg = msg
        self.requirement = requirement
        self.import_exception = import_exception

    def __str__(self) -> str:
        return self.msg
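Every concrete error above ultimately derives from DockerException, so callers can catch by specificity; a small sketch:

# Sketch: the hierarchy lets callers decide how broadly to catch.
try:
    raise ImageNotFound("No such image: example:latest")
except NotFound:
    print("a 404-style error (including missing images)")
except DockerException:
    print("any other error raised by these module utils")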
@@ -0,0 +1,107 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import os
import typing as t

from . import errors
from .transport.ssladapter import SSLHTTPAdapter

if t.TYPE_CHECKING:
    from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
        APIClient,
    )


class TLSConfig:
    """
    TLS configuration.

    Args:
        client_cert (tuple of str): Path to client cert, path to client key.
        ca_cert (str): Path to CA cert file.
        verify (bool or str): This can be ``False`` or a path to a CA cert
            file.
        assert_hostname (bool): Verify the hostname of the server.

    .. _`SSL version`:
        https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
    """

    cert: tuple[str, str] | None = None
    ca_cert: str | None = None
    verify: bool | None = None

    def __init__(
        self,
        client_cert: tuple[str, str] | None = None,
        ca_cert: str | None = None,
        verify: bool | None = None,
        assert_hostname: bool | None = None,
    ):
        # Argument compatibility/mapping with
        # https://docs.docker.com/engine/articles/https/
        # This diverges from the Docker CLI in that users can specify 'tls'
        # here, but also disable any public/default CA pool verification by
        # leaving verify=False

        self.assert_hostname = assert_hostname

        # "client_cert" must have both or neither cert/key files. In
        # either case, alert the user when both are expected but either is
        # missing.

        if client_cert:
            try:
                tls_cert, tls_key = client_cert
            except ValueError:
                raise errors.TLSParameterError(
                    "client_cert must be a tuple of (client certificate, key file)"
                ) from None

            if not (tls_cert and tls_key) or (
                not os.path.isfile(tls_cert) or not os.path.isfile(tls_key)
            ):
                raise errors.TLSParameterError(
                    "Path to a certificate and key files must be provided"
                    " through the client_cert param"
                )
            self.cert = (tls_cert, tls_key)

        # If verify is set, make sure the cert exists
        self.verify = verify
        self.ca_cert = ca_cert
        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
            raise errors.TLSParameterError(
                "Invalid CA certificate provided for `ca_cert`."
            )

    def configure_client(self, client: APIClient) -> None:
        """
        Configure a client with these TLS options.
        """

        if self.verify and self.ca_cert:
            client.verify = self.ca_cert
        else:
            client.verify = self.verify

        if self.cert:
            client.cert = self.cert

        client.mount(
            "https://",
            SSLHTTPAdapter(
                assert_hostname=self.assert_hostname,
            ),
        )
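A minimal sketch of constructing and applying TLSConfig. The certificate paths are illustrative and must exist on disk, since missing files raise TLSParameterError:

# Sketch with illustrative paths; both client cert files must exist.
tls_config = TLSConfig(
    client_cert=("/certs/cert.pem", "/certs/key.pem"),
    ca_cert="/certs/ca.pem",
    verify=True,
)
# tls_config.configure_client(client)  # client being an APIClient instance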
@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

from .._import_helper import HTTPAdapter as _HTTPAdapter


class BaseHTTPAdapter(_HTTPAdapter):
    def close(self) -> None:
        # pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
        # pylint: disable-next=no-member
        super().close()
        if hasattr(self, "pools"):
            self.pools.clear()

    # Hotfix for requests 2.32.0 and 2.32.1: its commit
    # https://github.com/psf/requests/commit/c0813a2d910ea6b4f8438b91d315b8d181302356
    # changes requests.adapters.HTTPAdapter to no longer call get_connection() from
    # send(), but instead call _get_connection().
    def _get_connection(self, request, *args, **kwargs):  # type: ignore
        return self.get_connection(request.url, kwargs.get("proxies"))

    # Fix for requests 2.32.2+:
    # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
    def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):  # type: ignore
        return self.get_connection(request.url, proxies)
@@ -0,0 +1,123 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t
from queue import Empty

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket

if t.TYPE_CHECKING:
    from collections.abc import Mapping

    from requests import PreparedRequest


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class NpipeHTTPConnection(urllib3_connection.HTTPConnection):
    def __init__(self, npipe_path: str, timeout: int | float = 60) -> None:
        super().__init__("localhost", timeout=timeout)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self) -> None:
        sock = NpipeSocket()
        sock.settimeout(self.timeout)
        sock.connect(self.npipe_path)
        self.sock = sock


class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(
        self, npipe_path: str, timeout: int | float = 60, maxsize: int = 10
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self) -> NpipeHTTPConnection:
        return NpipeHTTPConnection(self.npipe_path, self.timeout)

    # When re-using connections, urllib3 tries to call select() on our
    # NpipeSocket instance, causing a crash. To circumvent this, we override
    # _get_conn, where that check happens.
    def _get_conn(self, timeout: int | float) -> NpipeHTTPConnection:
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError as exc:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc

        except Empty as exc:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                ) from exc
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class NpipeHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "npipe_path",
        "pools",
        "timeout",
        "max_pool_size",
    ]

    def __init__(
        self,
        base_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
    ) -> None:
        self.npipe_path = base_url.replace("npipe://", "")
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> NpipeHTTPConnectionPool:
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout, maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(
        self, request: PreparedRequest, proxies: Mapping[str, str] | None
    ) -> str:
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, as is the case when using a named pipe.
        # Since proxies are an irrelevant notion in the case of named pipes
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
@@ -0,0 +1,277 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import functools
import io
import time
import traceback
import typing as t

PYWIN32_IMPORT_ERROR: str | None  # pylint: disable=invalid-name
try:
    import pywintypes
    import win32api
    import win32event
    import win32file
    import win32pipe
except ImportError:
    PYWIN32_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name
else:
    PYWIN32_IMPORT_ERROR = None  # pylint: disable=invalid-name

if t.TYPE_CHECKING:
    from collections.abc import Buffer, Callable

    _Self = t.TypeVar("_Self")
    _P = t.ParamSpec("_P")
    _R = t.TypeVar("_R")


ERROR_PIPE_BUSY = 0xE7
SECURITY_SQOS_PRESENT = 0x100000
SECURITY_ANONYMOUS = 0

MAXIMUM_RETRY_COUNT = 10


def check_closed(
    f: Callable[t.Concatenate[_Self, _P], _R],
) -> Callable[t.Concatenate[_Self, _P], _R]:
    @functools.wraps(f)
    def wrapped(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
        if self._closed:  # type: ignore
            raise RuntimeError("Can not reuse socket after connection was closed.")
        return f(self, *args, **kwargs)

    return wrapped


class NpipeSocket:
    """Partial implementation of the socket API over Windows named pipes.
    This implementation is only designed to be used as a client socket,
    and server-specific methods (bind, listen, accept...) are not
    implemented.
    """

    def __init__(self, handle: t.Any | None = None) -> None:
        self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
        self._handle = handle
        self._address: str | None = None
        self._closed = False
        self.flags: int | None = None

    def accept(self) -> t.NoReturn:
        raise NotImplementedError()

    def bind(self, address: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def close(self) -> None:
        if self._handle is None:
            raise ValueError("Handle not present")
        self._handle.Close()
        self._closed = True

    @check_closed
    def connect(self, address: str, retry_count: int = 0) -> None:
        try:
            handle = win32file.CreateFile(
                address,
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                0,
                None,
                win32file.OPEN_EXISTING,
                (
                    SECURITY_ANONYMOUS
                    | SECURITY_SQOS_PRESENT
                    | win32file.FILE_FLAG_OVERLAPPED
                ),
                0,
            )
        except win32pipe.error as e:
            # See Remarks:
            # https://msdn.microsoft.com/en-us/library/aa365800.aspx
            if e.winerror == ERROR_PIPE_BUSY:
                # Another program or thread has grabbed our pipe instance
                # before we got to it. Wait for availability and attempt to
                # connect again.
                retry_count = retry_count + 1
                if retry_count < MAXIMUM_RETRY_COUNT:
                    time.sleep(1)
                    return self.connect(address, retry_count)
            raise e

        self.flags = win32pipe.GetNamedPipeInfo(handle)[0]  # type: ignore

        self._handle = handle
        self._address = address

    @check_closed
    def connect_ex(self, address: str) -> None:
        self.connect(address)

    @check_closed
    def detach(self) -> t.Any:
        self._closed = True
        return self._handle

    @check_closed
    def dup(self) -> NpipeSocket:
        return NpipeSocket(self._handle)

    def getpeername(self) -> str | None:
        return self._address

    def getsockname(self) -> str | None:
        return self._address

    def getsockopt(
        self, level: t.Any, optname: t.Any, buflen: t.Any = None
    ) -> t.NoReturn:
        raise NotImplementedError()

    def ioctl(self, control: t.Any, option: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def listen(self, backlog: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def makefile(self, mode: str, bufsize: int | None = None) -> t.IO[bytes]:
        if mode.strip("b") != "r":
            raise NotImplementedError()
        rawio = NpipeFileIOBase(self)
        if bufsize is None or bufsize <= 0:
            bufsize = io.DEFAULT_BUFFER_SIZE
        return io.BufferedReader(rawio, buffer_size=bufsize)

    @check_closed
    def recv(self, bufsize: int, flags: int = 0) -> str:
        if self._handle is None:
            raise ValueError("Handle not present")
        dummy_err, data = win32file.ReadFile(self._handle, bufsize)
        return data

    @check_closed
    def recvfrom(self, bufsize: int, flags: int = 0) -> tuple[str, str | None]:
        data = self.recv(bufsize, flags)
        return (data, self._address)

    @check_closed
    def recvfrom_into(
        self, buf: Buffer, nbytes: int = 0, flags: int = 0
    ) -> tuple[int, str | None]:
        return self.recv_into(buf, nbytes), self._address

    @check_closed
    def recv_into(self, buf: Buffer, nbytes: int = 0) -> int:
        if self._handle is None:
            raise ValueError("Handle not present")
        readbuf = buf if isinstance(buf, memoryview) else memoryview(buf)

        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            dummy_err, dummy_data = win32file.ReadFile(  # type: ignore
                self._handle, readbuf[:nbytes] if nbytes else readbuf, overlapped
            )
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    @check_closed
    def send(self, string: Buffer, flags: int = 0) -> int:
        if self._handle is None:
            raise ValueError("Handle not present")
        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            win32file.WriteFile(self._handle, string, overlapped)  # type: ignore
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    @check_closed
    def sendall(self, string: Buffer, flags: int = 0) -> int:
        return self.send(string, flags)

    @check_closed
    def sendto(self, string: Buffer, address: str) -> int:
        self.connect(address)
        return self.send(string)

    def setblocking(self, flag: bool) -> None:
        if flag:
            return self.settimeout(None)
        return self.settimeout(0)

    def settimeout(self, value: int | float | None) -> None:
        if value is None:
            # Blocking mode
            self._timeout = win32event.INFINITE
        elif not isinstance(value, (float, int)) or value < 0:
            raise ValueError("Timeout value out of range")
        else:
            # Timeout mode - Value converted to milliseconds
            self._timeout = int(value * 1000)

    def gettimeout(self) -> int | float | None:
        return self._timeout

    def setsockopt(self, level: t.Any, optname: t.Any, value: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    @check_closed
    def shutdown(self, how: t.Any) -> None:
        return self.close()


class NpipeFileIOBase(io.RawIOBase):
    def __init__(self, npipe_socket: NpipeSocket | None) -> None:
        self.sock = npipe_socket

    def close(self) -> None:
        super().close()
        self.sock = None

    def fileno(self) -> int:
        if self.sock is None:
            raise RuntimeError("socket is closed")
        # TODO: This is definitely a bug, NpipeSocket.fileno() does not exist!
        return self.sock.fileno()  # type: ignore

    def isatty(self) -> bool:
        return False

    def readable(self) -> bool:
        return True

    def readinto(self, buf: Buffer) -> int:
        if self.sock is None:
            raise RuntimeError("socket is closed")
        return self.sock.recv_into(buf)

    def seekable(self) -> bool:
        return False

    def writable(self) -> bool:
        return False
@@ -0,0 +1,311 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import logging
import os
import signal
import socket
import subprocess
import traceback
import typing as t
from queue import Empty
from urllib.parse import urlparse

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter

PARAMIKO_IMPORT_ERROR: str | None  # pylint: disable=invalid-name
try:
    import paramiko
except ImportError:
    PARAMIKO_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name
else:
    PARAMIKO_IMPORT_ERROR = None  # pylint: disable=invalid-name

if t.TYPE_CHECKING:
    from collections.abc import Buffer, Mapping


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class SSHSocket(socket.socket):
    def __init__(self, host: str) -> None:
        super().__init__(socket.AF_INET, socket.SOCK_STREAM)
        self.host = host
        self.port = None
        self.user = None
        if ":" in self.host:
            self.host, self.port = self.host.split(":")
        if "@" in self.host:
            self.user, self.host = self.host.split("@")

        self.proc: subprocess.Popen | None = None

    def connect(self, *args_: t.Any, **kwargs: t.Any) -> None:
        args = ["ssh"]
        if self.user:
            args = args + ["-l", self.user]

        if self.port:
            args = args + ["-p", self.port]

        args = args + ["--", self.host, "docker system dial-stdio"]

        preexec_func = None
        if not constants.IS_WINDOWS_PLATFORM:

            def f() -> None:
                signal.signal(signal.SIGINT, signal.SIG_IGN)

            preexec_func = f

        env = dict(os.environ)

        # drop LD_LIBRARY_PATH and SSL_CERT_FILE
        env.pop("LD_LIBRARY_PATH", None)
        env.pop("SSL_CERT_FILE", None)

        self.proc = subprocess.Popen(  # pylint: disable=consider-using-with
            args,
            env=env,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            preexec_fn=preexec_func,
        )

    def _write(self, data: Buffer) -> int:
        if not self.proc:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first."
            )
        assert self.proc.stdin is not None
        if self.proc.stdin.closed:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first after close()."
            )
        written = self.proc.stdin.write(data)
        self.proc.stdin.flush()
        return written

    def sendall(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> None:
        self._write(data)

    def send(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> int:
        return self._write(data)

    def recv(self, n: int, *args: t.Any, **kwargs: t.Any) -> bytes:
        if not self.proc:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first."
            )
        assert self.proc.stdout is not None
        return self.proc.stdout.read(n)

    def makefile(self, mode: str, *args: t.Any, **kwargs: t.Any) -> t.IO:  # type: ignore
        if not self.proc:
            self.connect()
        assert self.proc is not None
        assert self.proc.stdout is not None
        self.proc.stdout.channel = self  # type: ignore

        return self.proc.stdout

    def close(self) -> None:
        if not self.proc:
            return
        assert self.proc.stdin is not None
        if self.proc.stdin.closed:
            return
        self.proc.stdin.write(b"\n\n")
        self.proc.stdin.flush()
        self.proc.terminate()


class SSHConnection(urllib3_connection.HTTPConnection):
    def __init__(
        self,
        *,
        ssh_transport: paramiko.Transport | None = None,
        timeout: int | float = 60,
        host: str,
    ) -> None:
        super().__init__("localhost", timeout=timeout)
        self.ssh_transport = ssh_transport
        self.timeout = timeout
        self.ssh_host = host
        self.sock: paramiko.Channel | SSHSocket | None = None

    def connect(self) -> None:
        if self.ssh_transport:
            channel = self.ssh_transport.open_session()
            channel.settimeout(self.timeout)
            channel.exec_command("docker system dial-stdio")
            self.sock = channel
        else:
            sock = SSHSocket(self.ssh_host)
            sock.settimeout(self.timeout)
            sock.connect()
            self.sock = sock


class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    scheme = "ssh"

    def __init__(
        self,
        *,
        ssh_client: paramiko.SSHClient | None = None,
        timeout: int | float = 60,
        maxsize: int = 10,
        host: str,
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.ssh_transport: paramiko.Transport | None = None
        self.timeout = timeout
        if ssh_client:
            self.ssh_transport = ssh_client.get_transport()
        self.ssh_host = host

    def _new_conn(self) -> SSHConnection:
        return SSHConnection(
            ssh_transport=self.ssh_transport,
            timeout=self.timeout,
            host=self.ssh_host,
        )

    # When re-using connections, urllib3 calls fileno() on our
    # SSH channel instance, quickly overloading our fd limit. To avoid this,
    # we override _get_conn
    def _get_conn(self, timeout: int | float) -> SSHConnection:
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError as exc:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc

        except Empty as exc:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                ) from exc
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class SSHHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "pools",
        "timeout",
        "ssh_client",
        "ssh_params",
        "max_pool_size",
    ]

    def __init__(
        self,
        base_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
        shell_out: bool = False,
    ) -> None:
        self.ssh_client: paramiko.SSHClient | None = None
        if not shell_out:
            self._create_paramiko_client(base_url)
            self._connect()

        self.ssh_host = base_url
        if base_url.startswith("ssh://"):
            self.ssh_host = base_url[len("ssh://") :]

        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def _create_paramiko_client(self, base_url: str) -> None:
        logging.getLogger("paramiko").setLevel(logging.WARNING)
        self.ssh_client = paramiko.SSHClient()
        base_url_p = urlparse(base_url)
        assert base_url_p.hostname is not None
        self.ssh_params: dict[str, t.Any] = {
            "hostname": base_url_p.hostname,
            "port": base_url_p.port,
            "username": base_url_p.username,
        }
        ssh_config_file = os.path.expanduser("~/.ssh/config")
        if os.path.exists(ssh_config_file):
            conf = paramiko.SSHConfig()
            with open(ssh_config_file, "rt", encoding="utf-8") as f:
                conf.parse(f)
            host_config = conf.lookup(base_url_p.hostname)
            if "proxycommand" in host_config:
                self.ssh_params["sock"] = paramiko.ProxyCommand(
                    host_config["proxycommand"]
                )
            if "hostname" in host_config:
                self.ssh_params["hostname"] = host_config["hostname"]
            if base_url_p.port is None and "port" in host_config:
                self.ssh_params["port"] = host_config["port"]
            if base_url_p.username is None and "user" in host_config:
                self.ssh_params["username"] = host_config["user"]
            if "identityfile" in host_config:
                self.ssh_params["key_filename"] = host_config["identityfile"]

        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())

    def _connect(self) -> None:
        if self.ssh_client:
            self.ssh_client.connect(**self.ssh_params)

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> SSHConnectionPool:
        if not self.ssh_client:
            return SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host,
            )
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            # Connection is closed; try a reconnect
            if self.ssh_client and not self.ssh_client.get_transport():
                self._connect()

            pool = SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host,
            )
            self.pools[url] = pool

        return pool

    def close(self) -> None:
        super().close()
        if self.ssh_client:
            self.ssh_client.close()
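A hedged sketch of mounting SSHHTTPAdapter on a requests session, following the http+docker://ssh mount convention used elsewhere in these module utils; the host name is illustrative, and shell_out=True shells out to the local ssh binary instead of connecting via paramiko at construction time:

# Sketch; assumes an SSH-reachable Docker host. Host name is illustrative.
import requests

session = requests.Session()
adapter = SSHHTTPAdapter("ssh://user@docker-host.example.com", shell_out=True)
session.mount("http+docker://ssh", adapter)
# Requests to http+docker://ssh/... are now tunnelled over
# `ssh -- user@docker-host.example.com docker system dial-stdio`.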
@@ -0,0 +1,71 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter

# Resolves OpenSSL issues in some servers:
# https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
# https://github.com/kennethreitz/requests/pull/799


PoolManager = urllib3.poolmanager.PoolManager


class SSLHTTPAdapter(BaseHTTPAdapter):
    """An HTTPS Transport Adapter that uses an arbitrary SSL version."""

    __attrs__ = HTTPAdapter.__attrs__ + ["assert_hostname"]

    def __init__(
        self,
        assert_hostname: bool | None = None,
        **kwargs: t.Any,
    ) -> None:
        self.assert_hostname = assert_hostname
        super().__init__(**kwargs)

    def init_poolmanager(
        self, connections: int, maxsize: int, block: bool = False, **kwargs: t.Any
    ) -> None:
        kwargs = {
            "num_pools": connections,
            "maxsize": maxsize,
            "block": block,
        }
        if self.assert_hostname is not None:
            kwargs["assert_hostname"] = self.assert_hostname

        self.poolmanager = PoolManager(**kwargs)

    def get_connection(self, *args: t.Any, **kwargs: t.Any) -> urllib3.ConnectionPool:
        """
        Ensure assert_hostname is set correctly on our pool.

        We already take care of a normal poolmanager via init_poolmanager,
        but we still need to take care of when there is a proxy poolmanager.

        Note that this method is no longer called for newer requests versions.
        """
        # pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
        # pylint: disable-next=no-member
        conn = super().get_connection(*args, **kwargs)
        if (
            self.assert_hostname is not None
            and conn.assert_hostname != self.assert_hostname  # type: ignore
        ):
            conn.assert_hostname = self.assert_hostname  # type: ignore
        return conn
@@ -0,0 +1,126 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import socket
import typing as t

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter

if t.TYPE_CHECKING:
    from collections.abc import Mapping

    from requests import PreparedRequest

    from ..._socket_helper import SocketLike


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class UnixHTTPConnection(urllib3_connection.HTTPConnection):
    def __init__(
        self, base_url: str | bytes, unix_socket: str, timeout: int | float = 60
    ) -> None:
        super().__init__("localhost", timeout=timeout)
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout
        self.disable_buffering = False

    def connect(self) -> None:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.unix_socket)
        self.sock = sock

    def putheader(self, header: str, *values: str) -> None:
        super().putheader(header, *values)
        if header == "Connection" and "Upgrade" in values:
            self.disable_buffering = True

    def response_class(self, sock: SocketLike, *args: t.Any, **kwargs: t.Any) -> t.Any:
        # FIXME: We may need to disable buffering on Py3,
        # but there's no clear way to do it at the moment. See:
        # https://github.com/docker/docker-py/issues/1799
        return super().response_class(sock, *args, **kwargs)


class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(
        self,
        base_url: str | bytes,
        socket_path: str,
        timeout: int | float = 60,
        maxsize: int = 10,
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self) -> UnixHTTPConnection:
        return UnixHTTPConnection(self.base_url, self.socket_path, self.timeout)


class UnixHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "pools",
        "socket_path",
        "timeout",
        "max_pool_size",
    ]

    def __init__(
        self,
        socket_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
    ) -> None:
        socket_path = socket_url.replace("http+unix://", "")
        if not socket_path.startswith("/"):
            socket_path = "/" + socket_path
        self.socket_path = socket_path
        self.timeout = timeout
        self.max_pool_size = max_pool_size

        def f(p: t.Any) -> None:
            p.close()

        self.pools = RecentlyUsedContainer(pool_connections, dispose_func=f)
        super().__init__()

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> UnixHTTPConnectionPool:
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = UnixHTTPConnectionPool(
                url, self.socket_path, self.timeout, maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request: PreparedRequest, proxies: Mapping[str, str]) -> str:
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, as is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
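A sketch of using UnixHTTPAdapter directly with requests, assuming a Docker daemon listens on the default socket path; the http+docker://localhost base mirrors how the api.client module util mounts this adapter:

# Sketch; assumes a daemon on /var/run/docker.sock.
import requests

session = requests.Session()
session.mount("http+docker://", UnixHTTPAdapter("http+unix:///var/run/docker.sock"))
resp = session.get("http+docker://localhost/version")
print(resp.json()["ApiVersion"])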
@@ -0,0 +1,90 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import socket
import typing as t

from .._import_helper import urllib3
from ..errors import DockerException

if t.TYPE_CHECKING:
    from requests import Response

_T = t.TypeVar("_T")


class CancellableStream(t.Generic[_T]):
    """
    Stream wrapper for real-time events, logs, etc. from the server.

    Example:
        >>> events = client.events()
        >>> for event in events:
        ...     print(event)
        >>> # and cancel from another thread
        >>> events.close()
    """

    def __init__(self, stream: t.Generator[_T], response: Response) -> None:
        self._stream = stream
        self._response = response

    def __iter__(self) -> t.Self:
        return self

    def __next__(self) -> _T:
        try:
            return next(self._stream)
        except urllib3.exceptions.ProtocolError as exc:
            raise StopIteration from exc
        except socket.error as exc:
            raise StopIteration from exc

    next = __next__

    def close(self) -> None:
        """
        Closes the event streaming.
        """

        if not self._response.raw.closed:
            # find the underlying socket object
            # based on api.client._get_raw_response_socket

            sock_fp = self._response.raw._fp.fp  # type: ignore

            if hasattr(sock_fp, "raw"):
                sock_raw = sock_fp.raw

                if hasattr(sock_raw, "sock"):
                    sock = sock_raw.sock

                elif hasattr(sock_raw, "_sock"):
                    sock = sock_raw._sock

            elif hasattr(sock_fp, "channel"):
                # We are working with a paramiko (SSH) channel, which does not
                # support cancelable streams with the current implementation
                raise DockerException(
                    "Cancellable streams not supported for the SSH protocol"
                )
            else:
                sock = sock_fp._sock  # type: ignore

            if hasattr(urllib3.contrib, "pyopenssl") and isinstance(
                sock, urllib3.contrib.pyopenssl.WrappedSocket
            ):
                sock = sock.socket

            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
@@ -0,0 +1,310 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import io
import os
import random
import re
import tarfile
import tempfile
import typing as t

from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX
from . import fnmatch

if t.TYPE_CHECKING:
    from collections.abc import Sequence


_SEP = re.compile("/|\\\\") if IS_WINDOWS_PLATFORM else re.compile("/")


def tar(
    path: str,
    exclude: list[str] | None = None,
    dockerfile: tuple[str, str | None] | tuple[None, None] | None = None,
    fileobj: t.IO[bytes] | None = None,
    gzip: bool = False,
) -> t.IO[bytes]:
    root = os.path.abspath(path)
    exclude = exclude or []
    dockerfile = dockerfile or (None, None)
    extra_files: list[tuple[str, str]] = []
    if dockerfile[1] is not None:
        assert dockerfile[0] is not None
        dockerignore_contents = "\n".join(
            (exclude or [".dockerignore"]) + [dockerfile[0]]
        )
        extra_files = [
            (".dockerignore", dockerignore_contents),
            dockerfile,  # type: ignore
        ]
    return create_archive(
        files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
        root=root,
        fileobj=fileobj,
        gzip=gzip,
        extra_files=extra_files,
    )


def exclude_paths(
    root: str, patterns: list[str], dockerfile: str | None = None
) -> set[str]:
    """
    Given a root directory path and a list of .dockerignore patterns, return
    an iterator of all paths (both regular files and directories) in the root
    directory that do *not* match any of the patterns.

    All paths returned are relative to the root.
    """

    if dockerfile is None:
        dockerfile = "Dockerfile"

    patterns.append("!" + dockerfile)
    pm = PatternMatcher(patterns)
    return set(pm.walk(root))


def build_file_list(root: str) -> list[str]:
    files = []
    for dirname, dirnames, fnames in os.walk(root):
        for filename in fnames + dirnames:
            longpath = os.path.join(dirname, filename)
            files.append(longpath.replace(root, "", 1).lstrip("/"))

    return files


def create_archive(
    root: str,
    files: Sequence[str] | None = None,
    fileobj: t.IO[bytes] | None = None,
    gzip: bool = False,
    extra_files: Sequence[tuple[str, str]] | None = None,
) -> t.IO[bytes]:
    extra_files = extra_files or []
    if not fileobj:
        # pylint: disable-next=consider-using-with
        fileobj = tempfile.NamedTemporaryFile()  # noqa: SIM115

    with tarfile.open(mode="w:gz" if gzip else "w", fileobj=fileobj) as tarf:
        if files is None:
            files = build_file_list(root)
        extra_names = set(e[0] for e in extra_files)
        for path in files:
            if path in extra_names:
                # Extra files override context files with the same name
                continue
            full_path = os.path.join(root, path)

            i = tarf.gettarinfo(full_path, arcname=path)
            if i is None:
                # This happens when we encounter a socket file. We can safely
                # ignore it and proceed.
                continue  # type: ignore

            # Workaround https://bugs.python.org/issue32713
            if i.mtime < 0 or i.mtime > 8**11 - 1:
                i.mtime = int(i.mtime)

            if IS_WINDOWS_PLATFORM:
                # Windows does not keep track of the execute bit, so we make files
                # and directories executable by default.
                i.mode = i.mode & 0o755 | 0o111

            if i.isfile():
                try:
                    with open(full_path, "rb") as f:
                        tarf.addfile(i, f)
                except IOError as exc:
                    raise IOError(f"Can not read file in context: {full_path}") from exc
            else:
                # Directories, FIFOs, symlinks... do not need to be read.
                tarf.addfile(i, None)

        for name, contents in extra_files:
            info = tarfile.TarInfo(name)
            contents_encoded = contents.encode("utf-8")
            info.size = len(contents_encoded)
            tarf.addfile(info, io.BytesIO(contents_encoded))

    fileobj.seek(0)
    return fileobj


def mkbuildcontext(dockerfile: io.BytesIO | t.IO[bytes]) -> t.IO[bytes]:
    # pylint: disable-next=consider-using-with
    f = tempfile.NamedTemporaryFile()  # noqa: SIM115
    try:
        with tarfile.open(mode="w", fileobj=f) as tarf:
            if isinstance(dockerfile, io.StringIO):  # type: ignore
                raise TypeError("Please use io.BytesIO to create in-memory Dockerfiles")
            if isinstance(dockerfile, io.BytesIO):
                dfinfo = tarfile.TarInfo("Dockerfile")
                dfinfo.size = len(dockerfile.getvalue())
                dockerfile.seek(0)
            else:
                dfinfo = tarf.gettarinfo(fileobj=dockerfile, arcname="Dockerfile")
            tarf.addfile(dfinfo, dockerfile)
            f.seek(0)
    except Exception:  # noqa: E722
        f.close()
        raise
    return f


def split_path(p: str) -> list[str]:
    return [pt for pt in re.split(_SEP, p) if pt and pt != "."]


def normalize_slashes(p: str) -> str:
    if IS_WINDOWS_PLATFORM:
        return "/".join(split_path(p))
    return p


def walk(root: str, patterns: Sequence[str], default: bool = True) -> t.Generator[str]:
    pm = PatternMatcher(patterns)
    return pm.walk(root)


# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher:
    def __init__(self, patterns: Sequence[str]) -> None:
        self.patterns = list(filter(lambda p: p.dirs, [Pattern(p) for p in patterns]))
        self.patterns.append(Pattern("!.dockerignore"))

    def matches(self, filepath: str) -> bool:
        matched = False
        parent_path = os.path.dirname(filepath)
        parent_path_dirs = split_path(parent_path)

        for pattern in self.patterns:
            negative = pattern.exclusion
            match = pattern.match(filepath)
            if (
                not match
                and parent_path != ""
                and len(pattern.dirs) <= len(parent_path_dirs)
            ):
                match = pattern.match(
                    os.path.sep.join(parent_path_dirs[: len(pattern.dirs)])
                )

            if match:
                matched = not negative

        return matched

    def walk(self, root: str) -> t.Generator[str]:
        def rec_walk(current_dir: str) -> t.Generator[str]:
            for f in os.listdir(current_dir):
                fpath = os.path.join(os.path.relpath(current_dir, root), f)
                if fpath.startswith("." + os.path.sep):
                    fpath = fpath[2:]
                match = self.matches(fpath)
                if not match:
                    yield fpath

                cur = os.path.join(root, fpath)
                if not os.path.isdir(cur) or os.path.islink(cur):
                    continue

                if match:
                    # If we want to skip this file and it is a directory
                    # then we should first check to see if there's an
                    # excludes pattern (e.g. !dir/file) that starts with this
                    # dir. If so then we cannot skip this dir.
                    skip = True

                    for pat in self.patterns:
                        if not pat.exclusion:
                            continue
                        if pat.cleaned_pattern.startswith(normalize_slashes(fpath)):
                            skip = False
                            break
                    if skip:
                        continue
                yield from rec_walk(cur)

        return rec_walk(root)


class Pattern:
    def __init__(self, pattern_str: str) -> None:
        self.exclusion = False
        if pattern_str.startswith("!"):
            self.exclusion = True
            pattern_str = pattern_str[1:]

        self.dirs = self.normalize(pattern_str)
        self.cleaned_pattern = "/".join(self.dirs)

    @classmethod
    def normalize(cls, p: str) -> list[str]:
        # Remove trailing spaces
        p = p.strip()

        # Leading and trailing slashes are not relevant. Yes,
        # "foo.py/" must exclude the "foo.py" regular file. "."
        # components are not relevant either, even if the whole
        # pattern is only ".", as the Docker reference states: "For
        # historical reasons, the pattern . is ignored."
        # ".." component must be cleared with the potential previous
        # component, regardless of whether it exists: "A preprocessing
        # step [...] eliminates . and .. elements using Go's
        # filepath.".
        i = 0
        split = split_path(p)
        while i < len(split):
            if split[i] == "..":
                del split[i]
                if i > 0:
                    del split[i - 1]
                    i -= 1
            else:
                i += 1
        return split

    def match(self, filepath: str) -> bool:
        return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)


def process_dockerfile(
    dockerfile: str | None, path: str
) -> tuple[str, str | None] | tuple[None, None]:
    if not dockerfile:
        return (None, None)

    abs_dockerfile = dockerfile
    if not os.path.isabs(dockerfile):
        abs_dockerfile = os.path.join(path, dockerfile)
        if IS_WINDOWS_PLATFORM and path.startswith(WINDOWS_LONGPATH_PREFIX):
            abs_dockerfile = f"{WINDOWS_LONGPATH_PREFIX}{os.path.normpath(abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX) :])}"
    if os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[
        0
    ] or os.path.relpath(abs_dockerfile, path).startswith(".."):
        # Dockerfile not in context - read data to insert into tar later
        with open(abs_dockerfile, "rt", encoding="utf-8") as df:
            return (f".dockerfile.{random.getrandbits(160):x}", df.read())

    # Dockerfile is inside the context - return path relative to context root
    if dockerfile == abs_dockerfile:
        # Only calculate relpath if necessary to avoid errors
        # on Windows client -> Linux Docker
        # see https://github.com/docker/compose/issues/5969
        dockerfile = os.path.relpath(abs_dockerfile, path)
    return (dockerfile, None)
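# Illustrative .dockerignore pattern handling, derived from the Pattern and
# PatternMatcher classes above (pattern strings invented):
#
#   Pattern("./foo/../bar/").cleaned_pattern   # -> "bar"  ("." and ".." folded away)
#   Pattern("!build/").exclusion               # -> True   (re-include pattern)
#   PatternMatcher(["*.log", "!keep.log"]).matches("keep.log")
#   # -> False, so walk() yields keep.log even though "*.log" excluded it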
@@ -0,0 +1,89 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import logging
import os
import typing as t

from ..constants import IS_WINDOWS_PLATFORM

DOCKER_CONFIG_FILENAME = os.path.join(".docker", "config.json")
LEGACY_DOCKER_CONFIG_FILENAME = ".dockercfg"

log = logging.getLogger(__name__)


def get_default_config_file() -> str:
    return os.path.join(home_dir(), DOCKER_CONFIG_FILENAME)


def find_config_file(config_path: str | None = None) -> str | None:
    homedir = home_dir()
    paths = list(
        filter(
            None,
            [
                config_path,  # 1
                config_path_from_environment(),  # 2
                os.path.join(homedir, DOCKER_CONFIG_FILENAME),  # 3
                os.path.join(homedir, LEGACY_DOCKER_CONFIG_FILENAME),  # 4
            ],
        )
    )

    log.debug("Trying paths: %s", repr(paths))

    for path in paths:
        if os.path.exists(path):
            log.debug("Found file at path: %s", path)
            return path

    log.debug("No config file found")

    return None


def config_path_from_environment() -> str | None:
    config_dir = os.environ.get("DOCKER_CONFIG")
    if not config_dir:
        return None
    return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))


def home_dir() -> str:
    """
    Get the user's home directory, using the same logic as the Docker Engine
    client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
    """
    if IS_WINDOWS_PLATFORM:
        return os.environ.get("USERPROFILE", "")
    return os.path.expanduser("~")


def load_general_config(config_path: str | None = None) -> dict[str, t.Any]:
    config_file = find_config_file(config_path)

    if not config_file:
        return {}

    try:
        with open(config_file, "rt", encoding="utf-8") as f:
            return json.load(f)
    except (IOError, ValueError) as e:
        # In the case of a legacy `.dockercfg` file, we will not
        # be able to load any JSON data.
        log.debug(e)

    log.debug("All parsing attempts failed - returning empty config")
    return {}
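# A quick sketch of the lookup order implemented by find_config_file() above,
# assuming DOCKER_CONFIG is set (paths are illustrative only):
#
#   os.environ["DOCKER_CONFIG"] = "/tmp/docker-cfg"
#   config_path_from_environment()
#   # -> "/tmp/docker-cfg/config.json"
#   find_config_file()
#   # checks, in order: the explicit config_path argument,
#   # $DOCKER_CONFIG/config.json, ~/.docker/config.json, ~/.dockercfg,
#   # and returns the first path that exists.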
@@ -0,0 +1,67 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import functools
import typing as t

from .. import errors
from . import utils

if t.TYPE_CHECKING:
    from collections.abc import Callable

    from ..api.client import APIClient

_Self = t.TypeVar("_Self")
_P = t.ParamSpec("_P")
_R = t.TypeVar("_R")


def minimum_version(
    version: str,
) -> Callable[
    [Callable[t.Concatenate[_Self, _P], _R]],
    Callable[t.Concatenate[_Self, _P], _R],
]:
    def decorator(
        f: Callable[t.Concatenate[_Self, _P], _R],
    ) -> Callable[t.Concatenate[_Self, _P], _R]:
        @functools.wraps(f)
        def wrapper(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
            # We use _Self instead of APIClient since this is used for mixins for APIClient.
            # This unfortunately means that self._version does not exist in the mixin,
            # it only exists after mixing in. This is why we ignore types here.
            if utils.version_lt(self._version, version):  # type: ignore
                raise errors.InvalidVersion(
                    f"{f.__name__} is not available for version < {version}"
                )
            return f(self, *args, **kwargs)

        return wrapper

    return decorator


def update_headers(
    f: Callable[t.Concatenate[APIClient, _P], _R],
) -> Callable[t.Concatenate[APIClient, _P], _R]:
    def inner(self: APIClient, *args: _P.args, **kwargs: _P.kwargs) -> _R:
        if "HttpHeaders" in self._general_configs:
            if not kwargs.get("headers"):
                kwargs["headers"] = self._general_configs["HttpHeaders"]
            else:
                # We cannot (yet) model that kwargs["headers"] should be a dictionary
                kwargs["headers"].update(self._general_configs["HttpHeaders"])  # type: ignore
        return f(self, *args, **kwargs)

    return inner
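# A minimal sketch of how minimum_version() is meant to decorate an APIClient
# mixin method; FakeMixin, some_endpoint and the version number are
# hypothetical names for illustration only:
#
#   class FakeMixin:
#       @minimum_version("1.25")
#       def some_endpoint(self, name: str) -> None:
#           ...
#
# After mixing into APIClient, calling some_endpoint() on a client whose
# negotiated API version is below 1.25 raises errors.InvalidVersion.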
@@ -0,0 +1,128 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

"""Filename matching with shell patterns.

fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case into account.

The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.

The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""

from __future__ import annotations

import re

__all__ = ["fnmatch", "fnmatchcase", "translate"]

_cache: dict[str, re.Pattern] = {}
_MAXCACHE = 100


def _purge() -> None:
    """Clear the pattern cache"""
    _cache.clear()


def fnmatch(name: str, pat: str) -> bool:
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    * matches everything
    ? matches any single character
    [seq] matches any character in seq
    [!seq] matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you do not want this, use fnmatchcase(FILENAME, PATTERN).
    """

    name = name.lower()
    pat = pat.lower()
    return fnmatchcase(name, pat)


def fnmatchcase(name: str, pat: str) -> bool:
    """Test whether FILENAME matches PATTERN, including case.
    This is a version of fnmatch() which does not case-normalize
    its arguments.
    """

    try:
        re_pat = _cache[pat]
    except KeyError:
        res = translate(pat)
        if len(_cache) >= _MAXCACHE:
            _cache.clear()
        _cache[pat] = re_pat = re.compile(res)
    return re_pat.match(name) is not None


def translate(pat: str) -> str:
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """
    i, n = 0, len(pat)
    res = "^"
    while i < n:
        c = pat[i]
        i = i + 1
        if c == "*":
            if i < n and pat[i] == "*":
                # is some flavor of "**"
                i = i + 1
                # Treat **/ as ** so eat the "/"
                if i < n and pat[i] == "/":
                    i = i + 1
                if i >= n:
                    # is "**EOF" - to align with .gitignore just accept all
                    res = res + ".*"
                else:
                    # is "**"
                    # Note that this allows for any # of /'s (even 0) because
                    # the .* will eat everything, even /'s
                    res = res + "(.*/)?"
            else:
                # is "*" so map it to anything but "/"
                res = res + "[^/]*"
        elif c == "?":
            # "?" is any char except "/"
            res = res + "[^/]"
        elif c == "[":
            j = i
            if j < n and pat[j] == "!":
                j = j + 1
            if j < n and pat[j] == "]":
                j = j + 1
            while j < n and pat[j] != "]":
                j = j + 1
            if j >= n:
                res = res + "\\["
            else:
                stuff = pat[i:j].replace("\\", "\\\\")
                i = j + 1
                if stuff[0] == "!":
                    stuff = "^" + stuff[1:]
                elif stuff[0] == "^":
                    stuff = "\\" + stuff
                res = f"{res}[{stuff}]"
        else:
            res = res + re.escape(c)

    return res + "$"
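# Illustrative sketch of what translate() produces for a few .dockerignore
# style patterns (values derived from the implementation above):
#
#   translate("*.py")     # -> "^[^/]*\\.py$"   (* never crosses "/")
#   translate("**/foo")   # -> "^(.*/)?foo$"    (** matches any depth, even 0)
#   fnmatch("a/b/foo", "**/foo")   # -> True
#   fnmatch("a/b.py", "*.py")      # -> False ("*" does not match "a/")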
@@ -0,0 +1,100 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import json.decoder
import typing as t

from ..errors import StreamParseError

if t.TYPE_CHECKING:
    import re
    from collections.abc import Callable

_T = t.TypeVar("_T")


json_decoder = json.JSONDecoder()


def stream_as_text(stream: t.Generator[bytes | str]) -> t.Generator[str]:
    """
    Given a stream of bytes or text, if any of the items in the stream
    are bytes convert them to text.
    This function can be removed once we return text streams
    instead of byte streams.
    """
    for data in stream:
        if not isinstance(data, str):
            data = data.decode("utf-8", "replace")
        yield data


def json_splitter(buffer: str) -> tuple[t.Any, str] | None:
    """Attempt to parse a json object from a buffer. If there is at least one
    object, return it and the rest of the buffer, otherwise return None.
    """
    buffer = buffer.strip()
    try:
        obj, index = json_decoder.raw_decode(buffer)
        ws: re.Pattern = json.decoder.WHITESPACE  # type: ignore[attr-defined]
        m = ws.match(buffer, index)
        rest = buffer[m.end() :] if m else buffer[index:]
        return obj, rest
    except ValueError:
        return None


def json_stream(stream: t.Generator[str | bytes]) -> t.Generator[t.Any]:
    """Given a stream of text, return a stream of json objects.
    This handles streams which are inconsistently buffered (some entries may
    be newline delimited, and others are not).
    """
    return split_buffer(stream, json_splitter, json_decoder.decode)


def line_splitter(buffer: str, separator: str = "\n") -> tuple[str, str] | None:
    index = buffer.find(str(separator))
    if index == -1:
        return None
    return buffer[: index + 1], buffer[index + 1 :]


def split_buffer(
    stream: t.Generator[str | bytes],
    splitter: Callable[[str], tuple[_T, str] | None],
    decoder: Callable[[str], _T],
) -> t.Generator[_T | str]:
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.
    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    buffered = ""

    for data in stream_as_text(stream):
        buffered += data
        while True:
            buffer_split = splitter(buffered)
            if buffer_split is None:
                break

            item, buffered = buffer_split
            yield item

    if buffered:
        try:
            yield decoder(buffered)
        except Exception as e:
            raise StreamParseError(e) from e
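# A small sketch of how json_stream() reassembles inconsistently buffered
# chunks into whole JSON objects (input values invented):
#
#   chunks = iter(['{"status": "pull', 'ing"}{"id": 1}', '\n{"id": 2}'])
#   list(json_stream(chunks))
#   # -> [{"status": "pulling"}, {"id": 1}, {"id": 2}]
#
# The incomplete first chunk stays buffered until the second chunk completes
# it, and both newline-delimited and back-to-back objects are handled.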
@@ -0,0 +1,136 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import re
import typing as t

if t.TYPE_CHECKING:
    from collections.abc import Collection, Sequence


PORT_SPEC = re.compile(
    "^"  # Match full string
    "("  # External part
    r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?"  # Address
    r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
    ")?"
    r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
    "(?P<proto>/(udp|tcp|sctp))?"  # Protocol
    "$"  # Match full string
)


def add_port_mapping(
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
    internal_port: str,
    external: str | tuple[str, str | None] | None,
) -> None:
    if internal_port in port_bindings:
        port_bindings[internal_port].append(external)
    else:
        port_bindings[internal_port] = [external]


def add_port(
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
    internal_port_range: list[str],
    external_range: list[str] | list[tuple[str, str | None]] | None,
) -> None:
    if external_range is None:
        for internal_port in internal_port_range:
            add_port_mapping(port_bindings, internal_port, None)
    else:
        for internal_port, external_port in zip(internal_port_range, external_range):
            # mypy loses the exact type of external_port elements for some reason...
            add_port_mapping(port_bindings, internal_port, external_port)  # type: ignore


def build_port_bindings(
    ports: Collection[str],
) -> dict[str, list[str | tuple[str, str | None] | None]]:
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]] = {}
    for port in ports:
        internal_port_range, external_range = split_port(port)
        add_port(port_bindings, internal_port_range, external_range)
    return port_bindings


def _raise_invalid_port(port: str) -> t.NoReturn:
    raise ValueError(
        f'Invalid port "{port}", should be '
        "[[remote_ip:]remote_port[-remote_port]:]"
        "port[/protocol]"
    )


@t.overload
def port_range(
    start: str,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str]: ...


@t.overload
def port_range(
    start: str | None,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str] | None: ...


def port_range(
    start: str | None,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str] | None:
    if start is None:
        return start
    if end is None:
        return [f"{start}{proto}"]
    if randomly_available_port:
        return [f"{start}-{end}{proto}"]
    return [f"{port}{proto}" for port in range(int(start), int(end) + 1)]


def split_port(
    port: str | int,
) -> tuple[list[str], list[str] | list[tuple[str, str | None]] | None]:
    port = str(port)
    match = PORT_SPEC.match(port)
    if match is None:
        _raise_invalid_port(port)
    parts = match.groupdict()

    host: str | None = parts["host"]
    proto: str = parts["proto"] or ""
    int_p: str = parts["int"]
    ext_p: str = parts["ext"]
    internal: list[str] = port_range(int_p, parts["int_end"], proto)  # type: ignore
    external = port_range(ext_p or None, parts["ext_end"], "", len(internal) == 1)

    if host is None:
        if (external is not None and len(internal) != len(external)) or ext_p == "":
            raise ValueError("Port ranges don't match in length")
        return internal, external
    external_or_none: Sequence[str | None]
    if not external:
        external_or_none = [None] * len(internal)
    else:
        external_or_none = external
    if len(internal) != len(external_or_none):
        raise ValueError("Port ranges don't match in length")
    return internal, [(host, ext_port) for ext_port in external_or_none]
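# Illustrative results of split_port() for a few common specs (derived from
# the implementation above):
#
#   split_port("80")
#   # -> (["80"], None)
#   split_port("8080:80/tcp")
#   # -> (["80/tcp"], ["8080"])
#   split_port("127.0.0.1:8080:80")
#   # -> (["80"], [("127.0.0.1", "8080")])
#   split_port("1000-1001:2000-2001")
#   # -> (["2000", "2001"], ["1000", "1001"])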
@@ -0,0 +1,98 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from .utils import format_environment


class ProxyConfig(dict):
    """
    Hold the client's proxy configuration
    """

    @property
    def http(self) -> str | None:
        return self.get("http")

    @property
    def https(self) -> str | None:
        return self.get("https")

    @property
    def ftp(self) -> str | None:
        return self.get("ftp")

    @property
    def no_proxy(self) -> str | None:
        return self.get("no_proxy")

    @staticmethod
    def from_dict(config: dict[str, str]) -> ProxyConfig:
        """
        Instantiate a new ProxyConfig from a dictionary that represents a
        client configuration, as described in `the documentation`_.

        .. _the documentation:
            https://docs.docker.com/network/proxy/#configure-the-docker-client
        """
        return ProxyConfig(
            http=config.get("httpProxy"),
            https=config.get("httpsProxy"),
            ftp=config.get("ftpProxy"),
            no_proxy=config.get("noProxy"),
        )

    def get_environment(self) -> dict[str, str]:
        """
        Return a dictionary representing the environment variables used to
        set the proxy settings.
        """
        env = {}
        if self.http:
            env["http_proxy"] = env["HTTP_PROXY"] = self.http
        if self.https:
            env["https_proxy"] = env["HTTPS_PROXY"] = self.https
        if self.ftp:
            env["ftp_proxy"] = env["FTP_PROXY"] = self.ftp
        if self.no_proxy:
            env["no_proxy"] = env["NO_PROXY"] = self.no_proxy
        return env

    @t.overload
    def inject_proxy_environment(self, environment: list[str]) -> list[str]: ...

    @t.overload
    def inject_proxy_environment(
        self, environment: list[str] | None
    ) -> list[str] | None: ...

    def inject_proxy_environment(
        self, environment: list[str] | None
    ) -> list[str] | None:
        """
        Given a list of strings representing environment variables, prepend the
        environment variables corresponding to the proxy settings.
        """
        if not self:
            return environment

        proxy_env = format_environment(self.get_environment())
        if not environment:
            return proxy_env
        # It is important to prepend our variables, because we want the
        # variables defined in "environment" to take precedence.
        return proxy_env + environment

    def __str__(self) -> str:
        return f"ProxyConfig(http={self.http}, https={self.https}, ftp={self.ftp}, no_proxy={self.no_proxy})"
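# A short sketch of the round trip, assuming format_environment() renders
# each entry as "KEY=value" (proxy URL invented):
#
#   cfg = ProxyConfig.from_dict({"httpProxy": "http://proxy.example:3128"})
#   cfg.get_environment()
#   # -> {"http_proxy": "http://proxy.example:3128",
#   #     "HTTP_PROXY": "http://proxy.example:3128"}
#   cfg.inject_proxy_environment(["FOO=bar"])
#   # -> ["http_proxy=http://proxy.example:3128",
#   #     "HTTP_PROXY=http://proxy.example:3128", "FOO=bar"]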
@@ -0,0 +1,242 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import errno
import os
import select
import socket as pysocket
import struct
import typing as t

from ..transport.npipesocket import NpipeSocket

if t.TYPE_CHECKING:
    from collections.abc import Sequence

    from ..._socket_helper import SocketLike


STDOUT = 1
STDERR = 2


class SocketError(Exception):
    pass


# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109


def read(socket: SocketLike, n: int = 4096) -> bytes | None:
    """
    Reads at most n bytes from socket
    """

    recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)

    if not isinstance(socket, NpipeSocket):  # type: ignore[unreachable]
        if not hasattr(select, "poll"):
            # Limited to 1024
            select.select([socket], [], [])
        else:
            poll = select.poll()
            poll.register(socket, select.POLLIN | select.POLLPRI)
            poll.poll()

    try:
        if hasattr(socket, "recv"):
            return socket.recv(n)
        if isinstance(socket, pysocket.SocketIO):  # type: ignore
            return socket.read(n)  # type: ignore[unreachable]
        return os.read(socket.fileno(), n)
    except EnvironmentError as e:
        if e.errno not in recoverable_errors:
            raise
        return None  # TODO ???
    except Exception as e:
        is_pipe_ended = (
            isinstance(socket, NpipeSocket)  # type: ignore[unreachable]
            and len(e.args) > 0
            and e.args[0] == NPIPE_ENDED
        )
        if is_pipe_ended:
            # npipes do not support duplex sockets, so we interpret
            # a PIPE_ENDED error as a close operation (0-length read).
            return b""
        raise


def read_exactly(socket: SocketLike, n: int) -> bytes:
    """
    Reads exactly n bytes from socket
    Raises SocketError if there is not enough data
    """
    data = b""
    while len(data) < n:
        next_data = read(socket, n - len(data))
        if not next_data:
            raise SocketError("Unexpected EOF")
        data += next_data
    return data


def next_frame_header(socket: SocketLike) -> tuple[int, int]:
    """
    Returns the stream and size of the next frame of data waiting to be read
    from socket, according to the protocol defined here:

    https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
    """
    try:
        data = read_exactly(socket, 8)
    except SocketError:
        return (-1, -1)

    stream, actual = struct.unpack(">BxxxL", data)
    return (stream, actual)


def frames_iter(socket: SocketLike, tty: bool) -> t.Generator[tuple[int, bytes]]:
    """
    Return a generator of frames read from socket. A frame is a tuple where
    the first item is the stream number and the second item is a chunk of data.

    If the tty setting is enabled, the streams are multiplexed into the stdout
    stream.
    """
    if tty:
        return ((STDOUT, frame) for frame in frames_iter_tty(socket))
    return frames_iter_no_tty(socket)


def frames_iter_no_tty(socket: SocketLike) -> t.Generator[tuple[int, bytes]]:
    """
    Returns a generator of data read from the socket when the tty setting is
    not enabled.
    """
    while True:
        (stream, n) = next_frame_header(socket)
        if n < 0:
            break
        while n > 0:
            result = read(socket, n)
            if result is None:
                continue
            data_length = len(result)
            if data_length == 0:
                # We have reached EOF
                return
            n -= data_length
            yield (stream, result)


def frames_iter_tty(socket: SocketLike) -> t.Generator[bytes]:
    """
    Return a generator of data read from the socket when the tty setting is
    enabled.
    """
    while True:
        result = read(socket)
        if not result:
            # We have reached EOF
            return
        yield result


@t.overload
def consume_socket_output(
    frames: Sequence[bytes] | t.Generator[bytes], demux: t.Literal[False] = False
) -> bytes: ...


@t.overload
def consume_socket_output(
    frames: (
        Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: t.Literal[True],
) -> tuple[bytes, bytes]: ...


@t.overload
def consume_socket_output(
    frames: (
        Sequence[bytes]
        | Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[bytes]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: bool = False,
) -> bytes | tuple[bytes, bytes]: ...


def consume_socket_output(
    frames: (
        Sequence[bytes]
        | Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[bytes]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: bool = False,
) -> bytes | tuple[bytes, bytes]:
    """
    Iterate through frames read from the socket and return the result.

    Args:

        demux (bool):
            If False, stdout and stderr are multiplexed, and the result is the
            concatenation of all the frames. If True, the streams are
            demultiplexed, and the result is a 2-tuple where each item is the
            concatenation of frames belonging to the same stream.
    """
    if demux is False:
        # If the streams are multiplexed, the generator returns strings, that
        # we just need to concatenate.
        return b"".join(frames)  # type: ignore

    # If the streams are demultiplexed, the generator yields tuples
    # (stdout, stderr)
    out: list[bytes | None] = [None, None]
    frame: tuple[bytes | None, bytes | None]
    for frame in frames:  # type: ignore
        # It is guaranteed that for each frame, one and only one stream
        # is not None.
        if frame == (None, None):
            raise AssertionError(f"frame must not be (None, None), but got {frame}")
        if frame[0] is not None:
            if out[0] is None:
                out[0] = frame[0]
            else:
                out[0] += frame[0]
        else:
            if out[1] is None:
                out[1] = frame[1]
            else:
                out[1] += frame[1]  # type: ignore[operator]
    return tuple(out)  # type: ignore


def demux_adaptor(stream_id: int, data: bytes) -> tuple[bytes | None, bytes | None]:
    """
    Utility to demultiplex stdout and stderr when reading frames from the
    socket.
    """
    if stream_id == STDOUT:
        return (data, None)
    if stream_id == STDERR:
        return (None, data)
    raise ValueError(f"{stream_id} is not a valid stream")
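# Illustrative behavior of the demux helpers above (byte values invented):
#
#   demux_adaptor(STDOUT, b"out")   # -> (b"out", None)
#   demux_adaptor(STDERR, b"err")   # -> (None, b"err")
#
#   consume_socket_output([b"a", b"b"], demux=False)
#   # -> b"ab"
#   consume_socket_output([(b"o1", None), (None, b"e1"), (b"o2", None)], demux=True)
#   # -> (b"o1o2", b"e1")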
@@ -0,0 +1,519 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import base64
import collections
import json
import os
import os.path
import shlex
import string
import typing as t
from urllib.parse import urlparse, urlunparse

from ansible_collections.community.docker.plugins.module_utils._version import (
    StrictVersion,
)

from .. import errors
from ..constants import (
    BYTE_UNITS,
    DEFAULT_HTTP_HOST,
    DEFAULT_NPIPE,
    DEFAULT_UNIX_SOCKET,
)
from ..tls import TLSConfig

if t.TYPE_CHECKING:
    from collections.abc import Mapping, Sequence


URLComponents = collections.namedtuple(
    "URLComponents",
    "scheme netloc url params query fragment",
)


def decode_json_header(header: str | bytes) -> dict[str, t.Any]:
    data = base64.b64decode(header).decode("utf-8")
    return json.loads(data)


def compare_version(v1: str, v2: str) -> t.Literal[-1, 0, 1]:
    """Compare docker versions

    >>> v1 = '1.9'
    >>> v2 = '1.10'
    >>> compare_version(v1, v2)
    1
    >>> compare_version(v2, v1)
    -1
    >>> compare_version(v2, v2)
    0
    """
    s1 = StrictVersion(v1)
    s2 = StrictVersion(v2)
    if s1 == s2:
        return 0
    if s1 > s2:
        return -1
    return 1


def version_lt(v1: str, v2: str) -> bool:
    return compare_version(v1, v2) > 0


def version_gte(v1: str, v2: str) -> bool:
    return not version_lt(v1, v2)
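# Note the inverted sign convention of compare_version(): it returns 1 when
# v1 is the *older* version. Illustrative values:
#
#   compare_version("1.9", "1.10")   # -> 1  (1.9 is older)
#   version_lt("1.9", "1.10")        # -> True
#   version_gte("1.10", "1.9")       # -> True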
def _convert_port_binding(
    binding: (
        tuple[str, str | int | None]
        | tuple[str | int | None]
        | dict[str, str]
        | str
        | int
    ),
) -> dict[str, str]:
    result = {"HostIp": "", "HostPort": ""}
    host_port: str | int | None = ""
    if isinstance(binding, tuple):
        if len(binding) == 2:
            host_port = binding[1]  # type: ignore
            result["HostIp"] = binding[0]
        elif isinstance(binding[0], str):
            result["HostIp"] = binding[0]
        else:
            host_port = binding[0]
    elif isinstance(binding, dict):
        if "HostPort" in binding:
            host_port = binding["HostPort"]
            if "HostIp" in binding:
                result["HostIp"] = binding["HostIp"]
        else:
            raise ValueError(binding)
    else:
        host_port = binding

    result["HostPort"] = str(host_port) if host_port is not None else ""
    return result


def convert_port_bindings(
    port_bindings: dict[
        str | int,
        tuple[str, str | int | None]
        | tuple[str | int | None]
        | dict[str, str]
        | str
        | int
        | list[
            tuple[str, str | int | None]
            | tuple[str | int | None]
            | dict[str, str]
            | str
            | int
        ],
    ],
) -> dict[str, list[dict[str, str]]]:
    result = {}
    for k, v in port_bindings.items():
        key = str(k)
        if "/" not in key:
            key += "/tcp"
        if isinstance(v, list):
            result[key] = [_convert_port_binding(binding) for binding in v]
        else:
            result[key] = [_convert_port_binding(v)]
    return result
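# Illustrative mapping produced by convert_port_bindings() (values invented):
#
#   convert_port_bindings({8080: ("127.0.0.1", 80), "53/udp": 5353})
#   # -> {"8080/tcp": [{"HostIp": "127.0.0.1", "HostPort": "80"}],
#   #     "53/udp": [{"HostIp": "", "HostPort": "5353"}]}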
def convert_volume_binds(
    binds: (
        list[str]
        | Mapping[
            str | bytes, dict[str, str | bytes] | dict[str, str] | bytes | str | int
        ]
    ),
) -> list[str]:
    if isinstance(binds, list):
        return binds  # type: ignore

    result = []
    for k, v in binds.items():
        if isinstance(k, bytes):
            k = k.decode("utf-8")

        if isinstance(v, dict):
            if "ro" in v and "mode" in v:
                raise ValueError(f'Binding cannot contain both "ro" and "mode": {v!r}')

            bind = v["bind"]
            if isinstance(bind, bytes):
                bind = bind.decode("utf-8")

            if "ro" in v:
                mode = "ro" if v["ro"] else "rw"
            elif "mode" in v:
                mode = v["mode"]  # type: ignore # TODO
            else:
                mode = "rw"

            # NOTE: this is only relevant for Linux hosts
            # (does not apply in Docker Desktop)
            propagation_modes = [
                "rshared",
                "shared",
                "rslave",
                "slave",
                "rprivate",
                "private",
            ]
            if "propagation" in v and v["propagation"] in propagation_modes:
                if mode:
                    mode = ",".join([mode, v["propagation"]])  # type: ignore # TODO
                else:
                    mode = v["propagation"]  # type: ignore # TODO

            result.append(f"{k}:{bind}:{mode}")
        else:
            if isinstance(v, bytes):
                v = v.decode("utf-8")
            result.append(f"{k}:{v}:rw")
    return result
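# Illustrative bind strings produced by convert_volume_binds() (paths invented):
#
#   convert_volume_binds({"/host/data": {"bind": "/data", "mode": "ro"}})
#   # -> ["/host/data:/data:ro"]
#   convert_volume_binds({"/host/logs": "/logs"})
#   # -> ["/host/logs:/logs:rw"]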
def convert_tmpfs_mounts(tmpfs: dict[str, str] | list[str]) -> dict[str, str]:
    if isinstance(tmpfs, dict):
        return tmpfs

    if not isinstance(tmpfs, list):
        raise ValueError(
            f"Expected tmpfs value to be either a list or a dict, found: {type(tmpfs).__name__}"
        )

    result = {}
    for mount in tmpfs:
        if isinstance(mount, str):
            if ":" in mount:
                name, options = mount.split(":", 1)
            else:
                name = mount
                options = ""

        else:
            raise ValueError(
                f"Expected item in tmpfs list to be a string, found: {type(mount).__name__}"
            )

        result[name] = options
    return result


def convert_service_networks(
    networks: list[str | dict[str, str]],
) -> list[dict[str, str]]:
    if not networks:
        return networks  # type: ignore
    if not isinstance(networks, list):
        raise TypeError("networks parameter must be a list.")

    result = []
    for n in networks:
        if isinstance(n, str):
            n = {"Target": n}
        result.append(n)
    return result


def parse_repository_tag(repo_name: str) -> tuple[str, str | None]:
    parts = repo_name.rsplit("@", 1)
    if len(parts) == 2:
        return tuple(parts)  # type: ignore
    parts = repo_name.rsplit(":", 1)
    if len(parts) == 2 and "/" not in parts[1]:
        return tuple(parts)  # type: ignore
    return repo_name, None
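# Illustrative splits performed by parse_repository_tag() (names invented):
#
#   parse_repository_tag("ubuntu:22.04")
#   # -> ("ubuntu", "22.04")
#   parse_repository_tag("registry.example:5000/ubuntu")
#   # -> ("registry.example:5000/ubuntu", None)   (the ":" is part of the host)
#   parse_repository_tag("ubuntu@sha256:cafe")
#   # -> ("ubuntu", "sha256:cafe")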
def parse_host(addr: str | None, is_win32: bool = False, tls: bool = False) -> str:
    # Sensible defaults
    if not addr and is_win32:
        return DEFAULT_NPIPE
    if not addr or addr.strip() == "unix://":
        return DEFAULT_UNIX_SOCKET

    addr = addr.strip()

    parsed_url = urlparse(addr)
    proto = parsed_url.scheme
    if not proto or any(x not in string.ascii_letters + "+" for x in proto):
        # https://bugs.python.org/issue754016
        parsed_url = urlparse("//" + addr, "tcp")
        proto = "tcp"

    if proto == "fd":
        raise errors.DockerException("fd protocol is not implemented")

    # These protos are valid aliases for our library but not for the
    # official spec
    if proto in ("http", "https"):
        tls = proto == "https"
        proto = "tcp"
    elif proto == "http+unix":
        proto = "unix"

    if proto not in ("tcp", "unix", "npipe", "ssh"):
        raise errors.DockerException(f"Invalid bind address protocol: {addr}")

    if proto == "tcp" and not parsed_url.netloc:
        # "tcp://" is exceptionally disallowed by convention;
        # omitting a hostname for other protocols is fine
        raise errors.DockerException(f"Invalid bind address format: {addr}")

    if any(
        [parsed_url.params, parsed_url.query, parsed_url.fragment, parsed_url.password]
    ):
        raise errors.DockerException(f"Invalid bind address format: {addr}")

    if parsed_url.path and proto == "ssh":
        raise errors.DockerException(
            f"Invalid bind address format: no path allowed for this protocol: {addr}"
        )
    path = parsed_url.path
    if proto == "unix" and parsed_url.hostname is not None:
        # For legacy reasons, we consider unix://path
        # to be valid and equivalent to unix:///path
        path = f"{parsed_url.hostname}/{path}"

    netloc = parsed_url.netloc
    if proto in ("tcp", "ssh"):
        port = parsed_url.port or 0
        if port <= 0:
            # 2376 is the conventional TLS daemon port, 2375 the plain one
            port = 22 if proto == "ssh" else (2376 if tls else 2375)
            netloc = f"{parsed_url.netloc}:{port}"

        if not parsed_url.hostname:
            netloc = f"{DEFAULT_HTTP_HOST}:{port}"

    # Rewrite schemes to fit library internals (requests adapters)
    if proto == "tcp":
        proto = f"http{'s' if tls else ''}"
    elif proto == "unix":
        proto = "http+unix"

    if proto in ("http+unix", "npipe"):
        return f"{proto}://{path}".rstrip("/")
    return urlunparse(
        URLComponents(
            scheme=proto,
            netloc=netloc,
            url=path,
            params="",
            query="",
            fragment="",
        )
    ).rstrip("/")
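# Illustrative rewrites performed by parse_host() (addresses invented;
# DEFAULT_UNIX_SOCKET is whatever ..constants defines):
#
#   parse_host("tcp://127.0.0.1:2375")         # -> "http://127.0.0.1:2375"
#   parse_host("tcp://127.0.0.1", tls=True)    # -> "https://127.0.0.1:2376"
#   parse_host("unix:///var/run/docker.sock")  # -> "http+unix:///var/run/docker.sock"
#   parse_host("ssh://user@host")              # -> "ssh://user@host:22"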
def parse_devices(devices: Sequence[dict[str, str] | str]) -> list[dict[str, str]]:
|
||||||
|
device_list = []
|
||||||
|
for device in devices:
|
||||||
|
if isinstance(device, dict):
|
||||||
|
device_list.append(device)
|
||||||
|
continue
|
||||||
|
if not isinstance(device, str):
|
||||||
|
raise errors.DockerException(f"Invalid device type {type(device)}")
|
||||||
|
device_mapping = device.split(":")
|
||||||
|
if device_mapping:
|
||||||
|
path_on_host = device_mapping[0]
|
||||||
|
if len(device_mapping) > 1:
|
||||||
|
path_in_container = device_mapping[1]
|
||||||
|
else:
|
||||||
|
path_in_container = path_on_host
|
||||||
|
if len(device_mapping) > 2:
|
||||||
|
permissions = device_mapping[2]
|
||||||
|
else:
|
||||||
|
permissions = "rwm"
|
||||||
|
device_list.append(
|
||||||
|
{
|
||||||
|
"PathOnHost": path_on_host,
|
||||||
|
"PathInContainer": path_in_container,
|
||||||
|
"CgroupPermissions": permissions,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
return device_list
|
||||||
|
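
# Illustrative usage (added comment, not part of the original file):
#
#   parse_devices(["/dev/sda:/dev/xvda:r"])
#   -> [{"PathOnHost": "/dev/sda", "PathInContainer": "/dev/xvda",
#        "CgroupPermissions": "r"}]
#
#   parse_devices(["/dev/snd"])  # container path and permissions default
#   -> [{"PathOnHost": "/dev/snd", "PathInContainer": "/dev/snd",
#        "CgroupPermissions": "rwm"}]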


def kwargs_from_env(
    assert_hostname: bool | None = None,
    environment: Mapping[str, str] | None = None,
) -> dict[str, t.Any]:
    if not environment:
        environment = os.environ
    host = environment.get("DOCKER_HOST")

    # empty string for cert path is the same as unset.
    cert_path = environment.get("DOCKER_CERT_PATH") or None

    # empty string for tls verify counts as "false".
    # Any other value counts as true; unset counts as false.
    tls_verify_str = environment.get("DOCKER_TLS_VERIFY")
    if tls_verify_str == "":
        tls_verify = False
    else:
        tls_verify = tls_verify_str is not None
    enable_tls = cert_path or tls_verify

    params: dict[str, t.Any] = {}

    if host:
        params["base_url"] = host

    if not enable_tls:
        return params

    if not cert_path:
        cert_path = os.path.join(os.path.expanduser("~"), ".docker")

    if not tls_verify and assert_hostname is None:
        # assert_hostname is a subset of TLS verification,
        # so if it is not set already then set it to false.
        assert_hostname = False

    params["tls"] = TLSConfig(
        client_cert=(
            os.path.join(cert_path, "cert.pem"),
            os.path.join(cert_path, "key.pem"),
        ),
        ca_cert=os.path.join(cert_path, "ca.pem"),
        verify=tls_verify,
        assert_hostname=assert_hostname,
    )

    return params
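
# Illustrative usage (added comment, not part of the original file); the
# environment values are hypothetical:
#
#   kwargs_from_env(environment={
#       "DOCKER_HOST": "tcp://127.0.0.1:2376",
#       "DOCKER_TLS_VERIFY": "1",
#       "DOCKER_CERT_PATH": "/certs",
#   })
#   -> {"base_url": "tcp://127.0.0.1:2376",
#       "tls": TLSConfig(client_cert=("/certs/cert.pem", "/certs/key.pem"),
#                        ca_cert="/certs/ca.pem", verify=True,
#                        assert_hostname=None)}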


def convert_filters(
    filters: Mapping[str, bool | str | int | list[int] | list[str] | list[str | int]],
) -> str:
    result = {}
    for k, v in filters.items():
        if isinstance(v, bool):
            v = "true" if v else "false"
        if not isinstance(v, list):
            v = [
                v,
            ]
        result[k] = [str(item) if not isinstance(item, str) else item for item in v]
    return json.dumps(result)
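
# Illustrative usage (added comment, not part of the original file):
#
#   convert_filters({"dangling": True})       -> '{"dangling": ["true"]}'
#   convert_filters({"label": ["a=b", "c"]})  -> '{"label": ["a=b", "c"]}'
#   convert_filters({"until": 42})            -> '{"until": ["42"]}'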


def parse_bytes(s: int | float | str) -> int | float:
    if isinstance(s, (int, float)):
        return s
    if len(s) == 0:
        return 0

    if s[-2:-1].isalpha() and s[-1].isalpha() and (s[-1] == "b" or s[-1] == "B"):
        s = s[:-1]
    units = BYTE_UNITS
    suffix = s[-1].lower()

    # Check if the variable is a string representation of an int
    # without a units part. Assuming that the units are bytes.
    if suffix.isdigit():
        digits_part = s
        suffix = "b"
    else:
        digits_part = s[:-1]

    if suffix in units or suffix.isdigit():
        try:
            digits = float(digits_part)
        except ValueError as exc:
            raise errors.DockerException(
                f"Failed converting the string value for memory ({digits_part}) to an integer."
            ) from exc

        # Reconvert to long for the final result
        s = int(digits * units[suffix])
    else:
        raise errors.DockerException(
            f"The specified value for memory ({s}) should specify the units."
            " The postfix should be one of the `b`, `k`, `m`, `g` characters."
        )

    return s
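
# Illustrative usage (added comment, not part of the original file), assuming
# the usual 1024-based BYTE_UNITS mapping (b=1, k=1024, m=1024**2, g=1024**3):
#
#   parse_bytes("512m")  -> 536870912
#   parse_bytes("1gb")   -> 1073741824
#   parse_bytes("42")    -> 42   # no unit: bytes are assumed
#   parse_bytes(1.5)     -> 1.5  # ints and floats pass through unchanged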


def normalize_links(links: dict[str, str] | Sequence[tuple[str, str]]) -> list[str]:
    if isinstance(links, dict):
        sorted_links = sorted(links.items())
    else:
        sorted_links = sorted(links)

    return [f"{k}:{v}" if v else k for k, v in sorted_links]
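
# Illustrative usage (added comment, not part of the original file):
#
#   normalize_links({"db": "db_alias", "cache": ""})
#   -> ["cache", "db:db_alias"]  # sorted; an empty alias collapses to the name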


def parse_env_file(env_file: str | os.PathLike) -> dict[str, str]:
    """
    Reads a line-separated environment file.
    The format of each line should be "key=value".
    """
    environment = {}

    with open(env_file, "rt", encoding="utf-8") as f:
        for line in f:
            if line[0] == "#":
                continue

            line = line.strip()
            if not line:
                continue

            parse_line = line.split("=", 1)
            if len(parse_line) == 2:
                k, v = parse_line
                environment[k] = v
            else:
                raise errors.DockerException(
                    f"Invalid line in environment file {env_file}:\n{line}"
                )

    return environment
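
# Illustrative usage (added comment, not part of the original file); the file
# name and contents are hypothetical. Given /tmp/app.env containing:
#
#   # comments and blank lines are skipped
#   FOO=bar
#   BAZ=a=b
#
# the call parse_env_file("/tmp/app.env") returns {"FOO": "bar", "BAZ": "a=b"}
# (the split on "=" happens only once, so values may contain "=").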


def split_command(command: str) -> list[str]:
    return shlex.split(command)


def format_environment(environment: Mapping[str, str | bytes | None]) -> list[str]:
    def format_env(key: str, value: str | bytes | None) -> str:
        if value is None:
            return key
        if isinstance(value, bytes):
            value = value.decode("utf-8")

        return f"{key}={value}"

    return [format_env(*var) for var in environment.items()]
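
# Illustrative usage (added comment, not part of the original file):
#
#   format_environment({"TERM": "xterm", "EMPTY": "", "UNSET": None})
#   -> ["TERM=xterm", "EMPTY=", "UNSET"]  # a None value yields just the key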


def format_extra_hosts(extra_hosts: Mapping[str, str], task: bool = False) -> list[str]:
    # Use format dictated by Swarm API if container is part of a task
    if task:
        return [f"{v} {k}" for k, v in sorted(extra_hosts.items())]

    return [f"{k}:{v}" for k, v in sorted(extra_hosts.items())]
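
# Illustrative usage (added comment, not part of the original file):
#
#   format_extra_hosts({"db.example.com": "10.0.0.1"})
#   -> ["db.example.com:10.0.0.1"]
#   format_extra_hosts({"db.example.com": "10.0.0.1"}, task=True)
#   -> ["10.0.0.1 db.example.com"]  # Swarm task format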
@@ -0,0 +1,555 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import os
import platform
import re
import sys
import traceback
import typing as t
from collections.abc import Mapping, Sequence

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE

from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TIMEOUT_SECONDS,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_COMMON_ARGS,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
    update_tls_hostname,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)

HAS_DOCKER_PY_2 = False  # pylint: disable=invalid-name
HAS_DOCKER_PY_3 = False  # pylint: disable=invalid-name
HAS_DOCKER_ERROR: None | str  # pylint: disable=invalid-name
HAS_DOCKER_TRACEBACK: None | str  # pylint: disable=invalid-name
docker_version: str | None  # pylint: disable=invalid-name

try:
    from docker import __version__ as docker_version
    from docker.errors import APIError, TLSParameterError
    from docker.tls import TLSConfig

    if LooseVersion(docker_version) >= LooseVersion("3.0.0"):
        HAS_DOCKER_PY_3 = True  # pylint: disable=invalid-name
        from docker import APIClient as Client
    elif LooseVersion(docker_version) >= LooseVersion("2.0.0"):
        HAS_DOCKER_PY_2 = True  # pylint: disable=invalid-name
        from docker import APIClient as Client
    else:
        from docker import Client  # type: ignore

except ImportError as exc:
    HAS_DOCKER_ERROR = str(exc)  # pylint: disable=invalid-name
    HAS_DOCKER_TRACEBACK = traceback.format_exc()  # pylint: disable=invalid-name
    HAS_DOCKER_PY = False  # pylint: disable=invalid-name
    docker_version = None  # pylint: disable=invalid-name
else:
    HAS_DOCKER_PY = True  # pylint: disable=invalid-name
    HAS_DOCKER_ERROR = None  # pylint: disable=invalid-name
    HAS_DOCKER_TRACEBACK = None  # pylint: disable=invalid-name


try:
    from requests.exceptions import (  # noqa: F401, pylint: disable=unused-import
        RequestException,
    )
except ImportError:
    # Either the Docker SDK for Python no longer uses requests, or the SDK is not
    # installed at all, or its requests dependency is missing. In any case, define
    # a RequestException class so that our code does not break.
    class RequestException(Exception):  # type: ignore
        pass


if t.TYPE_CHECKING:
    from collections.abc import Callable


MIN_DOCKER_VERSION = "2.0.0"


if not HAS_DOCKER_PY:
    # No Docker SDK for Python. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling
    class Client:  # type: ignore # noqa: F811, pylint: disable=function-redefined
        def __init__(self, **kwargs: t.Any) -> None:
            pass

    class APIError(Exception):  # type: ignore # noqa: F811, pylint: disable=function-redefined
        pass

    class NotFound(Exception):  # type: ignore # noqa: F811, pylint: disable=function-redefined
        pass


def _get_tls_config(
    fail_function: Callable[[str], t.NoReturn], **kwargs: t.Any
) -> TLSConfig:
    if "assert_hostname" in kwargs and LooseVersion(docker_version) >= LooseVersion(
        "7.0.0b1"
    ):
        assert_hostname = kwargs.pop("assert_hostname")
        if assert_hostname is not None:
            fail_function(
                "tls_hostname is not compatible with Docker SDK for Python 7.0.0+. You are using"
                f" Docker SDK for Python {docker_version}. The tls_hostname option (value: {assert_hostname})"
                " has either been set directly or with the environment variable DOCKER_TLS_HOSTNAME."
                " Make sure it is not set, or switch to an older version of Docker SDK for Python."
            )
    # Filter out all None parameters
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
    try:
        return TLSConfig(**kwargs)
    except TLSParameterError as exc:
        fail_function(f"TLS config error: {exc}")


def is_using_tls(auth_data: dict[str, t.Any]) -> bool:
    return auth_data["tls_verify"] or auth_data["tls"]


def get_connect_params(
    auth_data: dict[str, t.Any], fail_function: Callable[[str], t.NoReturn]
) -> dict[str, t.Any]:
    if is_using_tls(auth_data):
        auth_data["docker_host"] = auth_data["docker_host"].replace(
            "tcp://", "https://"
        )

    result = {
        "base_url": auth_data["docker_host"],
        "version": auth_data["api_version"],
        "timeout": auth_data["timeout"],
    }

    if auth_data["tls_verify"]:
        # TLS with verification
        tls_config: dict[str, t.Any] = {
            "verify": True,
        }
        if auth_data["tls_hostname"] is not None:
            tls_config["assert_hostname"] = auth_data["tls_hostname"]
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        if auth_data["cacert_path"]:
            tls_config["ca_cert"] = auth_data["cacert_path"]
        result["tls"] = _get_tls_config(fail_function=fail_function, **tls_config)
    elif auth_data["tls"]:
        # TLS without verification
        tls_config = {
            "verify": False,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        result["tls"] = _get_tls_config(fail_function=fail_function, **tls_config)

    if auth_data.get("use_ssh_client"):
        if LooseVersion(docker_version) < LooseVersion("4.4.0"):
            fail_function(
                "use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer"
            )
        result["use_ssh_client"] = True

    return result
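
# Illustrative sketch (added comment, not part of the original file) of the
# mapping performed above; the auth_data values and some_fail are hypothetical:
#
#   get_connect_params({"docker_host": "tcp://10.0.0.1:2376", "tls": False,
#                       "tls_verify": True, "tls_hostname": None,
#                       "cert_path": None, "key_path": None, "cacert_path": None,
#                       "api_version": "auto", "timeout": 60},
#                      fail_function=some_fail)
#   -> {"base_url": "https://10.0.0.1:2376", "version": "auto",
#       "timeout": 60, "tls": TLSConfig(verify=True)}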


DOCKERPYUPGRADE_SWITCH_TO_DOCKER = (
    "Try `pip uninstall docker-py` followed by `pip install docker`."
)
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."


class AnsibleDockerClientBase(Client):
    def __init__(
        self,
        min_docker_version: str | None = None,
        min_docker_api_version: str | None = None,
    ) -> None:
        if min_docker_version is None:
            min_docker_version = MIN_DOCKER_VERSION

        if not HAS_DOCKER_PY:
            msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0")
            msg = f"{msg}, for example via `pip install docker`. The error was: {HAS_DOCKER_ERROR}"
            self.fail(msg, exception=HAS_DOCKER_TRACEBACK)

        # Only parse the version once we know the library imported;
        # docker_version is None when the import failed.
        self.docker_py_version = LooseVersion(docker_version)

        if self.docker_py_version < LooseVersion(min_docker_version):
            msg = (
                f"Error: Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
                f" Minimum version required is {min_docker_version}."
            )
            if self.docker_py_version < LooseVersion("2.0"):
                msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
            else:
                msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
            self.fail(msg)

        self._connect_params = get_connect_params(
            self.auth_params, fail_function=self.fail
        )

        try:
            super().__init__(**self._connect_params)
            self.docker_api_version_str = self.api_version
        except APIError as exc:
            self.fail(f"Docker API error: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error connecting: {exc}")

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        min_docker_api_version = min_docker_api_version or "1.25"
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail(
                f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
            )

    def log(self, msg: t.Any, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    @staticmethod
    def _get_value(
        param_name: str,
        param_value: t.Any,
        env_variable: str | None,
        default_value: t.Any | None,
        value_type: t.Literal["str", "bool", "int"] = "str",
    ) -> t.Any:
        if param_value is not None:
            # take module parameter value
            if value_type == "bool":
                if param_value in BOOLEANS_TRUE:
                    return True
                if param_value in BOOLEANS_FALSE:
                    return False
                return bool(param_value)
            if value_type == "int":
                return int(param_value)
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == "cert_path":
                    return os.path.join(env_value, "cert.pem")
                if param_name == "cacert_path":
                    return os.path.join(env_value, "ca.pem")
                if param_name == "key_path":
                    return os.path.join(env_value, "key.pem")
                if value_type == "bool":
                    if env_value in BOOLEANS_TRUE:
                        return True
                    if env_value in BOOLEANS_FALSE:
                        return False
                    return bool(env_value)
                if value_type == "int":
                    return int(env_value)
                return env_value

        # take the default
        return default_value
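
    # Illustrative precedence sketch (added comment, not part of the original
    # file); the environment value is hypothetical:
    #
    #   _get_value("timeout", None, "DOCKER_TIMEOUT", 60, value_type="int")
    #   -> int(os.environ["DOCKER_TIMEOUT"]) if DOCKER_TIMEOUT is set, else 60
    #   _get_value("timeout", 30, "DOCKER_TIMEOUT", 60, value_type="int")
    #   -> 30  # an explicit module parameter always wins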

    @abc.abstractmethod
    def _get_params(self) -> dict[str, t.Any]:
        pass

    @property
    def auth_params(self) -> dict[str, t.Any]:
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.

        self.log("Getting credentials")

        client_params = self._get_params()

        params = {}
        for key in DOCKER_COMMON_ARGS:
            params[key] = client_params.get(key)

        result = {
            "docker_host": self._get_value(
                "docker_host",
                params["docker_host"],
                "DOCKER_HOST",
                DEFAULT_DOCKER_HOST,
                value_type="str",
            ),
            "tls_hostname": self._get_value(
                "tls_hostname",
                params["tls_hostname"],
                "DOCKER_TLS_HOSTNAME",
                None,
                value_type="str",
            ),
            "api_version": self._get_value(
                "api_version",
                params["api_version"],
                "DOCKER_API_VERSION",
                "auto",
                value_type="str",
            ),
            "cacert_path": self._get_value(
                "cacert_path",
                params["ca_path"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "cert_path": self._get_value(
                "cert_path",
                params["client_cert"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "key_path": self._get_value(
                "key_path",
                params["client_key"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "tls": self._get_value(
                "tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool"
            ),
            "tls_verify": self._get_value(
                "validate_certs",
                params["validate_certs"],
                "DOCKER_TLS_VERIFY",
                DEFAULT_TLS_VERIFY,
                value_type="bool",
            ),
            "timeout": self._get_value(
                "timeout",
                params["timeout"],
                "DOCKER_TIMEOUT",
                DEFAULT_TIMEOUT_SECONDS,
                value_type="int",
            ),
            "use_ssh_client": self._get_value(
                "use_ssh_client",
                params["use_ssh_client"],
                None,
                False,
                value_type="bool",
            ),
        }

        if LooseVersion(docker_version) < LooseVersion("7.0.0b1"):
            update_tls_hostname(result)

        return result

    def _handle_ssl_error(self, error: Exception) -> t.NoReturn:
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            hostname = self.auth_params["tls_hostname"]
            self.fail(
                f"You asked for verification that the Docker daemon certificate's hostname matches {hostname}. "
                f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
                f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
                "setting the `tls` parameter to true."
            )
        self.fail(f"SSL Exception: {error}")


class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: dict[str, Sequence[str]] | None = None,
        min_docker_version: str | None = None,
        min_docker_api_version: str | None = None,
        option_minimal_versions: dict[str, t.Any] | None = None,
        option_minimal_versions_ignore_params: Sequence[str] | None = None,
        fail_results: dict[str, t.Any] | None = None,
    ):
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = self.module.params.get("debug")
        self.check_mode = self.module.check_mode

        super().__init__(
            min_docker_version=min_docker_version,
            min_docker_api_version=min_docker_api_version,
        )

        if option_minimal_versions is not None:
            self._get_minimal_versions(
                option_minimal_versions, option_minimal_versions_ignore_params
            )

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )

    def _get_params(self) -> dict[str, t.Any]:
        return self.module.params

    def _get_minimal_versions(
        self,
        option_minimal_versions: dict[str, t.Any],
        ignore_params: Sequence[str] | None = None,
    ) -> None:
        self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
        for option in self.module.argument_spec:
            if ignore_params is not None and option in ignore_params:
                continue
            self.option_minimal_versions[option] = {}
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_py = True
            support_docker_api = True
            if "docker_py_version" in data:
                support_docker_py = self.docker_py_version >= LooseVersion(
                    data["docker_py_version"]
                )
            if "docker_api_version" in data:
                support_docker_api = self.docker_api_version >= LooseVersion(
                    data["docker_api_version"]
                )
            data["supported"] = support_docker_py and support_docker_api
            # Fail if option is not supported but used
            if not data["supported"]:
                # Test whether option is specified
                if "detect_usage" in data:
                    used = data["detect_usage"](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and "default" in self.module.argument_spec[option]:
                        used = (
                            self.module.params[option]
                            != self.module.argument_spec[option]["default"]
                        )
                if used:
                    # If the option is used, compose error message.
                    if "usage_msg" in data:
                        usg = data["usage_msg"]
                    else:
                        usg = f"set {option} option"
                    if not support_docker_api:
                        msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
                    elif not support_docker_py:
                        msg = (
                            f"Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
                            f" Minimum version required is {data['docker_py_version']} to {usg}. {DOCKERPYUPGRADE_UPGRADE_DOCKER}"
                        )
                    else:
                        # should not happen
                        msg = f"Cannot {usg} with your configuration."
                    self.fail(msg)

    def report_warnings(
        self, result: t.Any, warnings_key: Sequence[str] | None = None
    ) -> None:
        """
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
        """
        if warnings_key is None:
            warnings_key = ["Warnings"]
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
        if isinstance(result, str):
            # str is itself a Sequence, so it must be handled first; otherwise
            # a string warning would be reported character by character.
            if result:
                self.module.warn(f"Docker warning: {result}")
        elif isinstance(result, Sequence):
            for warning in result:
                self.module.warn(f"Docker warning: {warning}")
@@ -0,0 +1,729 @@
# Copyright 2016 Red Hat | Ansible
# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import os
import re
import typing as t
from collections.abc import Mapping, Sequence

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE

from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)

try:
    from requests.exceptions import (  # noqa: F401, pylint: disable=unused-import
        RequestException,
        SSLError,
    )
except ImportError:
    # Define an exception class RequestException so that our code does not break.
    class RequestException(Exception):  # type: ignore
        pass


from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
    APIClient as Client,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    MissingRequirementException,
    NotFound,
    TLSParameterError,
)
from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
    convert_filters,
    parse_repository_tag,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TIMEOUT_SECONDS,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_COMMON_ARGS,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
    update_tls_hostname,
)

if t.TYPE_CHECKING:
    from collections.abc import Callable


def _get_tls_config(
    fail_function: Callable[[str], t.NoReturn], **kwargs: t.Any
) -> TLSConfig:
    try:
        return TLSConfig(**kwargs)
    except TLSParameterError as exc:
        fail_function(f"TLS config error: {exc}")


def is_using_tls(auth_data: dict[str, t.Any]) -> bool:
    return auth_data["tls_verify"] or auth_data["tls"]


def get_connect_params(
    auth_data: dict[str, t.Any], fail_function: Callable[[str], t.NoReturn]
) -> dict[str, t.Any]:
    if is_using_tls(auth_data):
        auth_data["docker_host"] = auth_data["docker_host"].replace(
            "tcp://", "https://"
        )

    result = {
        "base_url": auth_data["docker_host"],
        "version": auth_data["api_version"],
        "timeout": auth_data["timeout"],
    }

    if auth_data["tls_verify"]:
        # TLS with verification
        tls_config = {
            "verify": True,
            "assert_hostname": auth_data["tls_hostname"],
            "fail_function": fail_function,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        if auth_data["cacert_path"]:
            tls_config["ca_cert"] = auth_data["cacert_path"]
        result["tls"] = _get_tls_config(**tls_config)
    elif auth_data["tls"]:
        # TLS without verification
        tls_config = {
            "verify": False,
            "fail_function": fail_function,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        result["tls"] = _get_tls_config(**tls_config)

    if auth_data.get("use_ssh_client"):
        result["use_ssh_client"] = True

    return result


class AnsibleDockerClientBase(Client):
    def __init__(self, min_docker_api_version: str | None = None) -> None:
        self._connect_params = get_connect_params(
            self.auth_params, fail_function=self.fail
        )

        try:
            super().__init__(**self._connect_params)
            self.docker_api_version_str = self.api_version
        except MissingRequirementException as exc:
            self.fail(
                missing_required_lib(exc.requirement), exception=exc.import_exception
            )
        except APIError as exc:
            self.fail(f"Docker API error: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error connecting: {exc}")

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        min_docker_api_version = min_docker_api_version or "1.25"
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail(
                f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
            )

    def log(self, msg: t.Any, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    @staticmethod
    def _get_value(
        param_name: str,
        param_value: t.Any,
        env_variable: str | None,
        default_value: t.Any | None,
        value_type: t.Literal["str", "bool", "int"] = "str",
    ) -> t.Any:
        if param_value is not None:
            # take module parameter value
            if value_type == "bool":
                if param_value in BOOLEANS_TRUE:
                    return True
                if param_value in BOOLEANS_FALSE:
                    return False
                return bool(param_value)
            if value_type == "int":
                return int(param_value)
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == "cert_path":
                    return os.path.join(env_value, "cert.pem")
                if param_name == "cacert_path":
                    return os.path.join(env_value, "ca.pem")
                if param_name == "key_path":
                    return os.path.join(env_value, "key.pem")
                if value_type == "bool":
                    if env_value in BOOLEANS_TRUE:
                        return True
                    if env_value in BOOLEANS_FALSE:
                        return False
                    return bool(env_value)
                if value_type == "int":
                    return int(env_value)
                return env_value

        # take the default
        return default_value

    @abc.abstractmethod
    def _get_params(self) -> dict[str, t.Any]:
        pass

    @property
    def auth_params(self) -> dict[str, t.Any]:
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.

        self.log("Getting credentials")

        client_params = self._get_params()

        params = {}
        for key in DOCKER_COMMON_ARGS:
            params[key] = client_params.get(key)

        result = {
            "docker_host": self._get_value(
                "docker_host",
                params["docker_host"],
                "DOCKER_HOST",
                DEFAULT_DOCKER_HOST,
                value_type="str",
            ),
            "tls_hostname": self._get_value(
                "tls_hostname",
                params["tls_hostname"],
                "DOCKER_TLS_HOSTNAME",
                None,
                value_type="str",
            ),
            "api_version": self._get_value(
                "api_version",
                params["api_version"],
                "DOCKER_API_VERSION",
                "auto",
                value_type="str",
            ),
            "cacert_path": self._get_value(
                "cacert_path",
                params["ca_path"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "cert_path": self._get_value(
                "cert_path",
                params["client_cert"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "key_path": self._get_value(
                "key_path",
                params["client_key"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "tls": self._get_value(
                "tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool"
            ),
            "tls_verify": self._get_value(
                "validate_certs",
                params["validate_certs"],
                "DOCKER_TLS_VERIFY",
                DEFAULT_TLS_VERIFY,
                value_type="bool",
            ),
            "timeout": self._get_value(
                "timeout",
                params["timeout"],
                "DOCKER_TIMEOUT",
                DEFAULT_TIMEOUT_SECONDS,
                value_type="int",
            ),
            "use_ssh_client": self._get_value(
                "use_ssh_client",
                params["use_ssh_client"],
                None,
                False,
                value_type="bool",
            ),
        }

        def depr(*args: t.Any, **kwargs: t.Any) -> None:
            self.deprecate(*args, **kwargs)

        update_tls_hostname(
            result,
            old_behavior=True,
            deprecate_function=depr,
            uses_tls=is_using_tls(result),
        )

        return result

    def _handle_ssl_error(self, error: Exception) -> t.NoReturn:
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            hostname = self.auth_params["tls_hostname"]
            self.fail(
                f"You asked for verification that the Docker daemon certificate's hostname matches {hostname}. "
                f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
                f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
                "setting the `tls` parameter to true."
            )
        self.fail(f"SSL Exception: {error}")

    def get_container_by_id(self, container_id: str) -> dict[str, t.Any] | None:
        try:
            self.log(f"Inspecting container Id {container_id}")
            result = self.get_json("/containers/{0}/json", container_id)
            self.log("Completed container inspection")
            return result
        except NotFound:
            return None
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting container: {exc}")

    def get_container(self, name: str | None) -> dict[str, t.Any] | None:
        """
        Lookup a container and return the inspection results.
        """
        if name is None:
            return None

        search_name = name
        if not name.startswith("/"):
            search_name = "/" + name

        result = None
        try:
            params = {
                "limit": -1,
                "all": 1,
                "size": 0,
                "trunc_cmd": 0,
            }
            containers = self.get_json("/containers/json", params=params)
            for container in containers:
                self.log(f"testing container: {container['Names']}")
                if (
                    isinstance(container["Names"], list)
                    and search_name in container["Names"]
                ):
                    result = container
                    break
                if container["Id"].startswith(name):
                    result = container
                    break
                if container["Id"] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error retrieving container list: {exc}")

        if result is None:
            return None

        return self.get_container_by_id(result["Id"])

    def get_network(
        self, name: str | None = None, network_id: str | None = None
    ) -> dict[str, t.Any] | None:
        """
        Lookup a network and return the inspection results.
        """
        if name is None and network_id is None:
            return None

        result = None

        if network_id is None:
            try:
                networks = self.get_json("/networks")
                for network in networks:
                    self.log(f"testing network: {network['Name']}")
                    if name == network["Name"]:
                        result = network
                        break
                    if network["Id"].startswith(name):
                        result = network
                        break
            except SSLError as exc:
                self._handle_ssl_error(exc)
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error retrieving network list: {exc}")

        if result is not None:
            network_id = result["Id"]

        if network_id is not None:
            try:
                self.log(f"Inspecting network Id {network_id}")
                result = self.get_json("/networks/{0}", network_id)
                self.log("Completed network inspection")
            except NotFound:
                return None
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error inspecting network: {exc}")

        return result

    def _image_lookup(self, name: str, tag: str | None) -> list[dict[str, t.Any]]:
        """
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        """
        try:
            params: dict[str, t.Any] = {
                "only_ids": 0,
                "all": 0,
            }
            if LooseVersion(self.api_version) < LooseVersion("1.25"):
                # only use "filter" on API 1.24 and under, as it is deprecated
                params["filter"] = name
            else:
                params["filters"] = convert_filters({"reference": name})
            images = self.get_json("/images/json", params=params)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error searching for image {name} - {exc}")
        if tag:
            lookup = f"{name}:{tag}"
            lookup_digest = f"{name}@{tag}"
            response = images
            images = []
            for image in response:
                tags = image.get("RepoTags")
                digests = image.get("RepoDigests")
                if (tags and lookup in tags) or (digests and lookup_digest in digests):
                    images = [image]
                    break
        return images

    def find_image(self, name: str, tag: str | None) -> dict[str, t.Any] | None:
        """
        Lookup an image (by name and tag) and return the inspection results.
        """
        if not name:
            return None

        self.log(f"Find image {name}:{tag}")
        images = self._image_lookup(name, tag)
        if not images:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == "docker.io":
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log(f"Check for docker.io image: {repo_name}")
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith("library/"):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len("library/") :]
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = f"{registry}/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images and "/" not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = f"{registry}/library/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail(f"Daemon returned more than one result for {name}:{tag}")

        if len(images) == 1:
            try:
                return self.get_json("/images/{0}/json", images[0]["Id"])
            except NotFound:
                self.log(f"Image {name}:{tag} not found.")
                return None
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error inspecting image {name}:{tag} - {exc}")

        self.log(f"Image {name}:{tag} not found.")
        return None

    def find_image_by_id(
        self, image_id: str, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Lookup an image (by ID) and return the inspection results.
        """
        if not image_id:
            return None

        self.log(f"Find image {image_id} (by ID)")
        try:
            return self.get_json("/images/{0}/json", image_id)
        except NotFound as exc:
            if not accept_missing_image:
                self.fail(f"Error inspecting image ID {image_id} - {exc}")
            self.log(f"Image {image_id} not found.")
            return None
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting image ID {image_id} - {exc}")

    @staticmethod
    def _compare_images(
        img1: dict[str, t.Any] | None, img2: dict[str, t.Any] | None
    ) -> bool:
        if img1 is None or img2 is None:
            return img1 == img2
        filter_keys = {"Metadata"}
        img1_filtered = {k: v for k, v in img1.items() if k not in filter_keys}
        img2_filtered = {k: v for k, v in img2.items() if k not in filter_keys}
        return img1_filtered == img2_filtered
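
    # Illustrative sketch (added comment, not part of the original file):
    # inspection results that differ only in the volatile "Metadata" key
    # compare as equal, so a pull that changed nothing is detected:
    #
    #   _compare_images({"Id": "sha256:abc", "Metadata": {"LastTagTime": "x"}},
    #                   {"Id": "sha256:abc", "Metadata": {"LastTagTime": "y"}})
    #   -> True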

    def pull_image(
        self, name: str, tag: str = "latest", image_platform: str | None = None
    ) -> tuple[dict[str, t.Any] | None, bool]:
        """
        Pull an image
        """
        self.log(f"Pulling image {name}:{tag}")
        old_image = self.find_image(name, tag)
        try:
            repository, image_tag = parse_repository_tag(name)
            registry, dummy_repo_name = auth.resolve_repository_name(repository)
            params = {
                "tag": tag or image_tag or "latest",
                "fromImage": repository,
            }
            if image_platform is not None:
                params["platform"] = image_platform

            headers = {}
            header = auth.get_config_header(self, registry)
            if header:
                headers["X-Registry-Auth"] = header

            response = self._post(
                self._url("/images/create"),
                params=params,
                headers=headers,
                stream=True,
                timeout=None,
            )
            self._raise_for_status(response)
            for line in self._stream_helper(response, decode=True):
                self.log(line, pretty_print=True)
                if line.get("error"):
                    if line.get("errorDetail"):
                        error_detail = line.get("errorDetail")
                        self.fail(
                            f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}"
                        )
                    else:
                        self.fail(f"Error pulling {name} - {line.get('error')}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error pulling image {name}:{tag} - {exc}")

        new_image = self.find_image(name, tag)

        return new_image, self._compare_images(old_image, new_image)


class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: dict[str, Sequence[str]] | None = None,
        min_docker_api_version: str | None = None,
        option_minimal_versions: dict[str, t.Any] | None = None,
        option_minimal_versions_ignore_params: Sequence[str] | None = None,
        fail_results: dict[str, t.Any] | None = None,
    ):
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = self.module.params.get("debug")
        self.check_mode = self.module.check_mode

        super().__init__(min_docker_api_version=min_docker_api_version)

        if option_minimal_versions is not None:
            self._get_minimal_versions(
                option_minimal_versions, option_minimal_versions_ignore_params
            )

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )

    def _get_params(self) -> dict[str, t.Any]:
        return self.module.params

    def _get_minimal_versions(
        self,
        option_minimal_versions: dict[str, t.Any],
        ignore_params: Sequence[str] | None = None,
    ) -> None:
        self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
        for option in self.module.argument_spec:
            if ignore_params is not None and option in ignore_params:
                continue
            self.option_minimal_versions[option] = {}
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_api = True
            if "docker_api_version" in data:
                support_docker_api = self.docker_api_version >= LooseVersion(
                    data["docker_api_version"]
                )
            data["supported"] = support_docker_api
            # Fail if option is not supported but used
            if not data["supported"]:
                # Test whether option is specified
                if "detect_usage" in data:
                    used = data["detect_usage"](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and "default" in self.module.argument_spec[option]:
                        used = (
                            self.module.params[option]
                            != self.module.argument_spec[option]["default"]
                        )
                if used:
                    # If the option is used, compose error message.
                    if "usage_msg" in data:
                        usg = data["usage_msg"]
                    else:
                        usg = f"set {option} option"
                    if not support_docker_api:
                        msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
                    else:
                        # should not happen
                        msg = f"Cannot {usg} with your configuration."
                    self.fail(msg)

    def report_warnings(
        self, result: t.Any, warnings_key: Sequence[str] | None = None
    ) -> None:
        """
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
        """
        if warnings_key is None:
            warnings_key = ["Warnings"]
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
|
||||||
|
for warning in result:
|
||||||
|
self.module.warn(f"Docker warning: {warning}")
|
||||||
|
elif isinstance(result, str) and result:
|
||||||
|
self.module.warn(f"Docker warning: {result}")
|
||||||
|
|
@@ -0,0 +1,489 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import json
import shlex
import typing as t

from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_text

from ansible_collections.community.docker.plugins.module_utils._api.auth import (
    resolve_repository_name,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)

if t.TYPE_CHECKING:
    from collections.abc import Mapping, Sequence


DOCKER_COMMON_ARGS = {
    "docker_cli": {"type": "path"},
    "docker_host": {
        "type": "str",
        "fallback": (env_fallback, ["DOCKER_HOST"]),
        "aliases": ["docker_url"],
    },
    "tls_hostname": {
        "type": "str",
        "fallback": (env_fallback, ["DOCKER_TLS_HOSTNAME"]),
    },
    "api_version": {
        "type": "str",
        "default": "auto",
        "fallback": (env_fallback, ["DOCKER_API_VERSION"]),
        "aliases": ["docker_api_version"],
    },
    "ca_path": {"type": "path", "aliases": ["ca_cert", "tls_ca_cert", "cacert_path"]},
    "client_cert": {"type": "path", "aliases": ["tls_client_cert", "cert_path"]},
    "client_key": {"type": "path", "aliases": ["tls_client_key", "key_path"]},
    "tls": {
        "type": "bool",
        "default": DEFAULT_TLS,
        "fallback": (env_fallback, ["DOCKER_TLS"]),
    },
    "validate_certs": {
        "type": "bool",
        "default": DEFAULT_TLS_VERIFY,
        "fallback": (env_fallback, ["DOCKER_TLS_VERIFY"]),
        "aliases": ["tls_verify"],
    },
    # "debug": {"type": "bool", "default": False},
    "cli_context": {"type": "str"},
}
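
# Illustrative note (editor's addition): "fallback": (env_fallback, ["DOCKER_HOST"])
# makes AnsibleModule read the DOCKER_HOST environment variable when the option is
# not supplied in the task. A minimal sketch of the same mechanism:
#
#     spec = {"docker_host": {"type": "str", "fallback": (env_fallback, ["DOCKER_HOST"])}}
#     module = AnsibleModule(argument_spec=spec)
#     # With DOCKER_HOST=tcp://10.0.0.5:2376 in the environment (hypothetical
#     # value) and no task argument, module.params["docker_host"] resolves to
#     # "tcp://10.0.0.5:2376".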


class DockerException(Exception):
    pass


class AnsibleDockerClientBase:
    docker_api_version_str: str | None
    docker_api_version: LooseVersion | None

    def __init__(
        self,
        common_args: dict[str, t.Any],
        min_docker_api_version: str | None = None,
        needs_api_version: bool = True,
    ) -> None:
        self._environment: dict[str, str] = {}
        if common_args["tls_hostname"]:
            self._environment["DOCKER_TLS_HOSTNAME"] = common_args["tls_hostname"]
        if common_args["api_version"] and common_args["api_version"] != "auto":
            self._environment["DOCKER_API_VERSION"] = common_args["api_version"]
        cli = common_args.get("docker_cli")
        if cli is None:
            try:
                cli = get_bin_path("docker")
            except ValueError:
                self.fail(
                    "Cannot find docker CLI in path. Please provide it explicitly with the docker_cli parameter"
                )
        self._cli = cli
        self._cli_base = [self._cli]
        docker_host = common_args["docker_host"]
        if not docker_host and not common_args["cli_context"]:
            docker_host = DEFAULT_DOCKER_HOST
        if docker_host:
            self._cli_base.extend(["--host", docker_host])
        if common_args["validate_certs"]:
            self._cli_base.append("--tlsverify")
        elif common_args["tls"]:
            self._cli_base.append("--tls")
        if common_args["ca_path"]:
            self._cli_base.extend(["--tlscacert", common_args["ca_path"]])
        if common_args["client_cert"]:
            self._cli_base.extend(["--tlscert", common_args["client_cert"]])
        if common_args["client_key"]:
            self._cli_base.extend(["--tlskey", common_args["client_key"]])
        if common_args["cli_context"]:
            self._cli_base.extend(["--context", common_args["cli_context"]])

        # `--format json` was only added as a shorthand for `--format {{ json . }}` in Docker 23.0
        dummy, self._version, dummy2 = self.call_cli_json(
            "version", "--format", "{{ json . }}", check_rc=True
        )
        self._info: dict[str, t.Any] | None = None

        if needs_api_version:
            # Verify that "Server" is a dict before indexing into it.
            if not isinstance(self._version.get("Server"), dict):
                self.fail(
                    "Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?"
                )
            api_version_string = self._version["Server"].get(
                "ApiVersion"
            ) or self._version["Server"].get("APIVersion")
            if not isinstance(api_version_string, str):
                self.fail(
                    "Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?"
                )
            self.docker_api_version_str = to_text(api_version_string)
            self.docker_api_version = LooseVersion(self.docker_api_version_str)
            min_docker_api_version = min_docker_api_version or "1.25"
            if self.docker_api_version < LooseVersion(min_docker_api_version):
                self.fail(
                    f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
                )
        else:
            self.docker_api_version_str = None
            self.docker_api_version = None
            if min_docker_api_version is not None:
                self.fail(
                    "Internal error: cannot have needs_api_version=False with min_docker_api_version not None"
                )
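
    # For reference (sketch; field names from `docker version` output, version
    # numbers hypothetical): the JSON parsed above looks roughly like
    #     {"Client": {"Version": "26.1.0", ...},
    #      "Server": {"Version": "26.1.0", "ApiVersion": "1.45", ...}}
    # so the check above compares Server.ApiVersion against
    # min_docker_api_version using LooseVersion ordering.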

    def log(self, msg: str, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    def get_cli(self) -> str:
        return self._cli

    def get_version_info(self) -> dict[str, t.Any]:
        return self._version

    def _compose_cmd(self, args: t.Sequence[str]) -> list[str]:
        return self._cli_base + list(args)

    def _compose_cmd_str(self, args: t.Sequence[str]) -> str:
        return " ".join(shlex.quote(a) for a in self._compose_cmd(args))

    @abc.abstractmethod
    def call_cli(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
    ) -> tuple[int, bytes, bytes]:
        pass

    def call_cli_json(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
        warn_on_stderr: bool = False,
    ) -> tuple[int, t.Any, bytes]:
        rc, stdout, stderr = self.call_cli(
            *args, check_rc=check_rc, data=data, cwd=cwd, environ_update=environ_update
        )
        if warn_on_stderr and stderr:
            self.warn(to_text(stderr))
        try:
            data = json.loads(stdout)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
                cmd=self._compose_cmd_str(args),
                rc=rc,
                stdout=stdout,
                stderr=stderr,
            )
        return rc, data, stderr
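
    # Usage sketch (hypothetical call site): parse a single JSON document from
    # stdout, e.g.
    #     rc, info, stderr = client.call_cli_json("info", "--format", "{{ json . }}")
    # On unparsable output the client fails the module with the raw stdout and
    # stderr attached for debugging.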

    def call_cli_json_stream(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
        warn_on_stderr: bool = False,
    ) -> tuple[int, list[t.Any], bytes]:
        rc, stdout, stderr = self.call_cli(
            *args, check_rc=check_rc, data=data, cwd=cwd, environ_update=environ_update
        )
        if warn_on_stderr and stderr:
            self.warn(to_text(stderr))
        result = []
        try:
            # The output is expected to be one JSON object per line; lines that
            # do not start with "{" are ignored.
            for line in stdout.splitlines():
                line = line.strip()
                if line.startswith(b"{"):
                    result.append(json.loads(line))
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
                cmd=self._compose_cmd_str(args),
                rc=rc,
                stdout=stdout,
                stderr=stderr,
            )
        return rc, result, stderr
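
    # For example (sketch; values hypothetical, digests elided): a command like
    # `docker image ls --format '{{ json . }}'` prints one JSON object per line,
    #     {"Repository": "debian", "Tag": "bookworm", "ID": "sha256:..."}
    #     {"Repository": "alpine", "Tag": "3.20", "ID": "sha256:..."}
    # which call_cli_json_stream() collects into a list of dicts.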

    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def warn(self, msg: str) -> None:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    def get_cli_info(self) -> dict[str, t.Any]:
        if self._info is None:
            dummy, self._info, dummy2 = self.call_cli_json(
                "info", "--format", "{{ json . }}", check_rc=True
            )
        return self._info

    def get_client_plugin_info(self, component: str) -> dict[str, t.Any] | None:
        cli_info = self.get_cli_info()
        if not isinstance(cli_info.get("ClientInfo"), dict):
            self.fail(
                "Cannot determine Docker client information. Are you maybe using podman instead of docker?"
            )
        for plugin in cli_info["ClientInfo"].get("Plugins") or []:
            if plugin.get("Name") == component:
                return plugin
        return None
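
    # For example (sketch; plugin metadata shape as reported under
    # ClientInfo.Plugins, version hypothetical):
    #     buildx = client.get_client_plugin_info("buildx")
    # returns an entry like {"Name": "buildx", "Version": "v0.14.0", ...} when
    # the buildx CLI plugin is installed, and None otherwise.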

    def _image_lookup(self, name: str, tag: str) -> list[dict[str, t.Any]]:
        """
        Including a tag in the reference filter passed to `docker image ls`
        does not work consistently. Instead, get the result set for name and
        manually check whether the tag exists.
        """
        dummy, images, dummy2 = self.call_cli_json_stream(
            "image",
            "ls",
            "--format",
            "{{ json . }}",
            "--no-trunc",
            "--filter",
            f"reference={name}",
            check_rc=True,
        )
        if tag:
            response = images
            images = []
            for image in response:
                if image.get("Tag") == tag or image.get("Digest") == tag:
                    images = [image]
                    break
        return images
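
    # Sketch (hypothetical names): _image_lookup("debian", "bookworm") lists all
    # images whose reference matches "debian", then keeps only the entry whose
    # Tag (or Digest) equals "bookworm"; the result is either [] or a
    # one-element list.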

    @t.overload
    def find_image(self, name: None, tag: str) -> None: ...

    @t.overload
    def find_image(self, name: str, tag: str) -> dict[str, t.Any] | None: ...

    def find_image(self, name: str | None, tag: str) -> dict[str, t.Any] | None:
        """
        Look up an image (by name and tag) and return the inspection results.
        """
        if not name:
            return None

        self.log(f"Find image {name}:{tag}")
        images = self._image_lookup(name, tag)
        if not images:
            # In API <= 1.20 images pulled from Docker Hub can show up under the name 'docker.io/<name>'
            registry, repo_name = resolve_repository_name(name)
            if registry == "docker.io":
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log(f"Check for docker.io image: {repo_name}")
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith("library/"):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len("library/") :]
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = f"{registry}/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images and "/" not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = f"{registry}/library/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail(f"Daemon returned more than one result for {name}:{tag}")

        if len(images) == 1:
            rc, image, stderr = self.call_cli_json("image", "inspect", images[0]["ID"])
            if not image:
                self.log(f"Image {name}:{tag} not found.")
                return None
            if rc != 0:
                self.fail(f"Error inspecting image {name}:{tag} - {to_text(stderr)}")
            return image[0]

        self.log(f"Image {name}:{tag} not found.")
        return None
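
    # Usage sketch (hypothetical names): find_image("debian", "bookworm") returns
    # the `docker image inspect` dict for the matching image, trying the
    # docker.io / library/ name variants above before giving up, or None when
    # nothing matches.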

    @t.overload
    def find_image_by_id(
        self, image_id: None, accept_missing_image: bool = False
    ) -> None: ...

    @t.overload
    def find_image_by_id(
        self, image_id: str | None, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None: ...

    def find_image_by_id(
        self, image_id: str | None, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Look up an image (by ID) and return the inspection results.
        """
        if not image_id:
            return None

        self.log(f"Find image {image_id} (by ID)")
        rc, image, stderr = self.call_cli_json("image", "inspect", image_id)
        if not image:
            if not accept_missing_image:
                self.fail(f"Error inspecting image ID {image_id} - {to_text(stderr)}")
            self.log(f"Image {image_id} not found.")
            return None
        if rc != 0:
            self.fail(f"Error inspecting image ID {image_id} - {to_text(stderr)}")
        return image[0]


class AnsibleModuleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: Mapping[str, Sequence[str]] | None = None,
        min_docker_api_version: str | None = None,
        fail_results: dict[str, t.Any] | None = None,
        needs_api_version: bool = True,
    ) -> None:
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = [
            ("docker_host", "cli_context")
        ]
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = False  # self.module.params['debug']
        self.check_mode = self.module.check_mode
        self.diff = self.module._diff

        common_args = dict((k, self.module.params[k]) for k in DOCKER_COMMON_ARGS)
        super().__init__(
            common_args,
            min_docker_api_version=min_docker_api_version,
            needs_api_version=needs_api_version,
        )
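
    # Usage sketch (hypothetical module, illustrative only): a module would
    # typically construct the client like
    #     client = AnsibleModuleDockerClient(
    #         argument_spec={"name": {"type": "str", "required": True}},
    #         supports_check_mode=True,
    #         min_docker_api_version="1.25",
    #     )
    # after which client.module is a fully initialized AnsibleModule with the
    # DOCKER_COMMON_ARGS merged in, and the docker CLI has been located and
    # version-checked by the base class constructor.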

    def call_cli(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
    ) -> tuple[int, bytes, bytes]:
        environment = self._environment.copy()
        if environ_update:
            environment.update(environ_update)
        rc, stdout, stderr = self.module.run_command(
            self._compose_cmd(args),
            binary_data=True,
            check_rc=check_rc,
            cwd=cwd,
            data=data,
            encoding=None,
            environ_update=environment,
            expand_user_and_vars=False,
            ignore_invalid_cwd=False,
        )
        return rc, stdout, stderr

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def warn(self, msg: str) -> None:
        self.module.warn(msg)

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )