Compare commits: 0.0.26...cdc09f9c46 (522 commits)
Commit list: 522 commits in this range, listed from cdc09f9c46 down to 964b1dc58a. Only the SHA1 column of the commit table was captured; per-commit author, date, and message details are not reproduced here.
.github/workflows/test.yaml (vendored, 4 lines changed)

@@ -27,9 +27,9 @@ jobs:
  # macos tests are currently failing in CI
  #- macos-latest
  golang_version:
- # TODO: add 1.21.x and tip
+ # TODO: add 1.24.x and tip
  # minimum required and latest published go_version
- - "1.20"
+ - "1.23"
  test_block:
  - basic
  - shell
.gitignore (vendored, 16 lines changed)

@@ -5,16 +5,22 @@
  .envrc
  old/
  tmp/
  /vendor/
  *WIP
  *_stringer.go
- mgmt
- mgmt.static
+ /mgmt
+ /mgmt.static
  # crossbuild artifacts
- build/mgmt-*
+ /build/mgmt-*
  mgmt.iml
- rpmbuild/
- releases/
+ /rpmbuild/
+ /releases/
+ /repository/
+ /pprof/
+ /sites/
  # vim swap files
  .*.sw[op]
  # prevent `echo foo 2>1` typo errors by making this file read-only
  1
+ # allow users to keep some junk files around
+ *.wip
.lycheeignore (new file, 3 lines)

@@ -0,0 +1,3 @@
+ # list URLs that should be excluded for lychee link checher
+ https://roidelapluie.be
+ https://github.com/purpleidea/mgmt/commit
.travis.yml (entire file removed, 63 lines)
@@ -1,63 +0,0 @@
|
||||
language: go
|
||||
os:
|
||||
- linux
|
||||
go_import_path: github.com/purpleidea/mgmt
|
||||
sudo: true
|
||||
dist: xenial
|
||||
# travis requires that you update manually, and provides this key to trigger it
|
||||
apt:
|
||||
update: true
|
||||
before_install:
|
||||
# print some debug information to help catch the constant travis regressions
|
||||
- if [ -e /etc/apt/sources.list.d/ ]; then sudo ls -l /etc/apt/sources.list.d/; fi
|
||||
# workaround broken travis NO_PUBKEY errors
|
||||
- if [ -e /etc/apt/sources.list.d/rabbitmq_rabbitmq-server.list ]; then sudo rm -f /etc/apt/sources.list.d/rabbitmq_rabbitmq-server.list; fi
|
||||
- if [ -e /etc/apt/sources.list.d/github_git-lfs.list ]; then sudo rm -f /etc/apt/sources.list.d/github_git-lfs.list; fi
|
||||
# as per a number of comments online, this might mitigate some flaky fails...
|
||||
- if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 0C49F3730359A14518585931BC711F9BA15703C6; fi
|
||||
# apt update tends to be flaky in travis, retry up to 3 times on failure
|
||||
# https://docs.travis-ci.com/user/common-build-problems/#travis_retry
|
||||
- if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then travis_retry travis_retry sudo apt update; fi
|
||||
- git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
|
||||
- git fetch --unshallow
|
||||
install: 'make deps'
|
||||
matrix:
|
||||
fast_finish: false
|
||||
allow_failures:
|
||||
- go: 1.21.x
|
||||
- go: tip
|
||||
- os: osx
|
||||
# include only one build for osx for a quicker build as the nr. of these runners are sparse
|
||||
include:
|
||||
- name: "basic tests"
|
||||
go: 1.20.x
|
||||
env: TEST_BLOCK=basic
|
||||
- name: "shell tests"
|
||||
go: 1.20.x
|
||||
env: TEST_BLOCK=shell
|
||||
- name: "race tests"
|
||||
go: 1.20.x
|
||||
env: TEST_BLOCK=race
|
||||
- go: 1.21.x
|
||||
- go: tip
|
||||
- os: osx
|
||||
script: 'TEST_BLOCK="$TEST_BLOCK" make test'
|
||||
|
||||
# the "secure" channel value is the result of running: ./misc/travis-encrypt.sh
|
||||
# with a value of: irc.freenode.net#mgmtconfig to eliminate noise from forks...
|
||||
notifications:
|
||||
irc:
|
||||
#channels:
|
||||
# - secure: htcuWAczm3C1zKC9vUfdRzhIXM1vtF+q0cLlQFXK1IQQlk693/pM30Mmf2L/9V2DVDeps+GyLdip0ARXD1DZEJV0lK+Ca1qbHdFP1r4Xv6l5+jaDb5Y88YU5LI8K758QShiZJojuQ1aO2j8xmmt9V0/5y5QwlpPeHbKYBOFPBX3HvlT9DhvwZNKGhBb4qJOEaPVOwq9IkN3DyQ456MHcJ3q3vF9Lb440uTuLsJNof2AbYZH8ZIHCSG2N8tBj2qhJOpWQboYtQJzE2pRaGkGBL4kYcHZSZMXX8sl4cBM1vx/IRUkvBxJUpLJz2gn/eRI+/gr59juZE2K0+FOLlx9dLnX626Y9xSViopBI6JsIoHJDqNC7aGaF2qaYulGYN65VNKVqmghjgt6JLmmiKeH10hYrJMMvt2rms8l4+5iwmCwXvhH/WU9edzk2p5wqERMnostJFEJib0zI3yzLoF0sdJs+veKtagzfayY2d2l7hlmt951IpqqVWldVgWUcQKVvi8gmRarbwFlK+5D7BEnkUDcLNly/cqf7BgEeX6YfF+FiR4pgfOhYvGCD+2q91NgWQXHBCxbyN0be1TVdkXD94f0Lkn94VyEJJ+PkPlG+rPgFwGcjqN4oEGkJeJmES2If05q2Ms1dJLwYQDL3+Py4lNMSdSWj24TzlFVhtwHepuw=
|
||||
template:
|
||||
- "%{repository} (%{commit}: %{author}): %{message}"
|
||||
- "More info : %{build_url}"
|
||||
on_success: always
|
||||
on_failure: always
|
||||
use_notice: false
|
||||
skip_join: false
|
||||
email:
|
||||
recipients:
|
||||
- secure: qNkgP6QLl6VXpFQIxas2wggxvIiOmm1/hGRXm4BXsSFzHsJPvMamA3E1HEC7H+luiWTny1jtGSGgTJPV9CX1LtQV0g0S4ThaAvWuKvk3rXO8IVd++iA/Lh1s1H6JdKM0dJtLqFICawjeci4tOQzSvrM2eCBWqT0UYsrQsGHB6AF31GNAH0Acqd5cYeL+ZpbCN+hQEznAZQ7546N25TwqieI8Lg7nisA+lwYYwsaC2+f5RIeyvvKjQv3wzEdBAQ9CI9WQiTOUBnUnyYxMrdomQ/XGF66QnZy9vq5nEP83IFtuhPvSamL7ceT+yJW0jDyBi8sYEV7On7eXzjyHbiYpF4YHcJrFnf5RyV4kQGd6/SC8iZwK4Is4eyeAjDFTC+JafLajw9R9x9bK43BwlRAWOZxjFKe0cU/BVAjmlz87vHgUho2P41+0a5XfajfU6VhA5QFPK6rNH7W1CnA7D/0LmS0yaqJM1OCrm6LfoZEMhe0DxTJ9uWJbr0x1sYao6q8H4xYk+fyRgoBAr2TxYU7kXx8ThiRdzuQ8izdbojlzTYLe8liZMIsjL0axLsLK7YBWrjJUcDFDjR/DqmVxPrvbVFbCi9ChmBw0WmbJvDY0FV8T8dO8wCjg9JEmprAmWPyq0g/F87LFK4tAZqQFJGjP1qwsR9jdwdNTKeCdY656f/Y=
|
||||
on_failure: change
|
||||
on_success: change
|
||||
(file name not captured for this hunk)

@@ -1,5 +1,5 @@
  Mgmt
- Copyright (C) 2013-2024+ James Shubin and the project contributors
+ Copyright (C) James Shubin and the project contributors
  Written by James Shubin <james@shubin.ca> and the project contributors

  This program is free software: you can redistribute it and/or modify
Makefile (33 lines changed)

@@ -1,5 +1,5 @@
  # Mgmt
- # Copyright (C) 2013-2024+ James Shubin and the project contributors
+ # Copyright (C) James Shubin and the project contributors
  # Written by James Shubin <james@shubin.ca> and the project contributors
  #
  # This program is free software: you can redistribute it and/or modify

@@ -27,7 +27,7 @@
  # additional permission if he deems it necessary to achieve the goals of this
  # additional permission.

- SHELL = /usr/bin/env bash
+ SHELL = bash
  .PHONY: all art cleanart version program lang path deps run race generate build build-debug crossbuild clean test gofmt yamlfmt format docs
  .PHONY: rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms upload-releases copr tag
  .PHONY: mkosi mkosi_fedora-latest mkosi_fedora-older mkosi_stream-latest mkosi_debian-stable mkosi_ubuntu-latest mkosi_archlinux

@@ -38,6 +38,7 @@ SHELL = /usr/bin/env bash
  # a large amount of output from this `find`, can cause `make` to be much slower!
  GO_FILES := $(shell find * -name '*.go' -not -path 'old/*' -not -path 'tmp/*')
  MCL_FILES := $(shell find lang/ -name '*.mcl' -not -path 'old/*' -not -path 'tmp/*')
+ MISC_FILES := $(shell find engine/resources/http_server_ui/)

  SVERSION := $(or $(SVERSION),$(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --dirty --always))
  VERSION := $(or $(VERSION),$(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --abbrev=0))

@@ -191,13 +192,6 @@ path: ## create working paths
  deps: ## install system and golang dependencies
  	./misc/make-deps.sh

- run: ## run mgmt
- 	find . -maxdepth 1 -type f -name '*.go' -not -name '*_test.go' | xargs go run -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)"
-
- # include race flag
- race:
- 	find . -maxdepth 1 -type f -name '*.go' -not -name '*_test.go' | xargs go run -race -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)"
-
  generate:
  	go generate

@@ -205,11 +199,15 @@ lang: ## generates the lexer/parser for the language frontend
  	@# recursively run make in child dir named lang
  	@$(MAKE) --quiet -C lang

+ resources: ## builds the resources dependencies required for the engine backend
+ 	@# recursively run make in child dir named engine/resources
+ 	@$(MAKE) --quiet -C engine/resources
+
  # build a `mgmt` binary for current host os/arch
  $(PROGRAM): build/mgmt-${GOHOSTOS}-${GOHOSTARCH} ## build an mgmt binary for current host os/arch
  	cp -a $< $@

- $(PROGRAM).static: $(GO_FILES) $(MCL_FILES) go.mod go.sum
+ $(PROGRAM).static: $(GO_FILES) $(MCL_FILES) $(MISC_FILES) go.mod go.sum
  	@echo "Building: $(PROGRAM).static, version: $(SVERSION)..."
  	go generate
  	go build $(TRIMPATH) -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program=$(PROGRAM) -X main.version=$(SVERSION) -s -w' -o $(PROGRAM).static $(BUILD_FLAGS);

@@ -220,13 +218,22 @@ build: $(PROGRAM)
  build-debug: LDFLAGS=
  build-debug: $(PROGRAM)

+ # if you're using the bad/dev branch, you might want this too!
+ baddev: BUILD_FLAGS = -tags 'noaugeas novirt'
+ baddev: $(PROGRAM)
+
  # pattern rule target for (cross)building, mgmt-OS-ARCH will be expanded to the correct build
  # extract os and arch from target pattern
  GOOS=$(firstword $(subst -, ,$*))
  GOARCH=$(lastword $(subst -, ,$*))
- build/mgmt-%: $(GO_FILES) $(MCL_FILES) go.mod go.sum | lang funcgen
+ build/mgmt-%: $(GO_FILES) $(MCL_FILES) $(MISC_FILES) go.mod go.sum | lang resources funcgen
  	@# If you need to run `go mod tidy` then this can trigger.
  	@if [ "$(PKGNAME)" = "" ]; then echo "\$$(PKGNAME) is empty, test with: go list ."; exit 42; fi
  	@echo "Building: $(PROGRAM), os/arch: $*, version: $(SVERSION)..."
- 	time env GOOS=${GOOS} GOARCH=${GOARCH} go build $(TRIMPATH) -ldflags=$(PKGNAME)="-X main.program=$(PROGRAM) -X main.version=$(SVERSION) ${LDFLAGS}" -o $@ $(BUILD_FLAGS)
+ 	@# XXX: leave race detector on by default for now. For production
+ 	@# builds, we can consider turning it off for performance improvements.
+ 	@# XXX: ./mgmt run --tmp-prefix lang something_fast.mcl > /tmp/race 2>&1 # search for "WARNING: DATA RACE"
+ 	time env GOOS=${GOOS} GOARCH=${GOARCH} go build $(TRIMPATH) -race -ldflags=$(PKGNAME)="-X main.program=$(PROGRAM) -X main.version=$(SVERSION) ${LDFLAGS}" -o $@ $(BUILD_FLAGS)

  # create a list of binary file names to use as make targets
  # to use this you might want to run something like:

@@ -238,6 +245,7 @@ crossbuild: ${crossbuild_targets}
  clean: ## clean things up
  	$(MAKE) --quiet -C test clean
  	$(MAKE) --quiet -C lang clean
+ 	$(MAKE) --quiet -C engine/resources clean
  	$(MAKE) --quiet -C misc/mkosi clean
  	rm -f lang/core/generated_funcs.go || true
  	rm -f lang/core/generated_funcs_test.go || true

@@ -641,5 +649,6 @@ funcgen: lang/core/generated_funcs.go
  lang/core/generated_funcs.go: lang/funcs/funcgen/*.go lang/core/funcgen.yaml lang/funcs/funcgen/templates/generated_funcs.go.tpl
  	@echo "Generating: funcs..."
  	@go run `find lang/funcs/funcgen/ -maxdepth 1 -type f -name '*.go' -not -name '*_test.go'` -templates=lang/funcs/funcgen/templates/generated_funcs.go.tpl >/dev/null
  	@gofmt -s -w $@

  # vim: ts=8
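As a reading aid for the Makefile hunks above, here is a minimal usage sketch of the reworked build targets. It is not part of the diff; it assumes GNU make and that linux/amd64 is among the configured crossbuild os/arch pairs.

```sh
# Hedged usage sketch (assumptions: GNU make; linux/amd64 is one of the
# configured crossbuild os/arch pairs).

# Build one cross-compiled binary via the build/mgmt-% pattern rule; the rule
# splits the "%" stem on "-" into GOOS and GOARCH for `go build`. Note the new
# recipe keeps the -race detector enabled by default, per the XXX comment.
make build/mgmt-linux-amd64

# Build for the current host os/arch: the $(PROGRAM) target copies the result
# from build/mgmt-$(GOHOSTOS)-$(GOHOSTARCH).
make build

# Build every configured os/arch pair.
make crossbuild
```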
README.md (33 lines changed)

@@ -6,17 +6,22 @@
  [](https://github.com/purpleidea/mgmt/actions/)
  [](https://godocs.io/github.com/purpleidea/mgmt)
  [](https://matrix.to/#/#mgmtconfig:matrix.org)
  [](https://web.libera.chat/?channels=#mgmtconfig)
  [](https://www.patreon.com/purpleidea)
  [](https://liberapay.com/purpleidea/donate)

+ > [!TIP]
+ > [Resource reference guide now available!](https://mgmtconfig.com/docs/resources/)
+
+ > [!TIP]
+ > [Function reference guide now available!](https://mgmtconfig.com/docs/functions/)

  ## About:

  `Mgmt` is a real-time automation tool. It is familiar to existing configuration
  management software, but is drastically more powerful as it can allow you to
  build real-time, closed-loop feedback systems, in a very safe way, and with a
- surprisingly small amout of our `mcl` code. For example, the following code will
- ensure that your file server is set to read-only when it's friday.
+ surprisingly small amount of our `mcl` code. For example, the following code
+ will ensure that your file server is set to read-only when it's friday.

  ```mcl
  import "datetime"

@@ -67,7 +72,6 @@ Come join us in the `mgmt` community!
  | Medium | Link |
  |---|---|
  | Matrix | [#mgmtconfig](https://matrix.to/#/#mgmtconfig:matrix.org) on Matrix.org |
  | IRC | [#mgmtconfig](https://web.libera.chat/?channels=#mgmtconfig) on Libera.Chat |
  | Twitter | [@mgmtconfig](https://twitter.com/mgmtconfig) & [#mgmtconfig](https://twitter.com/hashtag/mgmtconfig) |
  | Mailing list | [looking for a new home, suggestions welcome](https://gitlab.freedesktop.org/freedesktop/freedesktop/-/issues/1082) |
  | Patreon | [purpleidea](https://www.patreon.com/purpleidea) on Patreon |

@@ -79,9 +83,19 @@ the configuration management space, but has a fast, modern, distributed systems
  approach. The project contains an engine and a language.
  [Please have a look at an introductory video or blog post.](docs/on-the-web.md)

- Mgmt is a fairly new project. It is usable today, but not yet feature complete.
- With your help you'll be able to influence our design and get us to 1.0 sooner!
- Interested users should read the [quick start guide](docs/quick-start-guide.md).
+ Mgmt is over ten years old! It is very powerful today, and has a solid
+ foundation and architecture which has been polished over the years. As with all
+ software, there are bugs to fix and improvements to be made, but I expect
+ they're easy to hack through and fix if you find any. Interested users should
+ start with the [official website](https://mgmtconfig.com/docs/).
+
+ ## Sponsors:
+
+ Mgmt is generously sponsored by:
+
+ [](https://m9rx.com/)
+
+ Please reach out if you'd like to sponsor!

  ## Documentation:

@@ -92,12 +106,17 @@ Please read, enjoy and help improve our documentation!
  | [quick start guide](docs/quick-start-guide.md) | for everyone |
  | [frequently asked questions](docs/faq.md) | for everyone |
  | [general documentation](docs/documentation.md) | for everyone |
+ | [resource reference](https://mgmtconfig.com/docs/resources/) | for everyone |
+ | [function reference](https://mgmtconfig.com/docs/functions/) | for everyone |
  | [language guide](docs/language-guide.md) | for everyone |
  | [function guide](docs/function-guide.md) | for mgmt developers |
  | [resource guide](docs/resource-guide.md) | for mgmt developers |
  | [style guide](docs/style-guide.md) | for mgmt developers |
  | [contributing guide](docs/contributing.md) | for mgmt contributors |
  | [service API guide](docs/service-guide.md) | for external developers |
  | [godoc API reference](https://godoc.org/github.com/purpleidea/mgmt) | for mgmt developers |
  | [prometheus guide](docs/prometheus.md) | for everyone |
  | [puppet guide](docs/puppet-guide.md) | for puppet sysadmins |
  | [development](docs/development.md) | for mgmt developers |
  | [videos](docs/on-the-web.md) | for everyone |
  | [blogs](docs/on-the-web.md) | for everyone |
art/m9rx.png (new binary file, 37 KiB; binary file not shown)
cli/cli.go (26 lines changed)

@@ -1,5 +1,5 @@
  // Mgmt
- // Copyright (C) 2013-2024+ James Shubin and the project contributors
+ // Copyright (C) James Shubin and the project contributors
  // Written by James Shubin <james@shubin.ca> and the project contributors
  //
  // This program is free software: you can redistribute it and/or modify

@@ -119,6 +119,14 @@ type Args struct {

  	DeployCmd *DeployArgs `arg:"subcommand:deploy" help:"deploy code into a cluster"`

+ 	SetupCmd *SetupArgs `arg:"subcommand:setup" help:"setup some bootstrapping tasks"`
+
+ 	FirstbootCmd *FirstbootArgs `arg:"subcommand:firstboot" help:"run some tasks on first boot"`
+
+ 	DocsCmd *DocsGenerateArgs `arg:"subcommand:docs" help:"generate documentation"`
+
+ 	ToolsCmd *ToolsArgs `arg:"subcommand:tools" help:"collection of useful tools"`
+
  	// This never runs, it gets preempted in the real main() function.
  	// XXX: Can we do it nicely with the new arg parser? can it ignore all args?
  	EtcdCmd *EtcdArgs `arg:"subcommand:etcd" help:"run standalone etcd"`

@@ -155,6 +163,22 @@ func (obj *Args) Run(ctx context.Context, data *cliUtil.Data) (bool, error) {
  		return cmd.Run(ctx, data)
  	}

+ 	if cmd := obj.SetupCmd; cmd != nil {
+ 		return cmd.Run(ctx, data)
+ 	}
+
+ 	if cmd := obj.FirstbootCmd; cmd != nil {
+ 		return cmd.Run(ctx, data)
+ 	}
+
+ 	if cmd := obj.DocsCmd; cmd != nil {
+ 		return cmd.Run(ctx, data)
+ 	}
+
+ 	if cmd := obj.ToolsCmd; cmd != nil {
+ 		return cmd.Run(ctx, data)
+ 	}
+
  	// NOTE: we could return true, fmt.Errorf("...") if more than one did
  	return false, nil // nobody activated
  }
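The cli/cli.go hunks above register four new top-level subcommands (setup, firstboot, docs, tools). The following is a hedged sketch of how they are invoked, using only the subcommand names and help strings that appear in the struct tags elsewhere in this compare; any additional flags or arguments those subcommands require are not shown here.

```sh
# Hedged sketch, not part of the diff: the new top-level subcommands wired in
# above. Nested subcommand names come from the SetupArgs, FirstbootArgs,
# DocsGenerateArgs and ToolsArgs structs added in this compare.
mgmt setup pkg        # setup packages
mgmt setup svc        # setup services
mgmt setup firstboot  # setup firstboot
mgmt firstboot start  # start firstboot service
mgmt docs generate    # generate documentation
mgmt tools grow       # tools for growing storage
```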
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -36,9 +36,11 @@ import (
|
||||
"os/signal"
|
||||
|
||||
cliUtil "github.com/purpleidea/mgmt/cli/util"
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/etcd"
|
||||
"github.com/purpleidea/mgmt/etcd/client"
|
||||
"github.com/purpleidea/mgmt/etcd/deployer"
|
||||
etcdfs "github.com/purpleidea/mgmt/etcd/fs"
|
||||
etcdSSH "github.com/purpleidea/mgmt/etcd/ssh"
|
||||
"github.com/purpleidea/mgmt/gapi"
|
||||
"github.com/purpleidea/mgmt/lib"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
@@ -52,15 +54,33 @@ import (
|
||||
// particular one contains all the common flags for the `deploy` subcommand
|
||||
// which all frontends can use.
|
||||
type DeployArgs struct {
|
||||
Seeds []string `arg:"--seeds,env:MGMT_SEEDS" help:"default etc client endpoint"`
|
||||
// SSHURL can be specified if we want to transport the SSH client
|
||||
// connection over SSH. If this is specified, the second hop is made
|
||||
// with the Seeds values, but they connect from this destination. You
|
||||
// can specify this in the standard james@server:22 format. This will
|
||||
// use your ~/.ssh/ directory for public key authentication and
|
||||
// verifying the host key in the known_hosts file. This must already be
|
||||
// setup for things to work.
|
||||
SSHURL string `arg:"--ssh-url" help:"transport the etcd client connection over SSH to this server"`
|
||||
|
||||
// SSHHostKey is the key part (which is already base64 encoded) from a
|
||||
// known_hosts file, representing the host we're connecting to. If this
|
||||
// is specified, then it overrides looking for it in the URL.
|
||||
SSHHostKey string `arg:"--ssh-hostkey" help:"use this ssh known hosts key when connecting over SSH"`
|
||||
|
||||
Seeds []string `arg:"--seeds,separate,env:MGMT_SEEDS" help:"default etcd client endpoints"`
|
||||
Noop bool `arg:"--noop" help:"globally force all resources into no-op mode"`
|
||||
Sema int `arg:"--sema" default:"-1" help:"globally add a semaphore to all resources with this lock count"`
|
||||
NoGit bool `arg:"--no-git" help:"don't look at git commit id for safe deploys"`
|
||||
Force bool `arg:"--force" help:"force a new deploy, even if the safety chain would break"`
|
||||
|
||||
DeployEmpty *cliUtil.EmptyArgs `arg:"subcommand:empty" help:"deploy empty payload"`
|
||||
DeployLang *cliUtil.LangArgs `arg:"subcommand:lang" help:"deploy lang (mcl) payload"`
|
||||
DeployYaml *cliUtil.YamlArgs `arg:"subcommand:yaml" help:"deploy yaml graph payload"`
|
||||
NoAutoEdges bool `arg:"--no-autoedges" help:"skip the autoedges stage"`
|
||||
|
||||
DeployEmpty *cliUtil.EmptyArgs `arg:"subcommand:empty" help:"deploy empty payload"`
|
||||
DeployLang *cliUtil.LangArgs `arg:"subcommand:lang" help:"deploy lang (mcl) payload"`
|
||||
DeployYaml *cliUtil.YamlArgs `arg:"subcommand:yaml" help:"deploy yaml graph payload"`
|
||||
DeployPuppet *cliUtil.PuppetArgs `arg:"subcommand:puppet" help:"deploy puppet graph payload"`
|
||||
DeployLangPuppet *cliUtil.LangPuppetArgs `arg:"subcommand:langpuppet" help:"deploy langpuppet graph payload"`
|
||||
}
|
||||
|
||||
// Run executes the correct subcommand. It errors if there's ever an error. It
|
||||
@@ -87,6 +107,14 @@ func (obj *DeployArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "yaml"
|
||||
args = cmd
|
||||
}
|
||||
if cmd := obj.DeployPuppet; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "puppet"
|
||||
args = cmd
|
||||
}
|
||||
if cmd := obj.DeployLangPuppet; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "langpuppet"
|
||||
args = cmd
|
||||
}
|
||||
|
||||
// XXX: workaround https://github.com/alexflint/go-arg/issues/239
|
||||
gapiNames := gapi.Names() // list of registered names
|
||||
@@ -174,26 +202,53 @@ func (obj *DeployArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error
|
||||
}
|
||||
}()
|
||||
|
||||
simpleDeploy := &deployer.SimpleDeploy{
|
||||
Client: etcdClient,
|
||||
Debug: data.Flags.Debug,
|
||||
var world engine.World
|
||||
world = &etcd.World{ // XXX: What should some of these fields be?
|
||||
Client: etcdClient, // XXX: remove me when etcdfs below is done
|
||||
Seeds: obj.Seeds,
|
||||
NS: lib.NS,
|
||||
//MetadataPrefix: lib.MetadataPrefix,
|
||||
//StoragePrefix: lib.StoragePrefix,
|
||||
//StandaloneFs: ???.DeployFs, // used for static deploys
|
||||
//GetURI: func() string {
|
||||
//},
|
||||
}
|
||||
if obj.SSHURL != "" { // alternate world implementation over SSH
|
||||
world = &etcdSSH.World{
|
||||
URL: obj.SSHURL,
|
||||
HostKey: obj.SSHHostKey,
|
||||
Seeds: obj.Seeds,
|
||||
NS: lib.NS,
|
||||
//MetadataPrefix: lib.MetadataPrefix,
|
||||
//StoragePrefix: lib.StoragePrefix,
|
||||
//StandaloneFs: ???.DeployFs, // used for static deploys
|
||||
//GetURI: func() string {
|
||||
//},
|
||||
}
|
||||
// XXX: We need to first get rid of the standalone etcd client,
|
||||
// and then pull the etcdfs stuff in so it uses that client.
|
||||
return false, fmt.Errorf("--ssh-url is not implemented yet")
|
||||
}
|
||||
worldInit := &engine.WorldInit{
|
||||
Hostname: "", // XXX: Should we set this?
|
||||
Debug: data.Flags.Debug,
|
||||
Logf: func(format string, v ...interface{}) {
|
||||
Logf("deploy: "+format, v...)
|
||||
Logf("world: etcd: "+format, v...)
|
||||
},
|
||||
}
|
||||
if err := simpleDeploy.Init(); err != nil {
|
||||
return false, errwrap.Wrapf(err, "deploy Init failed")
|
||||
if err := world.Connect(ctx, worldInit); err != nil {
|
||||
return false, errwrap.Wrapf(err, "world Connect failed")
|
||||
}
|
||||
defer func() {
|
||||
err := errwrap.Wrapf(simpleDeploy.Close(), "deploy Close failed")
|
||||
err := errwrap.Wrapf(world.Cleanup(), "world Cleanup failed")
|
||||
if err != nil {
|
||||
// TODO: cause the final exit code to be non-zero
|
||||
Logf("deploy cleanup error: %+v", err)
|
||||
// TODO: cause the final exit code to be non-zero?
|
||||
Logf("close error: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// get max id (from all the previous deploys)
|
||||
max, err := simpleDeploy.GetMaxDeployID(ctx)
|
||||
max, err := world.GetMaxDeployID(ctx)
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "error getting max deploy id")
|
||||
}
|
||||
@@ -201,6 +256,7 @@ func (obj *DeployArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error
|
||||
var id = max + 1 // next id
|
||||
Logf("previous max deploy id: %d", max)
|
||||
|
||||
// XXX: Get this from the World API? (Which might need improving!)
|
||||
etcdFs := &etcdfs.Fs{
|
||||
Client: etcdClient,
|
||||
// TODO: using a uuid is meant as a temporary measure, i hate them
|
||||
@@ -241,13 +297,16 @@ func (obj *DeployArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error
|
||||
deploy.Noop = obj.Noop
|
||||
deploy.Sema = obj.Sema
|
||||
|
||||
deploy.NoAutoEdges = obj.NoAutoEdges
|
||||
|
||||
str, err := deploy.ToB64()
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "encoding error")
|
||||
}
|
||||
|
||||
Logf("pushing...")
|
||||
// this nominally checks the previous git hash matches our expectation
|
||||
if err := simpleDeploy.AddDeploy(ctx, id, hash, pHash, &str); err != nil {
|
||||
if err := world.AddDeploy(ctx, id, hash, pHash, &str); err != nil {
|
||||
return false, errwrap.Wrapf(err, "could not create deploy id `%d`", id)
|
||||
}
|
||||
Logf("success, id: %d", id)
|
||||
|
||||
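The deploy changes above add --ssh-url and --ssh-hostkey flags, puppet and langpuppet deploy payloads, and a --no-autoedges flag, and they switch the deploy path onto the World API. A hedged invocation sketch follows; the endpoint value is a placeholder, the payload arguments are elided, and the hunk above still returns "--ssh-url is not implemented yet", so the SSH form is illustrative only.

```sh
# Hedged sketch, not part of the diff: the new deploy flags added above.
# Flag names come from the DeployArgs struct tags; the seeds endpoint is a
# placeholder, and the lang payload arguments (cliUtil.LangArgs) are elided
# because they are not shown in this compare.

# Plain deploy of an mcl payload to the etcd seeds:
mgmt deploy --seeds http://127.0.0.1:2379 lang ...

# Intended SSH-transported form per the new flags (james@server:22 is the
# format given in the SSHURL comment; the host key value is a placeholder).
# Illustrative only: the code above still rejects --ssh-url as unimplemented.
mgmt deploy --ssh-url james@server:22 --ssh-hostkey 'AAAA...' \
	--seeds http://127.0.0.1:2379 lang ...
```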
cli/docs.go (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
cliUtil "github.com/purpleidea/mgmt/cli/util"
|
||||
"github.com/purpleidea/mgmt/docs"
|
||||
)
|
||||
|
||||
// DocsGenerateArgs is the CLI parsing structure and type of the parsed result.
|
||||
// This particular one contains all the common flags for the `docs generate`
|
||||
// subcommand.
|
||||
type DocsGenerateArgs struct {
|
||||
docs.Config // embedded config (can't be a pointer) https://github.com/alexflint/go-arg/issues/240
|
||||
|
||||
DocsGenerate *cliUtil.DocsGenerateArgs `arg:"subcommand:generate" help:"generate documentation"`
|
||||
}
|
||||
|
||||
// Run executes the correct subcommand. It errors if there's ever an error. It
|
||||
// returns true if we did activate one of the subcommands. It returns false if
|
||||
// we did not. This information is used so that the top-level parser can return
|
||||
// usage or help information if no subcommand activates. This particular Run is
|
||||
// the run for the main `docs` subcommand.
|
||||
func (obj *DocsGenerateArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
var name string
|
||||
var args interface{}
|
||||
if cmd := obj.DocsGenerate; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "generate"
|
||||
args = cmd
|
||||
}
|
||||
_ = name
|
||||
|
||||
Logf := func(format string, v ...interface{}) {
|
||||
// Don't block this globally...
|
||||
//if !data.Flags.Debug {
|
||||
// return
|
||||
//}
|
||||
data.Flags.Logf("main: "+format, v...)
|
||||
}
|
||||
|
||||
var api docs.API
|
||||
|
||||
if cmd := obj.DocsGenerate; cmd != nil {
|
||||
api = &docs.Generate{
|
||||
DocsGenerateArgs: args.(*cliUtil.DocsGenerateArgs),
|
||||
Config: obj.Config,
|
||||
Program: data.Program,
|
||||
Version: data.Version,
|
||||
Debug: data.Flags.Debug,
|
||||
Logf: Logf,
|
||||
}
|
||||
}
|
||||
|
||||
if api == nil {
|
||||
return false, nil // nothing found (display help!)
|
||||
}
|
||||
|
||||
// We don't use these for the setup command in normal operation.
|
||||
if data.Flags.Debug {
|
||||
cliUtil.Hello(data.Program, data.Version, data.Flags) // say hello!
|
||||
defer Logf("goodbye!")
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
wg := &sync.WaitGroup{}
|
||||
defer wg.Wait()
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer cancel()
|
||||
defer wg.Done()
|
||||
// must have buffer for max number of signals
|
||||
signals := make(chan os.Signal, 3+1) // 3 * ^C + 1 * SIGTERM
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
var count uint8
|
||||
for {
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig != os.Interrupt {
|
||||
data.Flags.Logf("interrupted by signal")
|
||||
return
|
||||
}
|
||||
|
||||
switch count {
|
||||
case 0:
|
||||
data.Flags.Logf("interrupted by ^C")
|
||||
cancel()
|
||||
case 1:
|
||||
data.Flags.Logf("interrupted by ^C (fast pause)")
|
||||
cancel()
|
||||
case 2:
|
||||
data.Flags.Logf("interrupted by ^C (hard interrupt)")
|
||||
cancel()
|
||||
}
|
||||
count++
|
||||
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := api.Main(ctx); err != nil {
|
||||
if data.Flags.Debug {
|
||||
data.Flags.Logf("main: %+v", err)
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
cli/firstboot.go (new file, 151 lines)
@@ -0,0 +1,151 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
cliUtil "github.com/purpleidea/mgmt/cli/util"
|
||||
"github.com/purpleidea/mgmt/firstboot"
|
||||
)
|
||||
|
||||
// FirstbootArgs is the CLI parsing structure and type of the parsed result.
|
||||
// This particular one contains all the common flags for the `firstboot`
|
||||
// subcommand.
|
||||
type FirstbootArgs struct {
|
||||
firstboot.Config // embedded config (can't be a pointer) https://github.com/alexflint/go-arg/issues/240
|
||||
|
||||
FirstbootStart *cliUtil.FirstbootStartArgs `arg:"subcommand:start" help:"start firstboot service"`
|
||||
}
|
||||
|
||||
// Run executes the correct subcommand. It errors if there's ever an error. It
|
||||
// returns true if we did activate one of the subcommands. It returns false if
|
||||
// we did not. This information is used so that the top-level parser can return
|
||||
// usage or help information if no subcommand activates. This particular Run is
|
||||
// the run for the main `firstboot` subcommand. The firstboot command as a
|
||||
// service that lets you run commands once on the first boot of a system.
|
||||
func (obj *FirstbootArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
var name string
|
||||
var args interface{}
|
||||
if cmd := obj.FirstbootStart; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "pkg"
|
||||
args = cmd
|
||||
}
|
||||
_ = name
|
||||
|
||||
Logf := func(format string, v ...interface{}) {
|
||||
// Don't block this globally...
|
||||
//if !data.Flags.Debug {
|
||||
// return
|
||||
//}
|
||||
data.Flags.Logf("main: "+format, v...)
|
||||
}
|
||||
|
||||
var api firstboot.API
|
||||
|
||||
if cmd := obj.FirstbootStart; cmd != nil {
|
||||
api = &firstboot.Start{
|
||||
FirstbootStartArgs: args.(*cliUtil.FirstbootStartArgs),
|
||||
Config: obj.Config,
|
||||
Program: data.Program,
|
||||
Version: data.Version,
|
||||
Debug: data.Flags.Debug,
|
||||
Logf: Logf,
|
||||
}
|
||||
}
|
||||
|
||||
if api == nil {
|
||||
return false, nil // nothing found (display help!)
|
||||
}
|
||||
|
||||
// We don't use these for the setup command in normal operation.
|
||||
if data.Flags.Debug {
|
||||
cliUtil.Hello(data.Program, data.Version, data.Flags) // say hello!
|
||||
defer Logf("goodbye!")
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
wg := &sync.WaitGroup{}
|
||||
defer wg.Wait()
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer cancel()
|
||||
defer wg.Done()
|
||||
// must have buffer for max number of signals
|
||||
signals := make(chan os.Signal, 3+1) // 3 * ^C + 1 * SIGTERM
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
var count uint8
|
||||
for {
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig != os.Interrupt {
|
||||
data.Flags.Logf("interrupted by signal")
|
||||
return
|
||||
}
|
||||
|
||||
switch count {
|
||||
case 0:
|
||||
data.Flags.Logf("interrupted by ^C")
|
||||
cancel()
|
||||
case 1:
|
||||
data.Flags.Logf("interrupted by ^C (fast pause)")
|
||||
cancel()
|
||||
case 2:
|
||||
data.Flags.Logf("interrupted by ^C (hard interrupt)")
|
||||
cancel()
|
||||
}
|
||||
count++
|
||||
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := api.Main(ctx); err != nil {
|
||||
if data.Flags.Debug {
|
||||
data.Flags.Logf("main: %+v", err)
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
cli/run.go (20 lines changed)
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -52,9 +52,11 @@ import (
|
||||
type RunArgs struct {
|
||||
lib.Config // embedded config (can't be a pointer) https://github.com/alexflint/go-arg/issues/240
|
||||
|
||||
RunEmpty *cliUtil.EmptyArgs `arg:"subcommand:empty" help:"run empty payload"`
|
||||
RunLang *cliUtil.LangArgs `arg:"subcommand:lang" help:"run lang (mcl) payload"`
|
||||
RunYaml *cliUtil.YamlArgs `arg:"subcommand:yaml" help:"run yaml graph payload"`
|
||||
RunEmpty *cliUtil.EmptyArgs `arg:"subcommand:empty" help:"run empty payload"`
|
||||
RunLang *cliUtil.LangArgs `arg:"subcommand:lang" help:"run lang (mcl) payload"`
|
||||
RunYaml *cliUtil.YamlArgs `arg:"subcommand:yaml" help:"run yaml graph payload"`
|
||||
RunPuppet *cliUtil.PuppetArgs `arg:"subcommand:puppet" help:"run puppet graph payload"`
|
||||
RunLangPuppet *cliUtil.LangPuppetArgs `arg:"subcommand:langpuppet" help:"run a combined lang/puppet graph payload"`
|
||||
}
|
||||
|
||||
// Run executes the correct subcommand. It errors if there's ever an error. It
|
||||
@@ -81,6 +83,14 @@ func (obj *RunArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error) {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "yaml"
|
||||
args = cmd
|
||||
}
|
||||
if cmd := obj.RunPuppet; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "puppet"
|
||||
args = cmd
|
||||
}
|
||||
if cmd := obj.RunLangPuppet; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "langpuppet"
|
||||
args = cmd
|
||||
}
|
||||
|
||||
// XXX: workaround https://github.com/alexflint/go-arg/issues/239
|
||||
lists := [][]string{
|
||||
@@ -131,6 +141,8 @@ func (obj *RunArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error) {
|
||||
Noop: obj.Noop,
|
||||
Sema: obj.Sema,
|
||||
//Update: obj.Update,
|
||||
|
||||
NoAutoEdges: obj.NoAutoEdges,
|
||||
},
|
||||
|
||||
Fs: standaloneFs,
|
||||
|
||||
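The cli/run.go hunks above add puppet and langpuppet run payloads and wire NoAutoEdges through to the run config. A minimal sketch of the new subcommands follows, with the payload arguments elided since cliUtil.PuppetArgs and cliUtil.LangPuppetArgs are not shown in this compare.

```sh
# Hedged sketch, not part of the diff: the run payloads registered above.
mgmt run puppet ...      # run puppet graph payload
mgmt run langpuppet ...  # run a combined lang/puppet graph payload
```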
cli/setup.go (new file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
cliUtil "github.com/purpleidea/mgmt/cli/util"
|
||||
"github.com/purpleidea/mgmt/setup"
|
||||
)
|
||||
|
||||
// SetupArgs is the CLI parsing structure and type of the parsed result. This
|
||||
// particular one contains all the common flags for the `setup` subcommand.
|
||||
type SetupArgs struct {
|
||||
setup.Config // embedded config (can't be a pointer) https://github.com/alexflint/go-arg/issues/240
|
||||
|
||||
SetupPkg *cliUtil.SetupPkgArgs `arg:"subcommand:pkg" help:"setup packages"`
|
||||
SetupSvc *cliUtil.SetupSvcArgs `arg:"subcommand:svc" help:"setup services"`
|
||||
SetupFirstboot *cliUtil.SetupFirstbootArgs `arg:"subcommand:firstboot" help:"setup firstboot"`
|
||||
}
|
||||
|
||||
// Run executes the correct subcommand. It errors if there's ever an error. It
|
||||
// returns true if we did activate one of the subcommands. It returns false if
|
||||
// we did not. This information is used so that the top-level parser can return
|
||||
// usage or help information if no subcommand activates. This particular Run is
|
||||
// the run for the main `setup` subcommand. The setup command does some
|
||||
// bootstrap work to help get things going.
|
||||
func (obj *SetupArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
var name string
|
||||
var args interface{}
|
||||
if cmd := obj.SetupPkg; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "pkg"
|
||||
args = cmd
|
||||
}
|
||||
if cmd := obj.SetupSvc; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "svc"
|
||||
args = cmd
|
||||
}
|
||||
if cmd := obj.SetupFirstboot; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "firstboot"
|
||||
args = cmd
|
||||
}
|
||||
_ = name
|
||||
|
||||
Logf := func(format string, v ...interface{}) {
|
||||
// Don't block this globally...
|
||||
//if !data.Flags.Debug {
|
||||
// return
|
||||
//}
|
||||
data.Flags.Logf("main: "+format, v...)
|
||||
}
|
||||
|
||||
var api setup.API
|
||||
|
||||
if cmd := obj.SetupPkg; cmd != nil {
|
||||
api = &setup.Pkg{
|
||||
SetupPkgArgs: args.(*cliUtil.SetupPkgArgs),
|
||||
Config: obj.Config,
|
||||
Program: data.Program,
|
||||
Version: data.Version,
|
||||
Debug: data.Flags.Debug,
|
||||
Logf: Logf,
|
||||
}
|
||||
}
|
||||
if cmd := obj.SetupSvc; cmd != nil {
|
||||
api = &setup.Svc{
|
||||
SetupSvcArgs: args.(*cliUtil.SetupSvcArgs),
|
||||
Config: obj.Config,
|
||||
Program: data.Program,
|
||||
Version: data.Version,
|
||||
Debug: data.Flags.Debug,
|
||||
Logf: Logf,
|
||||
}
|
||||
}
|
||||
if cmd := obj.SetupFirstboot; cmd != nil {
|
||||
api = &setup.Firstboot{
|
||||
SetupFirstbootArgs: args.(*cliUtil.SetupFirstbootArgs),
|
||||
Config: obj.Config,
|
||||
Program: data.Program,
|
||||
Version: data.Version,
|
||||
Debug: data.Flags.Debug,
|
||||
Logf: Logf,
|
||||
}
|
||||
}
|
||||
|
||||
if api == nil {
|
||||
return false, nil // nothing found (display help!)
|
||||
}
|
||||
|
||||
// We don't use these for the setup command in normal operation.
|
||||
if data.Flags.Debug {
|
||||
cliUtil.Hello(data.Program, data.Version, data.Flags) // say hello!
|
||||
defer Logf("goodbye!")
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
wg := &sync.WaitGroup{}
|
||||
defer wg.Wait()
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer cancel()
|
||||
defer wg.Done()
|
||||
// must have buffer for max number of signals
|
||||
signals := make(chan os.Signal, 3+1) // 3 * ^C + 1 * SIGTERM
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
var count uint8
|
||||
for {
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig != os.Interrupt {
|
||||
data.Flags.Logf("interrupted by signal")
|
||||
return
|
||||
}
|
||||
|
||||
switch count {
|
||||
case 0:
|
||||
data.Flags.Logf("interrupted by ^C")
|
||||
cancel()
|
||||
case 1:
|
||||
data.Flags.Logf("interrupted by ^C (fast pause)")
|
||||
cancel()
|
||||
case 2:
|
||||
data.Flags.Logf("interrupted by ^C (hard interrupt)")
|
||||
cancel()
|
||||
}
|
||||
count++
|
||||
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := api.Main(ctx); err != nil {
|
||||
if data.Flags.Debug {
|
||||
data.Flags.Logf("main: %+v", err)
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
cli/tools.go (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
cliUtil "github.com/purpleidea/mgmt/cli/util"
|
||||
"github.com/purpleidea/mgmt/tools"
|
||||
)
|
||||
|
||||
// ToolsArgs is the CLI parsing structure and type of the parsed result. This
|
||||
// particular one contains all the common flags for the `tools` subcommand.
|
||||
type ToolsArgs struct {
|
||||
tools.Config // embedded config (can't be a pointer) https://github.com/alexflint/go-arg/issues/240
|
||||
|
||||
ToolsGrow *cliUtil.ToolsGrowArgs `arg:"subcommand:grow" help:"tools for growing storage"`
|
||||
}
|
||||
|
||||
// Run executes the correct subcommand. It errors if there's ever an error. It
|
||||
// returns true if we did activate one of the subcommands. It returns false if
|
||||
// we did not. This information is used so that the top-level parser can return
|
||||
// usage or help information if no subcommand activates. This particular Run is
|
||||
// the run for the main `tools` subcommand. The tools command provides some
|
||||
// functionality which can be helpful with provisioning and config management.
|
||||
func (obj *ToolsArgs) Run(ctx context.Context, data *cliUtil.Data) (bool, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
var name string
|
||||
var args interface{}
|
||||
if cmd := obj.ToolsGrow; cmd != nil {
|
||||
name = cliUtil.LookupSubcommand(obj, cmd) // "grow"
|
||||
args = cmd
|
||||
}
|
||||
_ = name
|
||||
|
||||
Logf := func(format string, v ...interface{}) {
|
||||
// Don't block this globally...
|
||||
//if !data.Flags.Debug {
|
||||
// return
|
||||
//}
|
||||
data.Flags.Logf("main: "+format, v...)
|
||||
}
|
||||
|
||||
var api tools.API
|
||||
|
||||
if cmd := obj.ToolsGrow; cmd != nil {
|
||||
api = &tools.Grow{
|
||||
ToolsGrowArgs: args.(*cliUtil.ToolsGrowArgs),
|
||||
Config: obj.Config,
|
||||
Program: data.Program,
|
||||
Version: data.Version,
|
||||
Debug: data.Flags.Debug,
|
||||
Logf: Logf,
|
||||
}
|
||||
}
|
||||
|
||||
if api == nil {
|
||||
return false, nil // nothing found (display help!)
|
||||
}
|
||||
|
||||
// We don't use these for the tools command in normal operation.
|
||||
if data.Flags.Debug {
|
||||
cliUtil.Hello(data.Program, data.Version, data.Flags) // say hello!
|
||||
defer Logf("goodbye!")
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
wg := &sync.WaitGroup{}
|
||||
defer wg.Wait()
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer cancel()
|
||||
defer wg.Done()
|
||||
// must have buffer for max number of signals
|
||||
signals := make(chan os.Signal, 3+1) // 3 * ^C + 1 * SIGTERM
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
var count uint8
|
||||
for {
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig != os.Interrupt {
|
||||
data.Flags.Logf("interrupted by signal")
|
||||
return
|
||||
}
|
||||
|
||||
switch count {
|
||||
case 0:
|
||||
data.Flags.Logf("interrupted by ^C")
|
||||
cancel()
|
||||
case 1:
|
||||
data.Flags.Logf("interrupted by ^C (fast pause)")
|
||||
cancel()
|
||||
case 2:
|
||||
data.Flags.Logf("interrupted by ^C (hard interrupt)")
|
||||
cancel()
|
||||
}
|
||||
count++
|
||||
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := api.Main(ctx); err != nil {
|
||||
if data.Flags.Debug {
|
||||
data.Flags.Logf("main: %+v", err)
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
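To make the subcommand plumbing above easier to follow, here is a minimal, hypothetical sketch (not part of this change) of how the go-arg library selects a subcommand: the parser fills exactly one subcommand pointer, and a nil pointer means that subcommand was not chosen, which is what the `if cmd := obj.ToolsGrow; cmd != nil` checks rely on.

```golang
// Hypothetical standalone sketch of the go-arg subcommand pattern used above.
package main

import (
	"fmt"

	arg "github.com/alexflint/go-arg"
)

type growCmd struct {
	Mount string `arg:"--mount,required" help:"root mount point to start with"`
	Exec  bool   `arg:"--exec" help:"actually run these commands"`
}

type toolsCmd struct {
	Grow *growCmd `arg:"subcommand:grow" help:"tools for growing storage"`
}

func main() {
	args := &toolsCmd{}
	arg.MustParse(args) // e.g.: prog grow --mount / --exec

	if cmd := args.Grow; cmd != nil { // same nil check as in Run above
		fmt.Printf("grow selected, mount: %s, exec: %t\n", cmd.Mount, cmd.Exec)
		return
	}
	fmt.Println("no subcommand selected (display help!)")
}
```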
116
cli/util/args.go
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -70,7 +70,9 @@ func LookupSubcommand(obj interface{}, st interface{}) string {
|
||||
}
|
||||
|
||||
// EmptyArgs is the empty CLI parsing structure and type of the parsed result.
|
||||
type EmptyArgs struct{}
|
||||
type EmptyArgs struct {
|
||||
Wait bool `arg:"--wait" help:"don't use any existing (stale) deploys"`
|
||||
}
|
||||
|
||||
// LangArgs is the lang CLI parsing structure and type of the parsed result.
|
||||
type LangArgs struct {
|
||||
@@ -87,7 +89,7 @@ type LangArgs struct {
|
||||
OnlyUnify bool `arg:"--only-unify" help:"stop after type unification"`
|
||||
SkipUnify bool `arg:"--skip-unify" help:"skip type unification"`
|
||||
UnifySolver *string `arg:"--unify-name" help:"pick a specific unification solver"`
|
||||
UnifyOptimizations []string `arg:"--unify-optimizations" help:"list of unification optimizations to request (experts only)"`
|
||||
UnifyOptimizations []string `arg:"--unify-optimizations,separate" help:"list of unification optimizations to request (experts only)"`
|
||||
|
||||
Depth int `arg:"--depth" default:"-1" help:"max recursion depth limit (-1 is unlimited)"`
|
||||
|
||||
@@ -102,3 +104,111 @@ type YamlArgs struct {
|
||||
// Input is the input yaml code or file path or any input specification.
|
||||
Input string `arg:"positional,required"`
|
||||
}
|
||||
|
||||
// PuppetArgs is the puppet CLI parsing structure and type of the parsed result.
|
||||
type PuppetArgs struct {
|
||||
// Input is the input puppet code or file path or just "agent".
|
||||
Input string `arg:"positional,required"`
|
||||
|
||||
// PuppetConf is the optional path to a puppet.conf config file.
|
||||
PuppetConf string `arg:"--puppet-conf" help:"full path to the puppet.conf file to use"`
|
||||
}
|
||||
|
||||
// LangPuppetArgs is the langpuppet CLI parsing structure and type of the parsed
|
||||
// result.
|
||||
type LangPuppetArgs struct {
|
||||
// LangInput is the input mcl code or file path or any input specification.
|
||||
LangInput string `arg:"--lang,required" help:"the input parameter for the lang module"`
|
||||
|
||||
// PuppetInput is the input puppet code or file path or just "agent".
|
||||
PuppetInput string `arg:"--puppet,required" help:"the input parameter for the puppet module"`
|
||||
|
||||
// copy-pasted from PuppetArgs
|
||||
|
||||
// PuppetConf is the optional path to a puppet.conf config file.
|
||||
PuppetConf string `arg:"--puppet-conf" help:"full path to the puppet.conf file to use"`
|
||||
|
||||
// end PuppetArgs
|
||||
|
||||
// copy-pasted from LangArgs
|
||||
|
||||
// TODO: removed (temporarily?)
|
||||
//Stdin bool `arg:"--stdin" help:"use passthrough stdin"`
|
||||
|
||||
Download bool `arg:"--download" help:"download any missing imports"`
|
||||
OnlyDownload bool `arg:"--only-download" help:"stop after downloading any missing imports"`
|
||||
Update bool `arg:"--update" help:"update all dependencies to the latest versions"`
|
||||
|
||||
OnlyUnify bool `arg:"--only-unify" help:"stop after type unification"`
|
||||
SkipUnify bool `arg:"--skip-unify" help:"skip type unification"`
|
||||
|
||||
Depth int `arg:"--depth" default:"-1" help:"max recursion depth limit (-1 is unlimited)"`
|
||||
|
||||
// The default of 0 means any error is a failure by default.
|
||||
Retry int `arg:"--depth" help:"max number of retries (-1 is unlimited)"`
|
||||
|
||||
ModulePath string `arg:"--module-path,env:MGMT_MODULE_PATH" help:"choose the modules path (absolute)"`
|
||||
|
||||
// end LangArgs
|
||||
}
|
||||
|
||||
// SetupPkgArgs is the setup pkg CLI parsing structure and type of the
|
||||
// parsed result.
|
||||
type SetupPkgArgs struct {
|
||||
Distro string `arg:"--distro" help:"build for this distro"`
|
||||
Sudo bool `arg:"--sudo" help:"include sudo in the command"`
|
||||
Exec bool `arg:"--exec" help:"actually run these commands"`
|
||||
}
|
||||
|
||||
// SetupSvcArgs is the setup service CLI parsing structure and type of the
|
||||
// parsed result.
|
||||
type SetupSvcArgs struct {
|
||||
BinaryPath string `arg:"--binary-path" help:"path to the binary"`
|
||||
SSHURL string `arg:"--ssh-url" help:"transport the etcd client connection over SSH to this server"`
|
||||
SSHHostKey string `arg:"--ssh-hostkey" help:"use this ssh known hosts key when connecting over SSH"`
|
||||
|
||||
Seeds []string `arg:"--seeds,separate,env:MGMT_SEEDS" help:"default etcd client endpoints"`
|
||||
NoServer bool `arg:"--no-server" help:"do not start embedded etcd server (do not promote from client to peer)"`
|
||||
|
||||
Install bool `arg:"--install" help:"install the systemd mgmt service"`
|
||||
Start bool `arg:"--start" help:"start the mgmt service"`
|
||||
Enable bool `arg:"--enable" help:"enable the mgmt service"`
|
||||
}
|
||||
|
||||
// SetupFirstbootArgs is the setup firstboot CLI parsing structure and type of the
|
||||
// parsed result.
|
||||
type SetupFirstbootArgs struct {
|
||||
BinaryPath string `arg:"--binary-path" help:"path to the binary"`
|
||||
Mkdir bool `arg:"--mkdir" help:"make the necessary firstboot dirs"`
|
||||
Install bool `arg:"--install" help:"install the systemd firstboot service"`
|
||||
Start bool `arg:"--start" help:"start the firstboot service (typically not used)"`
|
||||
Enable bool `arg:"--enable" help:"enable the firstboot service"`
|
||||
|
||||
FirstbootStartArgs // Include these options if we want to specify them.
|
||||
}
|
||||
|
||||
// FirstbootStartArgs is the firstboot service CLI parsing structure and type of
|
||||
// the parsed result.
|
||||
type FirstbootStartArgs struct {
|
||||
LockFilePath string `arg:"--lock-file-path" help:"path to the lock file"`
|
||||
ScriptsDir string `arg:"--scripts-dir" help:"path to the scripts dir"`
|
||||
DoneDir string `arg:"--done-dir" help:"dir to move done scripts to"`
|
||||
LoggingDir string `arg:"--logging-dir" help:"directory to store logs in"`
|
||||
}
|
||||
|
||||
// DocsGenerateArgs is the docgen utility CLI parsing structure and type of the
|
||||
// parsed result.
|
||||
type DocsGenerateArgs struct {
|
||||
Output string `arg:"--output" help:"output path to write to"`
|
||||
RootDir string `arg:"--root-dir" help:"path to mgmt source dir"`
|
||||
NoResources bool `arg:"--no-resources" help:"skip resource doc generation"`
|
||||
NoFunctions bool `arg:"--no-functions" help:"skip function doc generation"`
|
||||
}
|
||||
|
||||
// ToolsGrowArgs is the util tool CLI parsing structure and type of the parsed
|
||||
// result.
|
||||
type ToolsGrowArgs struct {
|
||||
Mount string `arg:"--mount,required" help:"root mount point to start with"`
|
||||
Exec bool `arg:"--exec" help:"actually run these commands"`
|
||||
Done string `arg:"--done" help:"create this file when done, skip if it exists"`
|
||||
}
|
||||
|
||||
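Since several of the new fields above rely on the `separate` and `env` options of go-arg struct tags, here is a small hedged sketch (illustrative names, not project code) of what those options do at parse time.

```golang
// Hypothetical sketch showing go-arg's `separate` and `env` tag options.
package main

import (
	"fmt"

	arg "github.com/alexflint/go-arg"
)

type sketchArgs struct {
	// With `separate`, each value needs its own flag instance, e.g.:
	//   prog --unify-optimizations foo --unify-optimizations bar
	UnifyOptimizations []string `arg:"--unify-optimizations,separate" help:"optimizations to request"`

	// With `env`, the value may also come from an environment variable.
	Seeds []string `arg:"--seeds,separate,env:MGMT_SEEDS" help:"default etcd client endpoints"`
}

func main() {
	args := &sketchArgs{}
	arg.MustParse(args)
	fmt.Printf("optimizations: %v, seeds: %v\n", args.UnifyOptimizations, args.Seeds)
}
```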
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -41,7 +41,7 @@ func Hello(program, version string, flags Flags) {
|
||||
program = "<unknown>"
|
||||
}
|
||||
fmt.Println(fmt.Sprintf("This is: %s, version: %s", program, version))
|
||||
fmt.Println("Copyright (C) 2013-2024+ James Shubin and the project contributors")
|
||||
fmt.Println("Copyright (C) James Shubin and the project contributors")
|
||||
fmt.Println("Written by James Shubin <james@shubin.ca> and the project contributors")
|
||||
flags.Logf("main: start: %v", start)
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -34,6 +34,7 @@ import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
@@ -61,6 +62,8 @@ func New(timeout int) *Coordinator {
|
||||
//resumeSignal: make(chan struct{}), // happens on pause
|
||||
//pausedAck: util.NewEasyAck(), // happens on pause
|
||||
|
||||
sendSignal: make(chan bool),
|
||||
|
||||
stateFns: make(map[string]func(bool) error),
|
||||
smutex: &sync.RWMutex{},
|
||||
|
||||
@@ -103,6 +106,8 @@ type Coordinator struct {
|
||||
// pausedAck is used to send an ack message saying that we've paused.
|
||||
pausedAck *util.EasyAck
|
||||
|
||||
sendSignal chan bool // send pause (false) or resume (true)
|
||||
|
||||
// stateFns run on converged state changes.
|
||||
stateFns map[string]func(bool) error
|
||||
// smutex is used for controlling access to the stateFns map.
|
||||
@@ -126,6 +131,8 @@ func (obj *Coordinator) Register() *UID {
|
||||
//id: obj.lastid,
|
||||
//name: fmt.Sprintf("%d", obj.lastid), // some default
|
||||
|
||||
isConverged: &atomic.Bool{},
|
||||
|
||||
poke: obj.poke,
|
||||
|
||||
// timer
|
||||
@@ -176,11 +183,28 @@ func (obj *Coordinator) Run(startPaused bool) {
|
||||
for {
|
||||
// pause if one was requested...
|
||||
select {
|
||||
case <-obj.pauseSignal: // channel closes
|
||||
//case <-obj.pauseSignal: // channel closes
|
||||
// obj.pausedAck.Ack() // send ack
|
||||
// // we are paused now, and waiting for resume or exit...
|
||||
// select {
|
||||
// case <-obj.resumeSignal: // channel closes # XXX: RACE READ
|
||||
// // resumed!
|
||||
//
|
||||
// case <-obj.closeChan: // we can always escape
|
||||
// return
|
||||
// }
|
||||
case b, _ := <-obj.sendSignal:
|
||||
if b { // resume
|
||||
panic("unexpected resume") // TODO: continue instead?
|
||||
}
|
||||
// paused
|
||||
obj.pausedAck.Ack() // send ack
|
||||
// we are paused now, and waiting for resume or exit...
|
||||
select {
|
||||
case <-obj.resumeSignal: // channel closes
|
||||
case b, _ := <-obj.sendSignal:
|
||||
if !b { // pause
|
||||
panic("unexpected pause") // TODO: continue instead?
|
||||
}
|
||||
// resumed!
|
||||
|
||||
case <-obj.closeChan: // we can always escape
|
||||
@@ -229,8 +253,13 @@ func (obj *Coordinator) Pause() error {
|
||||
}
|
||||
|
||||
obj.pausedAck = util.NewEasyAck()
|
||||
obj.resumeSignal = make(chan struct{}) // build the resume signal
|
||||
close(obj.pauseSignal)
|
||||
//obj.resumeSignal = make(chan struct{}) // build the resume signal XXX: RACE WRITE
|
||||
//close(obj.pauseSignal)
|
||||
select {
|
||||
case obj.sendSignal <- false:
|
||||
case <-obj.closeChan:
|
||||
return fmt.Errorf("closing")
|
||||
}
|
||||
|
||||
// wait for ack (or exit signal)
|
||||
select {
|
||||
@@ -253,8 +282,14 @@ func (obj *Coordinator) Resume() {
|
||||
return
|
||||
}
|
||||
|
||||
obj.pauseSignal = make(chan struct{}) // rebuild for next pause
|
||||
close(obj.resumeSignal)
|
||||
//obj.pauseSignal = make(chan struct{}) // rebuild for next pause
|
||||
//close(obj.resumeSignal)
|
||||
select {
|
||||
case obj.sendSignal <- true:
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
|
||||
obj.poke() // unblock and notice the resume if necessary
|
||||
|
||||
obj.paused = false
|
||||
@@ -389,7 +424,7 @@ type UID struct {
|
||||
// for per-UID timeouts too.
|
||||
timeout int
|
||||
// isConverged stores the convergence state of this particular UID.
|
||||
isConverged bool
|
||||
isConverged *atomic.Bool
|
||||
|
||||
// poke stores a reference to the main poke function.
|
||||
poke func()
|
||||
@@ -411,14 +446,14 @@ func (obj *UID) Unregister() {
|
||||
|
||||
// IsConverged reports whether this UID is converged or not.
|
||||
func (obj *UID) IsConverged() bool {
|
||||
return obj.isConverged
|
||||
return obj.isConverged.Load()
|
||||
}
|
||||
|
||||
// SetConverged sets the convergence state of this UID. This is used by the
|
||||
// running timer if one is started. The timer will overwrite any value set by
|
||||
// this method.
|
||||
func (obj *UID) SetConverged(isConverged bool) {
|
||||
obj.isConverged = isConverged
|
||||
obj.isConverged.Store(isConverged)
|
||||
obj.poke() // notify of change
|
||||
}
|
||||
|
||||
|
||||
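The switch above from closing the `pauseSignal`/`resumeSignal` channels to a single `sendSignal` channel plus an ack avoids the racy rebuild of those channels. As a rough, hedged illustration of that protocol (simplified, not the Coordinator's actual code):

```golang
// Simplified, hypothetical sketch of the pause/resume signalling protocol:
// one channel carries pause (false) and resume (true), and an ack channel
// confirms that the pause actually took effect.
package main

import (
	"fmt"
	"time"
)

func main() {
	sendSignal := make(chan bool)    // pause (false) or resume (true)
	pausedAck := make(chan struct{}) // ack that the pause took effect
	exit := make(chan struct{})
	done := make(chan struct{})

	go func() { // worker loop, loosely analogous to Coordinator.Run
		defer close(done)
		for {
			select {
			case b := <-sendSignal:
				if b { // a resume without a pause is unexpected
					panic("unexpected resume")
				}
				pausedAck <- struct{}{} // we are paused now
				if b := <-sendSignal; !b { // wait for resume (true)
					panic("unexpected pause")
				}
			case <-exit:
				return
			default:
			}
			fmt.Println("working...")
			time.Sleep(10 * time.Millisecond)
		}
	}()

	sendSignal <- false // request a pause, like Pause()
	<-pausedAck         // wait for the ack
	fmt.Println("worker is paused")
	sendSignal <- true // resume, like Resume()
	close(exit)        // shut the worker down
	<-done
}
```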
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
|
||||
4
debian/control
vendored
@@ -12,6 +12,6 @@ Architecture: any
|
||||
Depends: ${shlibs:Depends}, ${misc:Depends}, packagekit
|
||||
Suggests: graphviz
|
||||
Description: mgmt: next generation config management!
|
||||
The mgmt tool is a next generation config management prototype. It's
|
||||
not yet ready for production, but we hope to get there soon. Get
|
||||
The mgmt tool is a next generation config management solution. It's
|
||||
ready for production, and we hope you try out the future soon. Get
|
||||
involved today!
|
||||
|
||||
2
debian/copyright
vendored
@@ -3,7 +3,7 @@ Upstream-Name: mgmt
|
||||
Source: <https://github.com/purpleidea/mgmt>
|
||||
|
||||
Files: *
|
||||
Copyright: Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
Copyright: Copyright (C) James Shubin and the project contributors
|
||||
License: GPL-3.0
|
||||
|
||||
License: GPL-3.0
|
||||
|
||||
2
doc.go
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.20
|
||||
FROM golang:1.23
|
||||
|
||||
MAINTAINER Michał Czeraszkiewicz <contact@czerasz.com>
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM fedora:38
|
||||
FROM fedora:41
|
||||
LABEL org.opencontainers.image.authors="laurent.indermuehle@pm.me"
|
||||
|
||||
ENV GOPATH=/root/gopath
|
||||
|
||||
@@ -6,7 +6,7 @@ ENV PATH=/opt/rh/rh-ruby22/root/usr/bin:/root/gopath/bin:/usr/local/sbin:/sbin:/
|
||||
ENV LD_LIBRARY_PATH=/opt/rh/rh-ruby22/root/usr/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
|
||||
ENV PKG_CONFIG_PATH=/opt/rh/rh-ruby22/root/usr/lib64/pkgconfig${PKG_CONFIG_PATH:+:${PKG_CONFIG_PATH}}
|
||||
|
||||
RUN yum -y install epel-release wget unzip git make which centos-release-scl gcc && sed -i "s/enabled=0/enabled=1/" /etc/yum.repos.d/epel-testing.repo && yum -y install rh-ruby22 && wget -O /opt/go1.20.11.linux-amd64.tar.gz https://storage.googleapis.com/golang/go1.20.11.linux-amd64.tar.gz && tar -C /usr/local -xzf /opt/go1.20.11.linux-amd64.tar.gz
|
||||
RUN yum -y install epel-release wget unzip git make which centos-release-scl gcc && sed -i "s/enabled=0/enabled=1/" /etc/yum.repos.d/epel-testing.repo && yum -y install rh-ruby22 && wget -O /opt/go1.23.5.linux-amd64.tar.gz https://storage.googleapis.com/golang/go1.23.5.linux-amd64.tar.gz && tar -C /usr/local -xzf /opt/go1.23.5.linux-amd64.tar.gz
|
||||
RUN mkdir -p $GOPATH/src/github.com/purpleidea && cd $GOPATH/src/github.com/purpleidea && git clone --recursive https://github.com/purpleidea/mgmt
|
||||
RUN go get -u gopkg.in/alecthomas/gometalinter.v1 && cd $GOPATH/src/github.com/purpleidea/mgmt && make deps && make build
|
||||
CMD ["/bin/bash"]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.20
|
||||
FROM golang:1.23
|
||||
|
||||
MAINTAINER Michał Czeraszkiewicz <contact@czerasz.com>
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
script_directory="$( cd "$( dirname "$0" )" && pwd )"
|
||||
project_directory=$script_directory/../..
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Stop on any error
|
||||
set -e
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# runs command provided as argument inside a development (Linux) Docker container
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Stop on any error
|
||||
set -e
|
||||
|
||||
@@ -51,7 +51,7 @@ master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'mgmt'
|
||||
copyright = u'2013-2024+ James Shubin and the project contributors'
|
||||
copyright = u'Copyright (C) James Shubin and the project contributors'
|
||||
author = u'James Shubin'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
@@ -153,6 +153,6 @@ man_pages = [
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'mgmt', u'mgmt Documentation',
|
||||
author, 'mgmt', 'A next generation config management prototype!',
|
||||
author, 'mgmt', 'Next generation distributed, event-driven, parallel config management!',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
96
docs/contributing.md
Normal file
@@ -0,0 +1,96 @@
|
||||
# Contributing
|
||||
|
||||
What follows is a short guide with information for participants who wish to
|
||||
contribute to the project. It hopes to set both expectations and boundaries
|
||||
so that we both benefit.
|
||||
|
||||
## Small patches
|
||||
|
||||
If you have a small patch which you believe is straightforward, should be easy
|
||||
to merge, and isn't overly onerous on your time to write, please feel free to
|
||||
send it our way without asking first. Bug fixes are excellent examples of small
|
||||
patches. Please make sure to familiarize yourself with the rough coding style of
|
||||
the project first, and read through the [style guide](style-guide.md).
|
||||
|
||||
## Making an excellent small patch
|
||||
|
||||
As a special case: We'd like to avoid minimal effort, one-off, drive-by patches
|
||||
by bots and contributors looking to increase their "activity" numbers. As an
|
||||
example: a patch which fixes a small linting issue isn't rousing, but a patch
|
||||
that adds a linter test _and_ fixes a small linting issue is, because it shows
|
||||
you put in more effort.
|
||||
|
||||
## Medium patches
|
||||
|
||||
Medium sized patches are especially welcome. Good examples of these patches
|
||||
can include writing a new `mgmt` resource or function. You'll generally need
|
||||
some knowledge of golang interfaces and concurrency to write these patches.
|
||||
Before writing one of these, please make sure you understand some basics about
|
||||
the project and how the tool works. After this, it is recommended that you join
|
||||
our discussion channel to suggest the idea, and ideally include the actual API
|
||||
you'd like to propose before writing the code and sending a patch.
|
||||
|
||||
## Making an excellent medium patch proposal
|
||||
|
||||
The "API" of a resource is the type signature of the resource struct, and the
|
||||
"API" of a function is the type signature or signatures that it supports. (Since
|
||||
functions can be polymorphic, more than one signature can be possible!) A good
|
||||
proposal would likely also comment on the mechanisms the resources or functions
|
||||
would use to watch for events, to check state, and to apply changes. If these
|
||||
mechanisms need new dependencies, a brief survey of which dependencies are
|
||||
available and why you recommend a particular one is encouraged.
|
||||
|
||||
## Large patches or structural and core patches
|
||||
|
||||
Please do not send us large, core or structurally significant patches without
|
||||
first getting our approval and without getting some medium patches in first.
|
||||
These patches take a lot of effort to review, and we don't want to skimp on our
|
||||
commitment to that if we can't muster it. Instead, grow our relationship with us
|
||||
on the medium-sized patches first. (A core patch might refer to something that
|
||||
touches either the function engine, resource engine, compiler internals, or
|
||||
something that is part of one of the internal API's.)
|
||||
|
||||
## Expectations and boundaries
|
||||
|
||||
When interacting with the project and soliciting feedback (either for design or
|
||||
during a code review) please keep in mind that the project (unfortunately!) has
|
||||
time constraints and so must prioritize how it handles workloads. If you are
|
||||
someone who has successfully sent in small patches, we will be more willing to
|
||||
spend time mentoring your medium sized patches and so on. Think of it this way:
|
||||
as you show that you're contributing to the project, we'll contribute more to
|
||||
you. Put another way: we can't afford to spend large amounts of time discussing
|
||||
potential patches with you, just to end up nowhere. Build up your reputation
|
||||
with us, and we hope to help grow our symbiosis with you all the while as you
|
||||
grow too!
|
||||
|
||||
## Energy output
|
||||
|
||||
The same goes for users and issue creators. There are times when we simply don't
|
||||
have the cycles to discuss or litigate an issue with you. We wish we did have
|
||||
more time, but it is finite, and running a project is not free. Therefore,
|
||||
please keep in mind that you don't automatically qualify for free support or
|
||||
attention.
|
||||
|
||||
## Attention seeking behaviours
|
||||
|
||||
Some folks spend too much time starting discussions, commenting on issues,
|
||||
"planning" and otherwise displaying attention seeking behaviours. Please avoid
|
||||
doing this as much as possible, especially if you are not already a major
|
||||
contributor to the project. While it may be well intentioned, if it is
|
||||
indistinguishable to us from intentional interference, then it's not welcome
|
||||
behaviour. Remember that Free Software is not free to write. If you require more
|
||||
attention, then either contribute more to the project, or consider paying for a
|
||||
[support contract](https://mgmtconfig.com/).
|
||||
|
||||
## Consulting
|
||||
|
||||
Having said all that, there are some folks who want to do some longer-term
|
||||
planning to decide if our core design and architecture is right for them to
|
||||
invest in. If that's the case, and you aren't already a well-known project
|
||||
contributor, please [contact](https://mgmtconfig.com/) us for a consulting
|
||||
quote. We have packages available for both individuals and businesses.
|
||||
|
||||
## Respect
|
||||
|
||||
Please be mindful and respectful of others when interacting with the project and
|
||||
its contributors. If you cannot abide by that, you may no longer be welcome.
|
||||
@@ -16,7 +16,7 @@ be working properly.
|
||||
|
||||
## Using Docker
|
||||
|
||||
Alternatively, you can check out the [docker-guide](docker-guide.md) in order to
|
||||
Alternatively, you can check out the [docker folder](../docker/) in order to
|
||||
develop or deploy using docker. This method is not endorsed or supported, so use
|
||||
at your own risk, as it might not be working properly.
|
||||
|
||||
@@ -28,8 +28,9 @@ required for running the _test_ suite.
|
||||
|
||||
### Build
|
||||
|
||||
* `golang` 1.20 or higher (required, available in some distros and distributed
|
||||
as a binary officially by [golang.org](https://golang.org/dl/))
|
||||
* A modern `golang` version. The version available in the current Fedora
|
||||
releases is usually supported. This is also distributed as a binary officially
|
||||
by [golang.org](https://golang.org/dl/).
|
||||
|
||||
### Runtime
|
||||
|
||||
@@ -138,7 +139,7 @@ easy as copying one of the files in [`test/shell/`](/test/shell) and adapting
|
||||
it.
|
||||
|
||||
This test suite won't run by default (unless when on CI server) and needs to be
|
||||
called explictly using:
|
||||
called explicitly using:
|
||||
|
||||
```
|
||||
make test-shell
|
||||
|
||||
50
docs/docs.go
Normal file
@@ -0,0 +1,50 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
// Package docs provides a tool that generates documentation from the source.
|
||||
//
|
||||
// ./mgmt docs generate --output /tmp/docs.json && cat /tmp/docs.json | jq
|
||||
package docs
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// API is the simple interface we expect for any setup items.
|
||||
type API interface {
|
||||
// Main runs everything for this setup item.
|
||||
Main(context.Context) error
|
||||
}
|
||||
|
||||
// Config is a struct of all the configuration values which are shared by all of
|
||||
// the setup utilities. By including this as a separate struct, it can be used
|
||||
// as part of the API if we want.
|
||||
type Config struct {
|
||||
//Foo string `arg:"--foo,env:MGMT_DOCGEN_FOO" help:"Foo..."` // TODO: foo
|
||||
}
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
## Overview
|
||||
|
||||
The `mgmt` tool is a next generation config management prototype. It's not yet
|
||||
ready for production, but we hope to get there soon. Get involved today!
|
||||
The `mgmt` tool is a next generation config management solution. It's ready for
|
||||
production, and we hope you try out the future soon. Get involved today!
|
||||
|
||||
## Project Description
|
||||
|
||||
@@ -131,6 +131,33 @@ execute via a `remote` resource.
|
||||
You can read the introductory blog post about this topic here:
|
||||
[https://purpleidea.com/blog/2016/10/07/remote-execution-in-mgmt/](https://purpleidea.com/blog/2016/10/07/remote-execution-in-mgmt/)
|
||||
|
||||
### Puppet support
|
||||
|
||||
You can supply a puppet manifest instead of creating the (YAML) graph manually.
|
||||
Puppet must be installed and in `mgmt`'s search path. You also need the
|
||||
[ffrank-mgmtgraph puppet module](https://forge.puppet.com/ffrank/mgmtgraph).
|
||||
|
||||
Invoke `mgmt` with the `--puppet` switch, which supports 3 variants:
|
||||
|
||||
1. Request the configuration from the puppet server (like `puppet agent` does)
|
||||
|
||||
`mgmt run puppet --puppet agent`
|
||||
|
||||
2. Compile a local manifest file (like `puppet apply`)
|
||||
|
||||
`mgmt run puppet --puppet /path/to/my/manifest.pp`
|
||||
|
||||
3. Compile an ad hoc manifest from the commandline (like `puppet apply -e`)
|
||||
|
||||
`mgmt run puppet --puppet 'file { "/etc/ntp.conf": ensure => file }'`
|
||||
|
||||
For more details and caveats see [puppet-guide.md](puppet-guide.md).
|
||||
|
||||
#### Blog post
|
||||
|
||||
An introductory post on the puppet support is on
|
||||
[Felix's blog](http://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/).
|
||||
|
||||
## Reference
|
||||
|
||||
Please note that there are a number of undocumented options. For more
|
||||
@@ -247,6 +274,72 @@ and it can't guarantee it if the resource is blocked because of a failed
|
||||
pre-requisite resource.
|
||||
*XXX: This is currently not implemented!*
|
||||
|
||||
#### Dollar
|
||||
|
||||
Boolean. Dollar allows you to have a resource name that starts with a `$` sign.
|
||||
This is false by default. This helps you catch cases when you write code like:
|
||||
|
||||
```mcl
|
||||
$foo = "/tmp/file1"
|
||||
file "$foo" {} # incorrect!
|
||||
```
|
||||
|
||||
The above code would ignore the `$foo` variable and attempt to make a file named
|
||||
`$foo` which would obviously not work. To correctly interpolate a variable, you
|
||||
need to surround the name with curly braces.
|
||||
|
||||
```mcl
|
||||
$foo = "/tmp/file1"
|
||||
file "${foo}" {} # correct!
|
||||
```
|
||||
|
||||
This meta param is a safety measure to make your life easier. It works for all
|
||||
resources. If someone comes up with a resource which would routinely start with
|
||||
a dollar sign, then we can revisit the default for this resource kind.
|
||||
|
||||
#### Hidden
|
||||
|
||||
Boolean. Hidden means that this resource will not get executed on the resource
|
||||
graph on which it is defined. This can be used as a simple boolean switch, or,
|
||||
more commonly in combination with the Export meta param which specifies that the
|
||||
resource params are exported into the shared database. When this is true, it
|
||||
does not prevent export. In fact, it is commonly used in combination with
|
||||
Export. Using this option will still include it in the resource graph, but it
|
||||
will exist there in a special "mode" where it will not conflict with any other
|
||||
identically named resources. It can even be used as part of an edge or via a
|
||||
send/recv receiver. It can NOT be a sending vertex. These properties
|
||||
differentiate the use of this instead of simply wrapping a resource in an "if"
|
||||
statement.
|
||||
|
||||
#### Export
|
||||
|
||||
List of strings. Export is a list of hostnames (and/or the special "*" entry)
|
||||
which if set, will mark this resource data as intended for export to those
|
||||
hosts. This does not prevent any users of the shared data storage from reading
|
||||
these values, so if you want to guarantee secrecy, use the encryption
|
||||
primitives. This only labels the data accordingly, so that other hosts can know
|
||||
what data is available for them to collect. The (kind, name, host) export triple
|
||||
must be unique from any given exporter. In other words, you may not export two
|
||||
different instances of a kind+name to the same host, the exports must not
|
||||
conflict. On resource collect, this parameter is not preserved.
|
||||
|
||||
```mcl
|
||||
file "/tmp/foo" {
|
||||
state => "exists",
|
||||
content => "i'm exported!\n",
|
||||
|
||||
Meta:hidden => true,
|
||||
Meta:export => ["h1",],
|
||||
}
|
||||
|
||||
file "/tmp/foo" {
|
||||
state => "exists",
|
||||
content => "i'm exported AND i'm used here\n",
|
||||
|
||||
Meta:export => ["h1",],
|
||||
}
|
||||
```
|
||||
|
||||
#### Reverse
|
||||
|
||||
Boolean. Reverse is a property that some resources can implement that specifies
|
||||
@@ -335,7 +428,7 @@ size of 42, you can expect a semaphore if named: `:42`. It is expected that
|
||||
consumers of the semaphore metaparameter always include a prefix to avoid a
|
||||
collision with this globally defined semaphore. The size value must be greater
|
||||
than zero at this time. The traditional non-parallel execution found in config
|
||||
management tools such as `Puppet` can be obtained with `--sema 1`.
|
||||
management tools such as `puppet` can be obtained with `--sema 1`.
|
||||
|
||||
#### `--ssh-priv-id-rsa`
|
||||
|
||||
@@ -410,7 +503,7 @@ directory in the git source repository. It is available from:
|
||||
|
||||
### Systemd:
|
||||
|
||||
See [`misc/mgmt.service`](misc/mgmt.service) for a sample systemd unit file.
|
||||
See [`misc/mgmt.service`](../misc/mgmt.service) for a sample systemd unit file.
|
||||
This unit file is part of the RPM.
|
||||
|
||||
To specify your custom options for `mgmt` on a systemd distro:
|
||||
@@ -443,7 +536,7 @@ To report any bugs, please file a ticket at: [https://github.com/purpleidea/mgmt
|
||||
|
||||
## Authors
|
||||
|
||||
Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
Copyright (C) James Shubin and the project contributors
|
||||
|
||||
Please see the
|
||||
[AUTHORS](https://github.com/purpleidea/mgmt/tree/master/AUTHORS) file
|
||||
|
||||
95
docs/faq.md
@@ -53,16 +53,13 @@ find a number of tutorials online.
|
||||
3. Spend between four to six hours with the [golang tour](https://tour.golang.org/).
|
||||
Skip over the longer problems, but try and get a solid overview of everything.
|
||||
If you forget something, you can always go back and repeat those parts.
|
||||
4. Connect to our [#mgmtconfig](https://web.libera.chat/?channels=#mgmtconfig)
|
||||
IRC channel on the [Libera.Chat](https://libera.chat/) network. You can use any
|
||||
IRC client that you'd like, but the [hosted web portal](https://web.libera.chat/?channels=#mgmtconfig)
|
||||
will suffice if you don't know what else to use. [Here are a few suggestions for
|
||||
alternative clients.](https://libera.chat/guides/clients)
|
||||
4. Connect to our [#mgmtconfig](https://matrix.to/#/#mgmtconfig:matrix.org)
|
||||
Matrix channel and hang out with us there.
|
||||
5. Now it's time to try and start writing a patch! We have tagged a bunch of
|
||||
[open issues as #mgmtlove](https://github.com/purpleidea/mgmt/issues?q=is%3Aissue+is%3Aopen+label%3Amgmtlove)
|
||||
for new users to have somewhere to get involved. Look through them to see if
|
||||
something interests you. If you find one, let us know you're working on it by
|
||||
leaving a comment in the ticket. We'll be around to answer questions in the IRC
|
||||
leaving a comment in the ticket. We'll be around to answer questions in the
|
||||
channel, and to create new issues if there wasn't something that fit your
|
||||
interests. When you submit a patch, we'll review it and give you some feedback.
|
||||
Over time, we hope you'll learn a lot while supporting the project! Now get
|
||||
@@ -280,9 +277,76 @@ prevent masking an error for a situation when you expected a file to already be
|
||||
at that location. It also turns out to simplify the internals significantly, and
|
||||
remove an ambiguous scenario with the reversible file resource.
|
||||
|
||||
### Package resources error with: "The name is not activatable", what's wrong?
|
||||
|
||||
You may see an error like:
|
||||
|
||||
`main: error running auto edges: The name is not activatable`
|
||||
|
||||
This can happen because the mgmt `pkg` resource uses a library and daemon called
|
||||
`PackageKit` to install packages. If it is not installed, then it cannot do its
|
||||
work. On a Fedora system you may wish to run `dnf install /usr/bin/pkcon` or on a
|
||||
Debian system you may wish to run `apt install packagekit-tools`.
|
||||
|
||||
PackageKit is excellent because it provides both an API and an event system to
|
||||
watch the package database for changes, and it abstracts away the differences
|
||||
between the various package managers. If you'd prefer to not need to install
|
||||
this tool, then you can contribute a native `pkg:rpm` and `pkg:deb` resource to
|
||||
mgmt!
|
||||
|
||||
### When running mgmt, it says: "module path error: can't find a module path".
|
||||
|
||||
You might get an error along the lines of:
|
||||
|
||||
```
|
||||
could not set scope: import scope `git://github.com/purpleidea/mgmt/modules/some_module_name/` failed: module path error: can't find a module path
|
||||
```
|
||||
|
||||
This usually means that you haven't specified the directory that mgmt should use
|
||||
when looking for modules. This could happen when using mgmt interactively or
|
||||
when it's being run as a service. In such cases you may want the main invocation
|
||||
to look something like:
|
||||
|
||||
```
|
||||
mgmt run lang --module-path '/etc/mgmt/modules/' /etc/mgmt/main.mcl
|
||||
```
|
||||
|
||||
### I get an error: "cannot open shared object file: No such file or directory".
|
||||
|
||||
Mgmt currently uses two libraries that depend on `.so` files being installed on
|
||||
the host. Those are for `augeas` and `libvirt`. If those dependencies are not
|
||||
present, then mgmt will not run. The complete error might look like:
|
||||
|
||||
```
|
||||
mgmt: error while loading shared libraries: libvirt-lxc.so.0: cannot open shared object file: No such file or directory
|
||||
```
|
||||
|
||||
or:
|
||||
|
||||
```
|
||||
mgmt: error while loading shared libraries: libaugeas.so.0: cannot open shared object file: No such file or directory
|
||||
```
|
||||
|
||||
or something similar. There are two solutions to this:
|
||||
|
||||
1. Use a build that doesn't include one or both of those features. You can build
|
||||
that like: `GOTAGS="noaugeas novirt nodocker" make build`.
|
||||
|
||||
2. Install those dependencies. On a Fedora machine you might want to run:
|
||||
|
||||
```
|
||||
dnf install libvirt-devel augeas-devel
|
||||
```
|
||||
|
||||
On a Debian machine you might want to run:
|
||||
|
||||
```
|
||||
apt install libvirt-dev libaugeas-dev
|
||||
```
|
||||
|
||||
### Why do function names inside of templates include underscores?
|
||||
|
||||
The golang template library which we use to implement the template() function
|
||||
The golang template library which we use to implement the golang.template() func
|
||||
doesn't support the dot notation, so we import all our normal functions, and
|
||||
just replace dots with underscores. As an example, the standard `datetime.print`
|
||||
function is shown within mcl scripts as datetime_print after being imported.
|
||||
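As a hedged illustration of the constraint (standalone Go, not mgmt's actual import machinery): Go's `text/template` package requires function names to be plain identifiers, so a dotted name cannot be registered, while its underscore form can.

```golang
// Illustrative only: why "datetime.print" becomes "datetime_print" in templates.
package main

import (
	"os"
	"text/template"
	"time"
)

func main() {
	funcs := template.FuncMap{
		// A key like "datetime.print" would be rejected by Funcs(), since
		// template function names must be valid identifiers (no dots).
		"datetime_print": func(t int64) string {
			return time.Unix(t, 0).UTC().Format(time.RFC3339)
		},
	}
	tmpl := template.Must(template.New("t").Funcs(funcs).Parse(
		"now: {{ datetime_print . }}\n"))
	if err := tmpl.Execute(os.Stdout, int64(0)); err != nil {
		panic(err) // prints: now: 1970-01-01T00:00:00Z
	}
}
```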
@@ -320,7 +384,7 @@ an instance of mgmt running, or if a related file locking issue occurred. To
|
||||
solve this, shut down any running mgmt process, run `rm mgmt` to remove the file,
|
||||
and then get a new one by running `make` again.
|
||||
|
||||
### Type unification error: "could not unify types: 2 unconsumed generators".
|
||||
### Type unification error with string interpolation.
|
||||
|
||||
Look carefully at the following code:
|
||||
|
||||
@@ -343,8 +407,13 @@ print "hello" {
|
||||
}
|
||||
```
|
||||
|
||||
Yes we know the compiler gives horrible error messages, and yes we would
|
||||
absolutely love your help improving this.
|
||||
The first example will usually error with something along the lines of:
|
||||
|
||||
`unify error with: topLevel(func() { <built-in:concat> }): type error: int != str`
|
||||
|
||||
Now you know why this specific case doesn't work! We may reconsider allowing
|
||||
other types to be pulled into interpolation in the future. If you have a good
|
||||
case for this, then let us know.
|
||||
|
||||
### The run and deploy commands don't parse correctly when used with `--seeds`.
|
||||
|
||||
@@ -462,9 +531,7 @@ which definitely existed before the band did.
|
||||
|
||||
### You didn't answer my question, or I have a question!
|
||||
|
||||
It's best to ask on [IRC](https://web.libera.chat/?channels=#mgmtconfig)
|
||||
to see if someone can help you. If you don't get a response from IRC, you can
|
||||
contact me through my [technical blog](https://purpleidea.com/contact/) and I'll
|
||||
do my best to help. If you have a good question, please add it as a patch to
|
||||
It's best to ask on [Matrix](https://matrix.to/#/#mgmtconfig:matrix.org) to see
|
||||
if someone can help. If you don't get a response there, you can send a patch to
|
||||
this documentation. I'll merge your question, and add a patch with the answer!
|
||||
For news and updates, subscribe to the [mailing list](https://www.redhat.com/mailman/listinfo/mgmtconfig-list).
|
||||
|
||||
@@ -41,7 +41,7 @@ To implement a function, you'll need to create a file that imports the
|
||||
[`lang/funcs/simple/`](https://github.com/purpleidea/mgmt/tree/master/lang/funcs/simple/)
|
||||
module. It should probably get created in the correct directory inside of:
|
||||
[`lang/core/`](https://github.com/purpleidea/mgmt/tree/master/lang/core/). The
|
||||
function should be implemented as a `FuncValue` in our type system. It is then
|
||||
function should be implemented as a `simple.Scaffold` in our API. It is then
|
||||
registered with the engine during `init()`. An example explains it best:
|
||||
|
||||
### Example
|
||||
@@ -50,6 +50,7 @@ registered with the engine during `init()`. An example explains it best:
|
||||
package simple
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/purpleidea/mgmt/lang/funcs/simple"
|
||||
@@ -59,9 +60,10 @@ import (
|
||||
// you must register your functions in init when the program starts up
|
||||
func init() {
|
||||
// Example function that squares an int and prints out answer as an str.
|
||||
simple.ModuleRegister(ModuleName, "talkingsquare", &types.FuncValue{
|
||||
|
||||
simple.ModuleRegister(ModuleName, "talkingsquare", &simple.Scaffold{
|
||||
T: types.NewType("func(int) str"), // declare the signature
|
||||
V: func(input []types.Value) (types.Value, error) {
|
||||
F: func(ctx context.Context, input []types.Value) (types.Value, error) {
|
||||
i := input[0].Int() // get first arg as an int64
|
||||
// must return the above specified value
|
||||
return &types.StrValue{
|
||||
@@ -87,109 +89,41 @@ mgmt engine to shutdown. It should be seen as the equivalent to calling a
|
||||
Ideally, your functions should never need to error. You should never cause a
|
||||
real `panic()`, since this could have negative consequences to the system.
|
||||
|
||||
## Simple Polymorphic Function API
|
||||
|
||||
Most functions should be implemented using the simple function API. If they need
|
||||
to have multiple polymorphic forms under the same name, then you can use this
|
||||
API. This is useful for situations when it would be unhelpful to name the
|
||||
functions differently, or when the number of possible signatures for the
|
||||
function would be infinite.
|
||||
|
||||
The canonical example of this is the `len` function which returns the number of
|
||||
elements in either a `list` or a `map`. Since lists and maps are two different
|
||||
types, you can see that polymorphism is more convenient than requiring a
|
||||
`listlen` and `maplen` function. Nevertheless, it is also required because a
|
||||
`list of int` is a different type than a `list of str`, which is a different
|
||||
type than a `list of list of str` and so on. As you can see the number of
|
||||
possible input types for such a `len` function is infinite.
|
||||
|
||||
Another downside to implementing your functions with this API is that they will
|
||||
*not* be made available for use inside templates. This is a limitation of the
|
||||
`golang` template library. In the future if this limitation proves to be
|
||||
significantly annoying, we might consider writing our own template library.
|
||||
|
||||
As with the simple, non-polymorphic API, you can only implement [pure](https://en.wikipedia.org/wiki/Pure_function)
|
||||
functions, without writing too much boilerplate code. They will be automatically
|
||||
re-evaluated as needed when their input values change.
|
||||
|
||||
To implement a function, you'll need to create a file that imports the
|
||||
[`lang/funcs/simplepoly/`](https://github.com/purpleidea/mgmt/tree/master/lang/funcs/simplepoly/)
|
||||
module. It should probably get created in the correct directory inside of:
|
||||
[`lang/core/`](https://github.com/purpleidea/mgmt/tree/master/lang/core/). The
|
||||
function should be implemented as a list of `FuncValue`'s in our type system. It
|
||||
is then registered with the engine during `init()`. You may also use the
|
||||
`variant` type in your type definitions. This special type will never be seen
|
||||
inside a running program, and will get converted to a concrete type if a
|
||||
suitable match to this signature can be found. Be warned that signatures which
|
||||
contain too many variants, or which are very general, might be hard for the
|
||||
compiler to match, and ambiguous type graphs make for user compiler errors. The
|
||||
top-level type must still be a function type, it may only contain variants as
|
||||
part of its signature. It is probably more difficult to unify a function if its
|
||||
return type is a variant, as opposed to if one of its args was.
|
||||
|
||||
An example explains it best:
|
||||
|
||||
### Example
|
||||
|
||||
```golang
|
||||
package simple
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/purpleidea/mgmt/lang/funcs/simplepoly"
|
||||
"github.com/purpleidea/mgmt/lang/funcs/simple"
|
||||
"github.com/purpleidea/mgmt/lang/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// You may use the simplepoly.ModuleRegister method to register your
|
||||
// function if it's in a module, as seen in the simple function example.
|
||||
simplepoly.Register("len", []*types.FuncValue{
|
||||
{
|
||||
T: types.NewType("func([]variant) int"),
|
||||
V: Len,
|
||||
},
|
||||
{
|
||||
T: types.NewType("func({variant: variant}) int"),
|
||||
V: Len,
|
||||
},
|
||||
// This is the actual definition of the `len` function.
|
||||
simple.Register("len", &simple.Scaffold{
|
||||
T: types.NewType("func(?1) int"), // contains a unification var
|
||||
C: simple.TypeMatch([]string{ // match on any of these sigs
|
||||
"func(str) int",
|
||||
"func([]?1) int",
|
||||
"func(map{?1: ?2}) int",
|
||||
}),
|
||||
// The implementation is left as an exercise for the reader.
|
||||
F: Len,
|
||||
})
|
||||
}
|
||||
|
||||
// Len returns the number of elements in a list or the number of key pairs in a
|
||||
// map. It can operate on either of these types.
|
||||
func Len(input []types.Value) (types.Value, error) {
|
||||
var length int
|
||||
switch k := input[0].Type().Kind; k {
|
||||
case types.KindList:
|
||||
length = len(input[0].List())
|
||||
case types.KindMap:
|
||||
length = len(input[0].Map())
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported kind: %+v", k)
|
||||
}
|
||||
|
||||
return &types.IntValue{
|
||||
V: int64(length),
|
||||
}, nil
|
||||
}
|
||||
```
|
||||
|
||||
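For readers who want to see the exercise worked, here is a hedged sketch of what a `Len` implementation could look like against the newer scaffold signature shown above; it assumes the same imports as the earlier example, and the kind constants and value accessors used here are assumptions, not guaranteed to match the project's actual code.

```golang
// Hypothetical sketch of Len for the scaffold above (ctx-style signature).
func Len(ctx context.Context, input []types.Value) (types.Value, error) {
	var length int
	switch k := input[0].Type().Kind; k {
	case types.KindStr:
		length = len(input[0].Str())
	case types.KindList:
		length = len(input[0].List())
	case types.KindMap:
		length = len(input[0].Map())
	default:
		return nil, fmt.Errorf("unsupported kind: %+v", k)
	}
	return &types.IntValue{
		V: int64(length),
	}, nil
}
```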
This simple polymorphic function can accept an infinite number of signatures, of
|
||||
which there are two basic forms. Both forms return an `int` as is seen above.
|
||||
The first form takes a `[]variant` which means a `list` of `variant`'s, which
|
||||
means that it can be a list of any type, since `variant` itself is not a
|
||||
concrete type. The second form accepts a `{variant: variant}`, which means that
|
||||
it accepts any form of `map` as input.
|
||||
## Simple Polymorphic Function API
|
||||
|
||||
The implementation for both of these forms is the same: it is handled by the
|
||||
same `Len` function which is clever enough to be able to deal with any of the
|
||||
type signatures possible from those two patterns.
|
||||
|
||||
At compile time, if your `mcl` code type checks correctly, a concrete type will
|
||||
be known for each and every usage of the `len` function, and specific values
|
||||
will be passed in for this code to compute the length of. As usual, make sure to
|
||||
only write safe code that will not panic! A panic is a bug. If you really cannot
|
||||
continue, then you must return an error.
|
||||
Most functions should be implemented using the simple function API. If they need
|
||||
to have multiple polymorphic forms under the same name, with each resultant type
|
||||
match needing to be paired to a different implementation, then you can use this
|
||||
API. This is useful for situations when the functions differ in output type
|
||||
only.
|
||||
|
||||
## Function API
|
||||
|
||||
@@ -243,66 +177,69 @@ func (obj *FooFunc) Init(init *interfaces.Init) error {
|
||||
}
|
||||
```
|
||||
|
||||
### Call
|
||||
|
||||
Call is run when you want to return a new value from the function. It takes the
|
||||
input arguments to the function.
|
||||
|
||||
#### Example
|
||||
|
||||
```golang
|
||||
func (obj *FooFunc) Call(ctx context.Context, args []types.Value) (types.Value, error) {
|
||||
return &types.StrValue{ // Our type system "str" (string) value.
|
||||
V: strconv.FormatInt(args[0].Int(), 10), // a golang string
|
||||
}, nil
|
||||
}
|
||||
```
|
||||
|
||||
### Stream
|
||||
|
||||
```golang
|
||||
Stream(context.Context) error
|
||||
```
|
||||
|
||||
`Stream` is where the real _work_ is done. This method is started by the
|
||||
language function engine. It will run this function while simultaneously sending
|
||||
it values on the `Input` channel. It will only send a complete set of input
|
||||
values. You should send a value to the output channel when you have decided that
|
||||
one should be produced. Make sure to only use input values of the expected type
|
||||
as declared in the `Info` struct, and send values of the similarly declared
|
||||
appropriate return type. Failure to do so will may result in a panic and
|
||||
sadness. You must shutdown if the input context cancels. You must close the
|
||||
`Output` channel if you are done generating new values and/or when you shutdown.
|
||||
`Stream` is where any evented work is done. This method is started by the
|
||||
function engine. It will run this function once. It should call the
|
||||
`obj.init.Event()` method when it believes the function engine should run
|
||||
`Call()` again.
|
||||
|
||||
Implementing this is not required if you don't have events.
|
||||
|
||||
If the `ctx` closes, you must shut down as soon as possible.
|
||||
|
||||
#### Example
|
||||
|
||||
```golang
|
||||
// Stream returns the single value that was generated and then closes.
|
||||
// Stream starts a mainloop and runs Event when it's time to Call() again.
|
||||
func (obj *FooFunc) Stream(ctx context.Context) error {
|
||||
defer close(obj.init.Output) // the sender closes
|
||||
var result string
|
||||
|
||||
ticker := time.NewTicker(time.Duration(1) * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
// streams must generate an initial event on startup
|
||||
// even though ticker will send one, we want to be faster to first event
|
||||
startChan := make(chan struct{}) // start signal
|
||||
close(startChan) // kick it off!
|
||||
|
||||
for {
|
||||
select {
|
||||
case input, ok := <-obj.init.Input:
|
||||
if !ok {
|
||||
return nil // can't output any more
|
||||
}
|
||||
case <-startChan:
|
||||
startChan = nil // disable
|
||||
|
||||
ix := input.Struct()["a"].Int()
|
||||
if ix < 0 {
|
||||
return fmt.Errorf("we can't deal with negatives")
|
||||
}
|
||||
|
||||
result = fmt.Sprintf("the input is: %d", ix)
|
||||
case <-ticker.C: // received the timer event
|
||||
// pass
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case obj.init.Output <- &types.StrValue{
|
||||
V: result,
|
||||
}:
|
||||
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
if err := obj.init.Event(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
As you can see, we read our inputs from the `input` channel, and write to the
|
||||
`output` channel. Our code is careful to never block or deadlock, and can always
|
||||
exit if a close signal is requested. It also cleans up after itself by closing
|
||||
the `output` channel when it is done using it. This is done easily with `defer`.
|
||||
If it notices that the `input` channel closes, then it knows that no more input
|
||||
values are coming and it can consider shutting down early.
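
As a minimal sketch of the purely evented pattern described above (assuming
only the `obj.init.Event(ctx)` helper named in this guide; everything else is
illustrative), such a `Stream` might look like this:

```golang
// Stream here is a sketch only: it asks the engine to run Call() once at
// startup and then again every second, and it shuts down when ctx closes.
func (obj *FooFunc) Stream(ctx context.Context) error {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	// generate an initial event on startup, without waiting for the ticker
	startChan := make(chan struct{}) // start signal
	close(startChan)                 // kick it off!

	for {
		select {
		case <-startChan:
			startChan = nil // disable after the first event

		case <-ticker.C: // received the timer event
			// pass

		case <-ctx.Done(): // shutdown as soon as possible
			return nil
		}

		// tell the function engine that it should run Call() again
		if err := obj.init.Event(ctx); err != nil {
			return err
		}
	}
}
```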
|
||||
|
||||
## Further considerations
|
||||
|
||||
There is some additional information that any function author will need to know.
|
||||
@@ -358,23 +295,6 @@ We don't expect this functionality to be particularly useful or common, as it's
|
||||
probably easier and preferable to simply import common golang library code into
|
||||
multiple different functions instead.
|
||||
|
||||
## Polymorphic Function API
|
||||
|
||||
The polymorphic function API is an API that lets you implement functions which
|
||||
do not necessarily have a single static function signature. After compile time,
|
||||
all functions must have a static function signature. We also know that there
|
||||
might be different ways you would want to call `printf`, such as:
|
||||
`printf("the %s is %d", "answer", 42)` or `printf("3 * 2 = %d", 3 * 2)`. Since
|
||||
you couldn't implement the infinite number of possible signatures, this API lets
|
||||
you write code which can be coerced into different forms. This makes it
|
||||
possible to implement what appears to be a generic or polymorphic function
|
||||
using something that is actually static and that still has the static type
|
||||
safety properties that were guaranteed by the mgmt language.
|
||||
|
||||
Since this is an advanced topic, it is not described in full at this time. For
|
||||
more information please have a look at the source code comments, some of the
|
||||
existing implementations, and ask around in the community.
|
||||
|
||||
## Frequently asked questions
|
||||
|
||||
(Send your questions as a patch to this FAQ! I'll review it, merge it, and
|
||||
@@ -410,7 +330,7 @@ Yes, you can use a function generator in `golang` to build multiple different
|
||||
implementations from the same function generator. You just need to implement a
|
||||
function which *returns* a `golang` type of `func([]types.Value) (types.Value, error)`
|
||||
which is what `FuncValue` expects. The generator function can use any input it
|
||||
wants to build the individual functions, thus helping with code re-use.
|
||||
wants to build the individual functions, thus helping with code reuse.
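
As a hedged illustration of that pattern, a generator might look like the
sketch below; the `makeSuffixCheck` name and its `suffix` parameter are
hypothetical, and only the returned function shape comes from the answer
above:

```golang
// makeSuffixCheck is a hypothetical generator: it returns a new function of
// the shape that FuncValue expects, closing over the suffix argument so that
// the same logic can be reused to build many slightly different functions.
func makeSuffixCheck(suffix string) func([]types.Value) (types.Value, error) {
	return func(args []types.Value) (types.Value, error) {
		if len(args) != 1 {
			return nil, fmt.Errorf("expected exactly one argument")
		}
		return &types.BoolValue{ // our type system "bool" value
			V: strings.HasSuffix(args[0].Str(), suffix),
		}, nil
	}
}
```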
|
||||
|
||||
### How do I determine the signature of my simple, polymorphic function?
|
||||
|
||||
|
||||
docs/generate.go (new file, 795 lines)
@@ -0,0 +1,795 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package docs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
cliUtil "github.com/purpleidea/mgmt/cli/util"
|
||||
docsUtil "github.com/purpleidea/mgmt/docs/util"
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
engineUtil "github.com/purpleidea/mgmt/engine/util"
|
||||
"github.com/purpleidea/mgmt/lang/funcs"
|
||||
"github.com/purpleidea/mgmt/lang/interfaces"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
)
|
||||
|
||||
const (
|
||||
// JSONSuffix is the output extension for the generated documentation.
|
||||
JSONSuffix = ".json"
|
||||
)
|
||||
|
||||
// Generate is the main entrypoint for this command. It generates everything.
|
||||
type Generate struct {
|
||||
*cliUtil.DocsGenerateArgs // embedded config
|
||||
Config // embedded Config
|
||||
|
||||
// Program is the name of this program, usually set at compile time.
|
||||
Program string
|
||||
|
||||
// Version is the version of this program, usually set at compile time.
|
||||
Version string
|
||||
|
||||
// Debug represents if we're running in debug mode or not.
|
||||
Debug bool
|
||||
|
||||
// Logf is a logger which should be used.
|
||||
Logf func(format string, v ...interface{})
|
||||
}
|
||||
|
||||
// Main runs everything for this setup item.
|
||||
func (obj *Generate) Main(ctx context.Context) error {
|
||||
if err := obj.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := obj.Run(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate verifies that the structure has acceptable data stored within.
|
||||
func (obj *Generate) Validate() error {
|
||||
if obj == nil {
|
||||
return fmt.Errorf("data is nil")
|
||||
}
|
||||
if obj.Program == "" {
|
||||
return fmt.Errorf("program is empty")
|
||||
}
|
||||
if obj.Version == "" {
|
||||
return fmt.Errorf("version is empty")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run performs the desired actions to generate the documentation.
|
||||
func (obj *Generate) Run(ctx context.Context) error {
|
||||
|
||||
outputFile := obj.DocsGenerateArgs.Output
|
||||
if outputFile == "" || !strings.HasSuffix(outputFile, JSONSuffix) {
|
||||
return fmt.Errorf("must specify output")
|
||||
}
|
||||
// support relative paths too!
|
||||
if !strings.HasPrefix(outputFile, "/") {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
outputFile = filepath.Join(wd, outputFile)
|
||||
}
|
||||
|
||||
if obj.Debug {
|
||||
obj.Logf("output: %s", outputFile)
|
||||
}
|
||||
|
||||
// Ensure the directory exists.
|
||||
//d := filepath.Dir(outputFile)
|
||||
//if err := os.MkdirAll(d, 0750); err != nil {
|
||||
// return fmt.Errorf("could not make output dir at: %s", d)
|
||||
//}
|
||||
|
||||
resources, err := obj.genResources()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
functions, err := obj.genFunctions()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data := &Output{
|
||||
Version: safeVersion(obj.Version),
|
||||
Resources: resources,
|
||||
Functions: functions,
|
||||
}
|
||||
|
||||
b, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b = append(b, '\n') // needs a trailing newline
|
||||
|
||||
if err := os.WriteFile(outputFile, b, 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
obj.Logf("wrote: %s", outputFile)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obj *Generate) getResourceInfo(kind, filename, structName string) (*ResourceInfo, error) {
|
||||
rootDir := obj.DocsGenerateArgs.RootDir
|
||||
if rootDir == "" {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rootDir = wd + "/" // add a trailing slash
|
||||
}
|
||||
if !strings.HasPrefix(rootDir, "/") || !strings.HasSuffix(rootDir, "/") {
|
||||
return nil, fmt.Errorf("bad root dir: %s", rootDir)
|
||||
}
|
||||
|
||||
// filename might be "noop.go" for example
|
||||
p := filepath.Join(rootDir, engine.ResourcesRelDir, filename)
|
||||
|
||||
fset := token.NewFileSet()
|
||||
|
||||
// f is a: https://golang.org/pkg/go/ast/#File
|
||||
f, err := parser.ParseFile(fset, p, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// mcl field name to golang field name
|
||||
mapping, err := engineUtil.LangFieldNameToStructFieldName(kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// golang field name to mcl field name
|
||||
nameMap, err := util.MapSwap(mapping)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// mcl field name to mcl type
|
||||
typMap, err := engineUtil.LangFieldNameToStructType(kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ri := &ResourceInfo{}
|
||||
// Populate the fields, even if they don't have a comment.
|
||||
ri.Name = structName // golang name
|
||||
ri.Kind = kind // duplicate data
|
||||
ri.File = filename
|
||||
ri.Fields = make(map[string]*ResourceFieldInfo)
|
||||
for mclFieldName, fieldName := range mapping {
|
||||
typ, exists := typMap[mclFieldName]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
|
||||
ri.Fields[mclFieldName] = &ResourceFieldInfo{
|
||||
Name: fieldName,
|
||||
Type: typ.String(),
|
||||
Desc: "", // empty for now
|
||||
}
|
||||
}
|
||||
|
||||
var previousComment *ast.CommentGroup
|
||||
|
||||
// Walk through the AST...
|
||||
ast.Inspect(f, func(node ast.Node) bool {
|
||||
|
||||
// Comments above the struct appear as a node right _before_ we
|
||||
// find the struct, so if we see one, save it for later...
|
||||
if cg, ok := node.(*ast.CommentGroup); ok {
|
||||
previousComment = cg
|
||||
return true
|
||||
}
|
||||
|
||||
typeSpec, ok := node.(*ast.TypeSpec)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
name := typeSpec.Name.Name // name is now known!
|
||||
|
||||
// If the struct isn't what we're expecting, then move on...
|
||||
if name != structName {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check if the TypeSpec is a named struct type that we want...
|
||||
st, ok := typeSpec.Type.(*ast.StructType)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
// At this point, we have the struct we want...
|
||||
|
||||
var comment *ast.CommentGroup
|
||||
if typeSpec.Doc != nil {
|
||||
// I don't know how to even get here...
|
||||
comment = typeSpec.Doc // found!
|
||||
|
||||
} else if previousComment != nil {
|
||||
comment = previousComment // found!
|
||||
previousComment = nil
|
||||
}
|
||||
|
||||
ri.Desc = commentCleaner(comment)
|
||||
|
||||
// Iterate over the fields of the struct
|
||||
for _, field := range st.Fields.List {
|
||||
// Check if the field has a comment associated with it
|
||||
if field.Doc == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(field.Names) < 1 { // XXX: why does this happen?
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := field.Names[0].Name
|
||||
if fieldName == "" { // Can this happen?
|
||||
continue
|
||||
}
|
||||
if isPrivate(fieldName) {
|
||||
continue
|
||||
}
|
||||
|
||||
mclFieldName, exists := nameMap[fieldName]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
|
||||
ri.Fields[mclFieldName].Desc = commentCleaner(field.Doc)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return ri, nil
|
||||
}
|
||||
|
||||
func (obj *Generate) genResources() (map[string]*ResourceInfo, error) {
|
||||
resources := make(map[string]*ResourceInfo)
|
||||
if obj.DocsGenerateArgs.NoResources {
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
r := engine.RegisteredResourcesNames()
|
||||
sort.Strings(r)
|
||||
for _, kind := range r {
|
||||
metadata, err := docsUtil.LookupResource(kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(kind, "_") {
|
||||
// TODO: Should we display these somehow?
|
||||
// built-in resource
|
||||
continue
|
||||
}
|
||||
|
||||
ri, err := obj.getResourceInfo(kind, metadata.Filename, metadata.Typename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ri.Name == "" {
|
||||
return nil, fmt.Errorf("empty resource name: %s", kind)
|
||||
}
|
||||
if ri.File == "" {
|
||||
return nil, fmt.Errorf("empty resource file: %s", kind)
|
||||
}
|
||||
if ri.Desc == "" {
|
||||
obj.Logf("empty resource desc: %s", kind)
|
||||
}
|
||||
fields := []string{}
|
||||
for field := range ri.Fields {
|
||||
fields = append(fields, field)
|
||||
}
|
||||
sort.Strings(fields)
|
||||
for _, field := range fields {
|
||||
if ri.Fields[field].Desc == "" {
|
||||
obj.Logf("empty resource (%s) field desc: %s", kind, field)
|
||||
}
|
||||
}
|
||||
|
||||
resources[kind] = ri
|
||||
}
|
||||
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
func (obj *Generate) getFunctionInfo(pkg, name string, metadata *docsUtil.Metadata) (*FunctionInfo, error) {
|
||||
rootDir := obj.DocsGenerateArgs.RootDir
|
||||
if rootDir == "" {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rootDir = wd + "/" // add a trailing slash
|
||||
}
|
||||
if !strings.HasPrefix(rootDir, "/") || !strings.HasSuffix(rootDir, "/") {
|
||||
return nil, fmt.Errorf("bad root dir: %s", rootDir)
|
||||
}
|
||||
if metadata.Filename == "" {
|
||||
return nil, fmt.Errorf("empty filename for: %s.%s", pkg, name)
|
||||
}
|
||||
|
||||
// filename might be "pow.go" for example and contain a rel dir
|
||||
p := filepath.Join(rootDir, funcs.FunctionsRelDir, metadata.Filename)
|
||||
|
||||
fset := token.NewFileSet()
|
||||
|
||||
// f is a: https://golang.org/pkg/go/ast/#File
|
||||
f, err := parser.ParseFile(fset, p, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fi := &FunctionInfo{}
|
||||
fi.Name = metadata.Typename
|
||||
fi.File = metadata.Filename
|
||||
|
||||
var previousComment *ast.CommentGroup
|
||||
found := false
|
||||
|
||||
rawFunc := func(node ast.Node) (*ast.CommentGroup, string) {
|
||||
fd, ok := node.(*ast.FuncDecl)
|
||||
if !ok {
|
||||
return nil, ""
|
||||
}
|
||||
return fd.Doc, fd.Name.Name // name is now known!
|
||||
}
|
||||
|
||||
rawStruct := func(node ast.Node) (*ast.CommentGroup, string) {
|
||||
typeSpec, ok := node.(*ast.TypeSpec)
|
||||
if !ok {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
// Check if the TypeSpec is a named struct type that we want...
|
||||
if _, ok := typeSpec.Type.(*ast.StructType); !ok {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
return typeSpec.Doc, typeSpec.Name.Name // name is now known!
|
||||
}
|
||||
|
||||
// Walk through the AST...
|
||||
ast.Inspect(f, func(node ast.Node) bool {
|
||||
|
||||
// Comments above the struct appear as a node right _before_ we
|
||||
// find the struct, so if we see one, save it for later...
|
||||
if cg, ok := node.(*ast.CommentGroup); ok {
|
||||
previousComment = cg
|
||||
return true
|
||||
}
|
||||
|
||||
doc, name := rawFunc(node) // First see if it's a raw func.
|
||||
if name == "" {
|
||||
doc, name = rawStruct(node) // Otherwise it's a struct.
|
||||
}
|
||||
|
||||
// If the func isn't what we're expecting, then move on...
|
||||
if name != metadata.Typename {
|
||||
return true
|
||||
}
|
||||
|
||||
var comment *ast.CommentGroup
|
||||
if doc != nil {
|
||||
// I don't know how to even get here...
|
||||
comment = doc // found!
|
||||
|
||||
} else if previousComment != nil {
|
||||
comment = previousComment // found!
|
||||
previousComment = nil
|
||||
}
|
||||
|
||||
fi.Desc = commentCleaner(comment)
|
||||
found = true
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
if !found {
|
||||
//return nil, nil
|
||||
}
|
||||
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
func (obj *Generate) genFunctions() (map[string]*FunctionInfo, error) {
|
||||
functions := make(map[string]*FunctionInfo)
|
||||
if obj.DocsGenerateArgs.NoFunctions {
|
||||
return functions, nil
|
||||
}
|
||||
|
||||
m := funcs.Map() // map[string]func() interfaces.Func
|
||||
names := []string{}
|
||||
for name := range m {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Slice(names, func(i, j int) bool {
|
||||
a := names[i]
|
||||
b := names[j]
|
||||
// TODO: do a sorted-by-package order.
|
||||
return a < b
|
||||
})
|
||||
|
||||
for _, name := range names {
|
||||
//v := m[name]
|
||||
//fn := v()
|
||||
fn := m[name]()
|
||||
|
||||
// eg: golang/strings.has_suffix
|
||||
sp := strings.Split(name, ".")
|
||||
if len(sp) == 0 {
|
||||
return nil, fmt.Errorf("unexpected empty function")
|
||||
}
|
||||
if len(sp) > 2 {
|
||||
return nil, fmt.Errorf("unexpected function name: %s", name)
|
||||
}
|
||||
n := sp[0]
|
||||
p := sp[0]
|
||||
if len(sp) == 1 { // built-in
|
||||
p = "" // no package!
|
||||
}
|
||||
if len(sp) == 2 { // normal import
|
||||
n = sp[1]
|
||||
}
|
||||
|
||||
if strings.HasPrefix(n, "_") {
|
||||
// TODO: Should we display these somehow?
|
||||
// built-in function
|
||||
continue
|
||||
}
|
||||
|
||||
var sig *string
|
||||
//iface := ""
|
||||
if x := fn.Info().Sig; x != nil {
|
||||
s := x.String()
|
||||
sig = &s
|
||||
//iface = "simple"
|
||||
}
|
||||
|
||||
metadata := &docsUtil.Metadata{}
|
||||
|
||||
// XXX: maybe we need a better way to get this?
|
||||
mdFunc, ok := fn.(interfaces.MetadataFunc)
|
||||
if !ok {
|
||||
// Function doesn't tell us what the data is, let's try
|
||||
// to get it automatically...
|
||||
metadata.Typename = funcs.GetFunctionName(fn) // works!
|
||||
metadata.Filename = "" // XXX: How can we get this?
|
||||
|
||||
// XXX: We only need this back-channel metadata store
|
||||
// because we don't know how to get the filename without
|
||||
// manually writing code in each function. Alternatively
|
||||
// we could add a New() method to each struct and then
|
||||
// we could modify the struct instead of having it be
|
||||
// behind a copy which is needed to get new copies!
|
||||
var err error
|
||||
metadata, err = docsUtil.LookupFunction(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else if mdFunc == nil {
|
||||
// programming error
|
||||
return nil, fmt.Errorf("unexpected empty metadata for function: %s", name)
|
||||
|
||||
} else {
|
||||
metadata = mdFunc.GetMetadata()
|
||||
}
|
||||
|
||||
if metadata == nil {
|
||||
return nil, fmt.Errorf("unexpected nil metadata for function: %s", name)
|
||||
}
|
||||
|
||||
// This may be an empty func name if the function did not know
|
||||
// how to get it. (This is normal for automatic regular funcs.)
|
||||
if metadata.Typename == "" {
|
||||
metadata.Typename = funcs.GetFunctionName(fn) // works!
|
||||
}
|
||||
|
||||
fi, err := obj.getFunctionInfo(p, n, metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// We may not get any fields added if we can't find anything...
|
||||
fi.Name = metadata.Typename
|
||||
fi.Package = p
|
||||
fi.Func = n
|
||||
fi.File = metadata.Filename
|
||||
//fi.Desc = desc
|
||||
fi.Signature = sig
|
||||
|
||||
// Hack for golang generated functions!
|
||||
if strings.HasPrefix(fi.Package, "golang/") && fi.File == "generated_funcs.go" {
|
||||
pkg := fi.Package[len("golang/"):]
|
||||
frag := strings.TrimPrefix(fi.Name, strings.Title(strings.Join(strings.Split(pkg, "/"), ""))) // yuck
|
||||
fi.File = fmt.Sprintf("https://pkg.go.dev/%s#%s", pkg, frag)
|
||||
}
|
||||
|
||||
if fi.Func == "" {
|
||||
return nil, fmt.Errorf("empty function name: %s", name)
|
||||
}
|
||||
if fi.File == "" {
|
||||
return nil, fmt.Errorf("empty function file: %s", name)
|
||||
}
|
||||
if fi.Desc == "" {
|
||||
obj.Logf("empty function desc: %s", name)
|
||||
}
|
||||
if fi.Signature == nil {
|
||||
obj.Logf("empty function sig: %s", name)
|
||||
}
|
||||
|
||||
functions[name] = fi
|
||||
}
|
||||
|
||||
return functions, nil
|
||||
}
|
||||
|
||||
// Output is the type of the final data that will be for the json output.
|
||||
type Output struct {
|
||||
// Version is the sha1 or ref name of this specific version. This is
|
||||
// used if we want to generate documentation with links matching the
|
||||
// correct version. If unspecified then this assumes git master.
|
||||
Version string `json:"version"`
|
||||
|
||||
// Resources contains the collection of every available resource!
|
||||
// FIXME: should this be a list instead?
|
||||
Resources map[string]*ResourceInfo `json:"resources"`
|
||||
|
||||
// Functions contains the collection of every available function!
|
||||
// FIXME: should this be a list instead?
|
||||
Functions map[string]*FunctionInfo `json:"functions"`
|
||||
}
|
||||
|
||||
// ResourceInfo stores some information about each resource.
|
||||
type ResourceInfo struct {
|
||||
// Name is the golang name of this resource.
|
||||
Name string `json:"name"`
|
||||
|
||||
// Kind is the kind of this resource.
|
||||
Kind string `json:"kind"`
|
||||
|
||||
// File is the file name where this resource exists.
|
||||
File string `json:"file"`
|
||||
|
||||
// Desc explains what this resource does.
|
||||
Desc string `json:"description"`
|
||||
|
||||
// Fields is a collection of each resource field and corresponding info.
|
||||
Fields map[string]*ResourceFieldInfo `json:"fields"`
|
||||
}
|
||||
|
||||
// ResourceFieldInfo stores some information about each field in each resource.
|
||||
type ResourceFieldInfo struct {
|
||||
// Name is what this field is called in golang format.
|
||||
Name string `json:"name"`
|
||||
|
||||
// Type is the mcl type for this field.
|
||||
Type string `json:"type"`
|
||||
|
||||
// Desc explains what this field does.
|
||||
Desc string `json:"description"`
|
||||
}
|
||||
|
||||
// FunctionInfo stores some information about each function.
|
||||
type FunctionInfo struct {
|
||||
// Name is the golang name of this function. This may be an actual
|
||||
// function if used by the simple API, or the name of a struct.
|
||||
Name string `json:"name"`
|
||||
|
||||
// Package is the import name to use to get to this function.
|
||||
Package string `json:"package"`
|
||||
|
||||
// Func is the name of the function in that package.
|
||||
Func string `json:"func"`
|
||||
|
||||
// File is the file name where this function exists.
|
||||
File string `json:"file"`
|
||||
|
||||
// Desc explains what this function does.
|
||||
Desc string `json:"description"`
|
||||
|
||||
// Signature is the type signature of this function. If empty then the
|
||||
// signature is not known statically and it may be polymorphic.
|
||||
Signature *string `json:"signature,omitempty"`
|
||||
}
|
||||
|
||||
// commentCleaner takes a comment group and returns it as a clean string. It
|
||||
// removes the spurious newlines and programmer-focused comments. If there are
|
||||
// blank lines, it replaces them with a single newline. The idea is that the
|
||||
// webpage formatter would replace the newline with a <br /> or similar. This
|
||||
// code is a modified alternative of the ast.CommentGroup.Text() function.
|
||||
func commentCleaner(g *ast.CommentGroup) string {
|
||||
if g == nil {
|
||||
return ""
|
||||
}
|
||||
comments := make([]string, len(g.List))
|
||||
for i, c := range g.List {
|
||||
comments[i] = c.Text
|
||||
}
|
||||
|
||||
lines := make([]string, 0, 10) // most comments are less than 10 lines
|
||||
for _, c := range comments {
|
||||
// Remove comment markers.
|
||||
// The parser has given us exactly the comment text.
|
||||
switch c[1] {
|
||||
case '/':
|
||||
//-style comment (no newline at the end)
|
||||
c = c[2:]
|
||||
if len(c) == 0 {
|
||||
// empty line
|
||||
break
|
||||
}
|
||||
if isDevComment(c[1:]) { // get rid of one space
|
||||
continue
|
||||
}
|
||||
if c[0] == ' ' {
|
||||
// strip first space - required for Example tests
|
||||
c = c[1:]
|
||||
break
|
||||
}
|
||||
//if isDirective(c) {
|
||||
// // Ignore //go:noinline, //line, and so on.
|
||||
// continue
|
||||
//}
|
||||
case '*':
|
||||
/*-style comment */
|
||||
c = c[2 : len(c)-2]
|
||||
}
|
||||
|
||||
// Split on newlines.
|
||||
cl := strings.Split(c, "\n")
|
||||
|
||||
// Walk lines, stripping trailing white space and adding to list.
|
||||
for _, l := range cl {
|
||||
lines = append(lines, stripTrailingWhitespace(l))
|
||||
}
|
||||
}
|
||||
|
||||
// Remove leading blank lines; convert runs of interior blank lines to a
|
||||
// single blank line.
|
||||
n := 0
|
||||
for _, line := range lines {
|
||||
if line != "" || n > 0 && lines[n-1] != "" {
|
||||
lines[n] = line
|
||||
n++
|
||||
}
|
||||
}
|
||||
lines = lines[0:n]
|
||||
|
||||
// Concatenate all of these together. Blank lines should be a newline.
|
||||
s := ""
|
||||
for i, line := range lines {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
s += line
|
||||
if i < len(lines)-1 { // Is there another line?
|
||||
if lines[i+1] == "" {
|
||||
s += "\n" // Will eventually be a line break.
|
||||
} else {
|
||||
s += " "
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// TODO: should we use unicode.IsSpace instead?
|
||||
func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
|
||||
|
||||
// TODO: should we replace with a strings package stdlib function?
|
||||
func stripTrailingWhitespace(s string) string {
|
||||
i := len(s)
|
||||
for i > 0 && isWhitespace(s[i-1]) {
|
||||
i--
|
||||
}
|
||||
return s[0:i]
|
||||
}
|
||||
|
||||
// isPrivate specifies if a field name is "private" or not.
|
||||
func isPrivate(fieldName string) bool {
|
||||
if fieldName == "" {
|
||||
panic("invalid field name")
|
||||
}
|
||||
x := fieldName[0:1]
|
||||
|
||||
if strings.ToLower(x) == x {
|
||||
return true // it was already private
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// isDevComment tells us that the comment is for developers only!
|
||||
func isDevComment(comment string) bool {
|
||||
if strings.HasPrefix(comment, "TODO:") {
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(comment, "FIXME:") {
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(comment, "XXX:") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// safeVersion parses the main version string and returns a short hash for us.
|
||||
// For example, we might get a string of 0.0.26-176-gabcdef012-dirty as input,
|
||||
// and we'd want to return abcdef012.
|
||||
func safeVersion(version string) string {
|
||||
const dirty = "-dirty"
|
||||
|
||||
s := version
|
||||
if strings.HasSuffix(s, dirty) { // helpful dirty remover
|
||||
s = s[0 : len(s)-len(dirty)]
|
||||
}
|
||||
|
||||
ix := strings.LastIndex(s, "-")
|
||||
if ix == -1 { // assume we have a standalone version (future proofing?)
|
||||
return s
|
||||
}
|
||||
s = s[ix+1:]
|
||||
|
||||
// From the `git describe` man page: The "g" prefix stands for "git" and
|
||||
// is used to allow describing the version of a software depending on
|
||||
// the SCM the software is managed with. This is useful in an
|
||||
// environment where people may use different SCMs.
|
||||
const g = "g"
|
||||
if strings.HasPrefix(s, g) {
|
||||
s = s[len(g):]
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
@@ -14,3 +14,4 @@ Welcome to mgmt's documentation!
|
||||
quick-start-guide
|
||||
resource-guide
|
||||
prometheus
|
||||
puppet-guide
|
||||
|
||||
@@ -100,6 +100,24 @@ expression
|
||||
}
|
||||
```
|
||||
|
||||
- **for**: loop over a list with a body of statements
|
||||
|
||||
```mcl
|
||||
$list = ["a", "b", "c",]
|
||||
for $index, $value in $list {
|
||||
# some statements go here
|
||||
}
|
||||
```
|
||||
|
||||
- **forkv**: loop over a map with a body of statements
|
||||
|
||||
```mcl
|
||||
$map = {0 => "a", 1 => "b", 2 => "c",}
|
||||
forkv $key, $val in $map {
|
||||
# some statements go here
|
||||
}
|
||||
```
|
||||
|
||||
- **resource**: produces a resource
|
||||
|
||||
```mcl
|
||||
@@ -283,6 +301,14 @@ one of many ways you can perform iterative tasks that you might have
|
||||
traditionally used a `for` loop for instead. This is preferred, because flow
|
||||
control is error-prone and can make for less readable code.
|
||||
|
||||
The single `str` variation may only be used when it is possible for the
|
||||
compiler to determine statically that the value is of that type. Otherwise, it
|
||||
will assume it to be a list of strings. Programmers should explicitly wrap their
|
||||
variables in a string by interpolation to force this static `str` determination,
|
||||
or in square brackets to force a list. The former is generally preferable
|
||||
because it generates a smaller function graph since it doesn't need to build a
|
||||
list.
|
||||
|
||||
##### Internal edges
|
||||
|
||||
Resources may also declare edges internally. The edges may point to or from
|
||||
@@ -337,6 +363,28 @@ to express a relationship between three resources. The first character in the
|
||||
resource kind must be capitalized so that the parser can ascertain
|
||||
unambiguously that we are referring to a dependency relationship.
|
||||
|
||||
##### Edge naming
|
||||
|
||||
Each edge must have a unique name of type `str` that is used to uniquely
|
||||
identify that edge, and can be used in the functioning of the edge at its
|
||||
discretion.
|
||||
|
||||
Alternatively, the name value may be a list of strings `[]str` to build a list
|
||||
of edges, each with a name from that list.
|
||||
|
||||
Using this construct is a veiled form of looping (iteration). This technique is
|
||||
one of many ways you can perform iterative tasks that you might have
|
||||
traditionally used a `for` loop for instead. This is preferred, because flow
|
||||
control is error-prone and can make for less readable code.
|
||||
|
||||
The single `str` variation may only be used when it is possible for the
|
||||
compiler to determine statically that the value is of that type. Otherwise, it
|
||||
will assume it to be a list of strings. Programmers should explicitly wrap their
|
||||
variables in a string by interpolation to force this static `str` determination,
|
||||
or in square brackets to force a list. The former is generally preferable
|
||||
because it generates a smaller function graph since it doesn't need to build a
|
||||
list.
|
||||
|
||||
#### Class
|
||||
|
||||
A class is a grouping structure that binds a list of statements to a name in
|
||||
@@ -561,7 +609,7 @@ Lexing is done using [nex](https://github.com/blynn/nex). It is a pure-golang
|
||||
implementation which is similar to _Lex_ or _Flex_, but which produces golang
|
||||
code instead of C. It integrates reasonably well with golang's _yacc_ which is
|
||||
used for parsing. The token definitions are in:
|
||||
[lang/lexer.nex](https://github.com/purpleidea/mgmt/tree/master/lang/lexer.nex).
|
||||
[lang/lexer.nex](https://github.com/purpleidea/mgmt/tree/master/lang/parser/lexer.nex).
|
||||
Lexing and parsing run together by calling the `LexParse` method.
|
||||
|
||||
#### Parsing
|
||||
@@ -573,7 +621,7 @@ and trial and error. One small advantage yacc has over standard yacc is that it
|
||||
can produce error messages from examples. The best documentation is to examine
|
||||
the source. There is a short write up available [here](https://research.swtch.com/yyerror).
|
||||
The yacc file exists at:
|
||||
[lang/parser.y](https://github.com/purpleidea/mgmt/tree/master/lang/parser.y).
|
||||
[lang/parser.y](https://github.com/purpleidea/mgmt/tree/master/lang/parser/parser.y).
|
||||
Lexing and parsing run together by calling the `LexParse` method.
|
||||
|
||||
#### Interpolation
|
||||
@@ -609,23 +657,27 @@ so that each `Expr` node in the AST knows what to expect. Type annotation is
|
||||
allowed in situations when you want to explicitly specify a type, or when the
|
||||
compiler cannot deduce it; however, most of it can usually be inferred.
|
||||
|
||||
For type inference to work, each node in the AST implements a `Unify` method
|
||||
which is able to return a list of invariants that must hold true. This starts at
|
||||
the top most AST node, and gets called through to it's children to assemble a
|
||||
giant list of invariants. The invariants can take different forms. They can
|
||||
specify that a particular expression must have a particular type, or they can
|
||||
specify that two expressions must have the same types. More complex invariants
|
||||
allow you to specify relationships between different types and expressions.
|
||||
Furthermore, invariants can allow you to specify that only one invariant out of
|
||||
a set must hold true.
|
||||
For type inference to work, each `Stmt` node in the AST implements a `TypeCheck`
|
||||
method which is able to return a list of invariants that must hold true. This
|
||||
starts at the topmost AST node, and gets called through to its children to
|
||||
assemble a giant list of invariants. The invariants all have the same form. They
|
||||
specify that a particular expression corresponds to two particular types which
|
||||
may both contain unification variables.
|
||||
|
||||
Each `Expr` node in the AST implements an `Infer` and `Check` method. The
|
||||
`Infer` method returns the type of that node along with a list of invariants as
|
||||
described above. Unification variables can of course be used throughout. The
|
||||
`Check` method always uses a generic check implementation and generally doesn't
|
||||
need to be implemented by the user.
|
||||
|
||||
Once the list of invariants has been collected, they are run through an
|
||||
invariant solver. The solver can either return successfully or with an
|
||||
error. If the solver returns successfully, it means that it has found a trivial
|
||||
error. If the solver returns successfully, it means that it has found a single
|
||||
mapping between every expression and its corresponding type. At this point it
|
||||
is a simple task to run `SetType` on every expression so that the types are
|
||||
known. If the solver returns in error, it is usually due to one of two
|
||||
possibilities:
|
||||
known. During this stage, each SetType method verifies that it's a compatible
|
||||
type that it can use. If either that method or the solver returns in error,
|
||||
it is usually due to one of two possibilities:
|
||||
|
||||
1. Ambiguity
|
||||
|
||||
@@ -645,8 +697,8 @@ possibilities:
|
||||
always happens if the user has made a type error in their program.
|
||||
|
||||
Only one solver currently exists, but it is possible to easily plug in an
|
||||
alternate implementation if someone more skilled in the art of solver design
|
||||
would like to propose a more logical or performant variant.
|
||||
alternate implementation if someone wants to experiment with the art of solver
|
||||
design and would like to propose a more logical or performant variant.
|
||||
|
||||
#### Function graph generation
|
||||
|
||||
@@ -687,8 +739,9 @@ If you'd like to create a built-in, core function, you'll need to implement the
|
||||
function API interface named `Func`. It can be found in
|
||||
[lang/interfaces/func.go](https://github.com/purpleidea/mgmt/tree/master/lang/interfaces/func.go).
|
||||
Your function must have a specific type. For example, a simple math function
|
||||
might have a signature of `func(x int, y int) int`. As you can see, all the
|
||||
types are known _before_ compile time.
|
||||
might have a signature of `func(x int, y int) int`. The simple functions have
|
||||
their types known _before_ compile time. You may also include unification
|
||||
variables in the function signature as long as the top-level type is a function.
|
||||
|
||||
A separate discussion on this matter can be found in the [function guide](function-guide.md).
|
||||
|
||||
@@ -716,6 +769,12 @@ added in the future. This method is usually called before any other, and should
|
||||
not depend on any other method being called first. Other methods must not depend
|
||||
on this method being called first.
|
||||
|
||||
If you use any unification variables in the function signature, then your
|
||||
function will *not* be made available for use inside templates. This is a
|
||||
limitation of the `golang` templating library. In the future if this limitation
|
||||
proves to be significantly annoying, we might consider writing our own template
|
||||
library.
|
||||
|
||||
#### Example
|
||||
|
||||
```golang
|
||||
@@ -726,6 +785,18 @@ func (obj *FooFunc) Info() *interfaces.Info {
|
||||
}
|
||||
```
|
||||
|
||||
#### Example
|
||||
|
||||
This example contains unification variables.
|
||||
|
||||
```golang
|
||||
func (obj *FooFunc) Info() *interfaces.Info {
|
||||
return &interfaces.Info{
|
||||
Sig: types.NewType("func(a ?1, b ?2, foo [?3]) ?1"),
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Init
|
||||
|
||||
```golang
|
||||
@@ -788,49 +859,67 @@ Please see the example functions in
|
||||
[lang/core/](https://github.com/purpleidea/mgmt/tree/master/lang/core/).
|
||||
```
|
||||
|
||||
### Polymorphic Function API
|
||||
### BuildableFunc Function API
|
||||
|
||||
For some functions, it might be helpful to be able to implement a function once,
|
||||
but to have multiple polymorphic variants that can be chosen at compile time.
|
||||
For this more advanced topic, you will need to use the
|
||||
[Polymorphic Function API](#polymorphic-function-api). This will help with code
|
||||
reuse when you have a small, finite number of possible type signatures, and also
|
||||
for more complicated cases where you might have an infinite number of possible
|
||||
type signatures. (eg: `[]str`, or `[][]str`, or `[][][]str`, etc...)
|
||||
For some functions, it might be helpful to have a function which needs a "build"
|
||||
step which is run after type unification. This step can be used to build the
|
||||
function using the determined type, but it may also just be used for checking
|
||||
that unification picked a valid solution.
|
||||
|
||||
Suppose you want to implement a function which can assume different type
|
||||
signatures. The mgmt language does not support polymorphic types-- you must use
|
||||
static types throughout the language, however, it is legal to implement a
|
||||
function which can take different specific type signatures based on how it is
|
||||
used. For example, you might wish to add a math function which could take the
|
||||
form of `func(x int, x int) int` or `func(x float, x float) float` depending on
|
||||
the input values. You might also want to implement a function which takes an
|
||||
arbitrary number of input arguments (the number must be statically fixed at the
|
||||
compile time of your program though) and which returns a string.
|
||||
form of `func(x int, y int) int` or `func(x float, y float) float` depending on
|
||||
the input values. For this case you could use a signature containing unification
|
||||
variables, eg: `func(x ?1, y ?1) ?1`. At the end the buildable function would
|
||||
need to check that it received a `?1` type of either `int` or `float`, since
|
||||
this function might not support doing math on strings. Remember that type
|
||||
unification can only return zero or one solutions, it's not possible to return
|
||||
more than one, which is why this secondary validation step is a brilliant way to
|
||||
filter out invalid solutions without needing to encode them as algebraic
|
||||
conditions during the solver state, which would otherwise make it exponential.
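
As a rough sketch of that validation step (assuming the buildable interface
exposes a `Build(*types.Type) (*types.Type, error)` hook for this stage; see
`lang/interfaces/func.go` for the authoritative shape), the math example might
filter out unwanted solutions like this:

```golang
// Build is a sketch only: it receives the solved signature after type
// unification and rejects any solution that isn't int or float math.
// The MathFunc name and the obj.typ field are hypothetical.
func (obj *MathFunc) Build(typ *types.Type) (*types.Type, error) {
	if typ.Kind != types.KindFunc || len(typ.Ord) != 2 {
		return nil, fmt.Errorf("expected a two argument function, got: %s", typ.String())
	}
	if typ.Out == nil {
		return nil, fmt.Errorf("function is missing a return type")
	}
	if k := typ.Out.Kind; k != types.KindInt && k != types.KindFloat {
		return nil, fmt.Errorf("can only do math on int or float, got: %s", typ.Out.String())
	}
	obj.typ = typ // remember the solution for later use by Call()
	return typ, nil
}
```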
|
||||
|
||||
The `PolyFunc` interface adds additional methods which you must implement to
|
||||
satisfy such a function implementation. If you'd like to implement such a
|
||||
function, then please notify the project authors, and they will expand this
|
||||
section with a longer description of the process.
|
||||
### InferableFunc Function API
|
||||
|
||||
#### Examples
|
||||
You might also want to implement a function which takes an arbitrary number of
|
||||
input arguments (the number must be statically fixed at the compile time of your
|
||||
program though) and which returns a string or something else.
|
||||
|
||||
What follows are a few examples that might help you understand some of the
|
||||
language details.
|
||||
The `InferableFunc` interface adds an additional `FuncInfer` method which you
|
||||
must implement to satisfy such a function implementation. This lets you
|
||||
dynamically generate a type signature (including unification variables) and a
|
||||
list of invariants before running the type unification solver. It takes as input
|
||||
a list of the statically known input types and input values (if any) as well as
|
||||
the number of input arguments specified. This is usually enough information to
|
||||
generate a fixed type signature of a fixed size.
|
||||
|
||||
##### Example Foo
|
||||
|
||||
TODO: please add an example here!
|
||||
|
||||
##### Example Bar
|
||||
|
||||
TODO: please add an example here!
|
||||
Using this API should generally be pretty rare, but it is how certain special
|
||||
functions such as `fmt.printf` are built. If you'd like to implement such a
|
||||
function, then please notify the project authors as we're curious about your
|
||||
use case.
|
||||
|
||||
## Frequently asked questions
|
||||
|
||||
(Send your questions as a patch to this FAQ! I'll review it, merge it, and
|
||||
respond by commit with the answer.)
|
||||
|
||||
### Why am I getting a deploy.readfile error when the file actually exists?
|
||||
|
||||
You may be seeing an error like:
|
||||
|
||||
`readfile`: open /*/files/foo: file does not exist can't read file `/files/foo`?
|
||||
|
||||
If you look, the `foo` file is indeed in the `files/` directory. The problem is
|
||||
that the `files/` directory won't be seen if you didn't specify to include it as
|
||||
part of your deploy. To do so, chances are that all you need to do is add a
|
||||
`metadata.yaml` file into the parent directory to that files folder. This will
|
||||
be used as the entrypoint instead of the naked `main.mcl` file that you have
|
||||
there, and with that metadata entrypoint, you get a default `files/` directory
|
||||
added. You can of course change the `files/` path by setting a key in the
|
||||
`metadata.yaml` file, but we recommend you leave it as the default.
|
||||
|
||||
### What is the difference between `ExprIf` and `StmtIf`?
|
||||
|
||||
The language contains both an `if` expression, and an `if` statement. An `if`
|
||||
@@ -914,7 +1003,7 @@ Not really, but practically it can be used as such. The `class` statement is not
|
||||
a singleton since it can be called multiple times in different locations, and it
|
||||
can also be parameterized and called multiple times (with `include`) using
|
||||
different input parameters. The reason it can be used as such is that statement
|
||||
output (from multple classes) that is compatible (and usually identical) will
|
||||
output (from multiple classes) that is compatible (and usually identical) will
|
||||
be automatically collated and have the duplicates removed. In that way, you can
|
||||
assume that an unparameterized class is always a singleton, and that
|
||||
parameterized classes can often be singletons depending on their contents and if
|
||||
@@ -956,7 +1045,7 @@ thing FRP experts might notice is that some of the concepts from FRP are either
|
||||
named differently, or are notably absent.
|
||||
|
||||
In mgmt, we don't talk about behaviours, events, or signals in the strict FRP
|
||||
definitons of the words. Firstly, because we only support discretized, streams
|
||||
definitions of the words. Firstly, because we only support discretized, streams
|
||||
of values with no plan to add continuous semantics. Secondly, because we prefer
|
||||
to use terms which are more natural and relatable to what our target audience is
|
||||
expecting. Our users are more likely to have a background in Physiology, or
|
||||
|
||||
@@ -21,15 +21,15 @@ if we missed something that you think is relevant!
|
||||
| Felix Frank | blog | [Puppet Powered Mgmt (puppet to mgmt tl;dr)](https://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/) |
|
||||
| James Shubin | blog | [Automatic clustering in mgmt](https://purpleidea.com/blog/2016/06/20/automatic-clustering-in-mgmt/) |
|
||||
| James Shubin | video | [Recording from CoreOSFest 2016](https://www.youtube.com/watch?v=KVmDCUA42wc&html5=1) |
|
||||
| James Shubin | video | [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) ([Slides](https://annex.debconf.org//debconf-share/debconf16/slides/15-next-generation-config-mgmt.pdf)) |
|
||||
| James Shubin | video | [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) |
|
||||
| Felix Frank | blog | [Edging It All In (puppet and mgmt edges)](https://ffrank.github.io/features/2016/07/12/edging-it-all-in/) |
|
||||
| Felix Frank | blog | [Translating All The Things (puppet to mgmt translation warnings)](https://ffrank.github.io/features/2016/08/19/translating-all-the-things/) |
|
||||
| James Shubin | video | [Recording from systemd.conf 2016](https://www.youtube.com/watch?v=jB992Zb3nH0&html5=1) |
|
||||
| James Shubin | video | [Recording from systemd.conf 2016](https://www.youtube.com/watch?v=_TowsFAWWRA) |
|
||||
| James Shubin | blog | [Remote execution in mgmt](https://purpleidea.com/blog/2016/10/07/remote-execution-in-mgmt/) |
|
||||
| James Shubin | video | [Recording from High Load Strategy 2016](https://vimeo.com/191493409) |
|
||||
| James Shubin | video | [Recording from NLUUG 2016](https://www.youtube.com/watch?v=MmpwOQAb_SE&html5=1) |
|
||||
| James Shubin | video | [Recording from High Load Strategy 2016](https://www.youtube.com/watch?v=-4g14KUVPVk) |
|
||||
| James Shubin | video | [Recording from NLUUG 2016](https://www.youtube.com/watch?v=0vO93ni1zos) |
|
||||
| James Shubin | blog | [Send/Recv in mgmt](https://purpleidea.com/blog/2016/12/07/sendrecv-in-mgmt/) |
|
||||
| Julien Pivotto | blog | [Augeas resource for mgmt](https://roidelapluie.be/blog/2017/02/14/mgmt-augeas/) |
|
||||
| Julien Pivotto | blog | [Augeas resource for mgmt](https://purpleidea.com/cached/mgmt-augeas.html) (Cached from: https://roidelapluie.be/blog/2017/02/14/mgmt-augeas/) |
|
||||
| James Shubin | blog | [Metaparameters in mgmt](https://purpleidea.com/blog/2017/03/01/metaparameters-in-mgmt/) |
|
||||
| James Shubin | video | [Recording from Incontro DevOps 2017](https://vimeo.com/212241877) |
|
||||
| Yves Brissaud | blog | [mgmt aux HumanTalks Grenoble (french)](http://log.winsos.net/2017/04/12/mgmt-aux-human-talks-grenoble.html) |
|
||||
@@ -59,3 +59,7 @@ if we missed something that you think is relevant!
|
||||
| James Shubin | video | [Recording from CfgMgmtCamp.eu 2023](https://www.youtube.com/watch?v=FeRGRj8w0BU) |
|
||||
| James Shubin | video | [Recording from FOSDEM 2024, Golang Devroom](https://video.fosdem.org/2024/ud2218a/fosdem-2024-2575-single-binary-full-stack-provisioning.mp4) |
|
||||
| James Shubin | video | [Recording from CfgMgmtCamp.eu 2024](https://www.youtube.com/watch?v=vBt9lpGD4bc) |
|
||||
| James Shubin | blog | [Mgmt Configuration Language: Functions](https://purpleidea.com/blog/2024/11/22/functions-in-mgmt/) |
|
||||
| James Shubin | blog | [Modules and imports in mgmt](https://purpleidea.com/blog/2024/12/03/modules-and-imports-in-mgmt/) |
|
||||
| James Shubin | video | [Recording from FOSDEM 2025, Docs Devroom](https://video.fosdem.org/2025/k4201/fosdem-2025-6143-docs-straight-from-the-code-ast-powered-automation.mp4) |
|
||||
| James Shubin | video | [Recording from CfgMgmtCamp.eu 2025](https://www.youtube.com/watch?v=0Oa7CWx4TEA) |
|
||||
|
||||
docs/puppet-guide.md (new file, 316 lines)
@@ -0,0 +1,316 @@
|
||||
# Puppet guide
|
||||
|
||||
`mgmt` can use Puppet as its source for the configuration graph.
|
||||
This document goes into detail on how this works, and lists
|
||||
some pitfalls and limitations.
|
||||
|
||||
For basic instructions on how to use the Puppet support, see
|
||||
the [main documentation](documentation.md#puppet-support).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You need Puppet installed in your system. It is not important how you
|
||||
get it. On the most common Linux distributions, you can use packages
|
||||
from the OS maintainer, or upstream Puppet repositories. An alternative
|
||||
that will also work on OSX is the `puppet` Ruby gem. It also has the
|
||||
advantage that you can install any desired version in your home directory
|
||||
or any other location.
|
||||
|
||||
Any release of Puppet's 3.x and 4.x series should be suitable for use with
|
||||
`mgmt`. Most importantly, make sure to install the `ffrank-mgmtgraph` Puppet
|
||||
module (referred to below as "the translator module").
|
||||
|
||||
```
|
||||
puppet module install ffrank-mgmtgraph
|
||||
```
|
||||
|
||||
Please note that the module is not required on your Puppet master (if you
|
||||
use a master/agent setup). It's needed on the machine that runs `mgmt`.
|
||||
You can install the module on the master anyway, so that it gets distributed
|
||||
to your agents through Puppet's `pluginsync` mechanism.
|
||||
|
||||
### Testing the Puppet side
|
||||
|
||||
The following command should run successfully and print a YAML hash on your
|
||||
terminal:
|
||||
|
||||
```bash
|
||||
puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": ensure => present }'
|
||||
```
|
||||
|
||||
You can use this CLI to test any manifests before handing them straight
|
||||
to `mgmt`.
|
||||
|
||||
## Writing a suitable manifest
|
||||
|
||||
### Unsupported attributes
|
||||
|
||||
`mgmt` inherited its resource module from Puppet, so by and large, it's quite
|
||||
possible to express `mgmt` graphs in terms of Puppet manifests. However,
|
||||
there isn't (and likely never will be) full feature parity between the
|
||||
respective resource types. In consequence, a manifest can have semantics that
|
||||
cannot be transferred to `mgmt`.
|
||||
|
||||
For example, at the time of writing this, the `file` type in `mgmt` had no
|
||||
notion of permissions (the file `mode`) yet. This led to the following
|
||||
warning (among others that will be discussed below):
|
||||
|
||||
```
|
||||
$ puppet mgmtgraph print --code 'file { "/tmp/foo": mode => "0600" }'
|
||||
Warning: cannot translate: File[/tmp/foo] { mode => "600" } (attribute is ignored)
|
||||
```
|
||||
|
||||
This is a heads-up for the user, because the resulting `mgmt` graph will
|
||||
in fact not pass this information to the `/tmp/foo` file resource, and
|
||||
`mgmt` will ignore this file's permissions. Including such attributes in
|
||||
manifests that are written expressly for `mgmt` is not sensible and should
|
||||
be avoided.
|
||||
|
||||
### Unsupported resources
|
||||
|
||||
Puppet has a fairly large number of
|
||||
[built-in types](https://www.puppet.com/docs/puppet/8/cheatsheet_core_types.html),
|
||||
and countless more are available through
|
||||
[modules](https://forge.puppet.com/). It's unlikely that all of them will
|
||||
eventually receive native counterparts in `mgmt`.
|
||||
|
||||
When encountering an unknown resource, the translator module will replace
|
||||
it with an `exec` resource in its output. This resource will run the equivalent
|
||||
of a `puppet resource` command to make Puppet apply the original resource
|
||||
itself. This has quite abysmal performance, because processing such a
|
||||
resource requires the forking of at least one Puppet process (two if it
|
||||
is found to be out of sync). This comes with considerable overhead.
|
||||
On most systems, starting up any Puppet command takes several seconds.
|
||||
Compared to the split second that the actual work usually takes,
|
||||
this overhead can amount to several orders of magnitude.
|
||||
|
||||
Avoid Puppet types that `mgmt` does not implement (yet).
|
||||
|
||||
### Avoiding common warnings
|
||||
|
||||
Many resource parameters in Puppet take default values. For the most part,
|
||||
the translator module just ignores them. However, there are cases in which
|
||||
Puppet will default to convenient behavior that `mgmt` cannot quite replicate.
|
||||
For example, translating a plain `file` resource will lead to a warning message:
|
||||
|
||||
```
|
||||
$ puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": }'
|
||||
Warning: File[/tmp/mgmt-test] uses the 'puppet' file bucket, which mgmt cannot
|
||||
do. There will be no backup copies!
|
||||
```
|
||||
|
||||
The reason is that, by default, Puppet assumes the following parameter value
|
||||
(among others):
|
||||
|
||||
```puppet
|
||||
file { "/tmp/mgmt-test":
|
||||
backup => 'puppet',
|
||||
}
|
||||
```
|
||||
|
||||
To avoid this, specify the parameter explicitly:
|
||||
|
||||
```bash
|
||||
puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": backup => false }'
|
||||
```
|
||||
|
||||
This is tedious in a more complex manifest. A good simplification is the
|
||||
following [resource default](https://www.puppet.com/docs/puppet/8/lang_defaults)
|
||||
anywhere on the top scope of your manifest:
|
||||
|
||||
```puppet
|
||||
File { backup => false }
|
||||
```
|
||||
|
||||
If you encounter similar warnings from other types and/or parameters,
|
||||
use the same approach to silence them if possible.
|
||||
|
||||
## Configuring Puppet
|
||||
|
||||
Since `mgmt` uses an actual Puppet CLI behind the scenes, you might
|
||||
need to tweak some of Puppet's runtime options in order to make it
|
||||
do what you want. Reasons for this could be among the following:
|
||||
|
||||
* You use the `--puppet agent` variant and need to configure
|
||||
`servername`, `certname` and other master/agent-related options.
|
||||
* You don't want runtime information to end up in the `vardir`
|
||||
that is used by your regular `puppet agent`.
|
||||
* You install specific Puppet modules for `mgmt` in a non-standard
|
||||
location.
|
||||
|
||||
`mgmt` exposes only one Puppet option in order to allow you to
|
||||
control all of them, through its `--puppet-conf` option. It allows
|
||||
you to specify which `puppet.conf` file should be used during
|
||||
translation.
|
||||
|
||||
```
|
||||
mgmt run puppet --puppet /opt/my-manifest.pp --puppet-conf /etc/mgmt/puppet.conf
|
||||
```
|
||||
|
||||
Within this file, you can just specify any needed options in the
|
||||
`[main]` section:
|
||||
|
||||
```
|
||||
[main]
|
||||
server=mgmt-master.example.net
|
||||
vardir=/var/lib/mgmt/puppet
|
||||
```
|
||||
|
||||
## Caveats
|
||||
|
||||
Please see the [README](https://github.com/ffrank/puppet-mgmtgraph/blob/master/README.md)
|
||||
of the translator module for the current state of supported and unsupported
|
||||
language features.
|
||||
|
||||
You should probably make sure to always use the latest release of
|
||||
both `ffrank-mgmtgraph` and `ffrank-yamlresource` (the latter is
|
||||
getting pulled in as a dependency of the former).
|
||||
|
||||
## Using Puppet in conjunction with the mcl lang
|
||||
|
||||
The graph that Puppet generates for `mgmt` can be united with a graph
|
||||
that is created from native `mgmt` code in its mcl language. This is
|
||||
useful when you are in the process of replacing Puppet with mgmt. You
|
||||
can translate your custom modules into mgmt's language one by one,
|
||||
and let mgmt run the current mix.
|
||||
|
||||
Instead of the usual `--puppet-conf` flag and argv for `puppet` and `mcl` input,
|
||||
you need to use alternative flags to make this work:
|
||||
|
||||
* `--lp-lang` to specify the mcl input
|
||||
* `--lp-puppet` to specify the puppet input
|
||||
* `--lp-puppet-conf` to point to the optional puppet.conf file
|
||||
|
||||
`mgmt` will derive a graph that contains all edges and vertices from
|
||||
both inputs. You essentially get two unrelated subgraphs that run in
|
||||
parallel. To form edges between these subgraphs, you have to define
|
||||
special vertices that will be merged. This works through a hard-coded
|
||||
naming scheme.
|
||||
|
||||
### Mixed graph example 1 - No merges
|
||||
|
||||
```mcl
|
||||
# lang
|
||||
file "/tmp/mgmt_dir/" { state => "present" }
|
||||
file "/tmp/mgmt_dir/a" { state => "present" }
|
||||
```
|
||||
|
||||
```puppet
|
||||
# puppet
|
||||
file { "/tmp/puppet_dir": ensure => "directory" }
|
||||
file { "/tmp/puppet_dir/a": ensure => "file" }
|
||||
```
|
||||
|
||||
These very simple inputs (including the implicit edges from each directory to
|
||||
its respective file) result in two unconnected subgraphs.
|
||||
|
||||
```
|
||||
File[/tmp/mgmt_dir/] -> File[/tmp/mgmt_dir/a]
|
||||
|
||||
File[/tmp/puppet_dir] -> File[/tmp/puppet_dir/a]
|
||||
```
|
||||
|
||||
### Mixed graph example 2 - Merged vertex
|
||||
|
||||
In order to have merged vertices in the resulting graph, you will
|
||||
need to include special resources and classes in the respective
|
||||
input code.
|
||||
|
||||
* On the lang side, add `noop` resources with names starting with `puppet_`.
|
||||
* On the Puppet side, add **empty** classes with names starting with `mgmt_`.
|
||||
|
||||
```mcl
|
||||
# lang
|
||||
noop "puppet_handover_to_mgmt" {}
|
||||
file "/tmp/mgmt_dir/" { state => "present" }
|
||||
file "/tmp/mgmt_dir/a" { state => "present" }
|
||||
|
||||
Noop["puppet_handover_to_mgmt"] -> File["/tmp/mgmt_dir/"]
|
||||
```
|
||||
|
||||
```puppet
|
||||
# puppet
|
||||
class mgmt_handover_to_mgmt {}
|
||||
include mgmt_handover_to_mgmt
|
||||
|
||||
file { "/tmp/puppet_dir": ensure => "directory" }
|
||||
file { "/tmp/puppet_dir/a": ensure => "file" }
|
||||
|
||||
File["/tmp/puppet_dir/a"] -> Class["mgmt_handover_to_mgmt"]
|
||||
```
|
||||
|
||||
The new `noop` resource is merged with the new class, resulting in
|
||||
the following graph:
|
||||
|
||||
```
|
||||
File[/tmp/puppet_dir] -> File[/tmp/puppet_dir/a]
|
||||
|
|
||||
V
|
||||
Noop[handover_to_mgmt]
|
||||
|
|
||||
V
|
||||
File[/tmp/mgmt_dir/] -> File[/tmp/mgmt_dir/a]
|
||||
```
|
||||
|
||||
With everything lined up this way, the resources from the Puppet input
|
||||
run before those from the mcl input.
|
||||
|
||||
**Note:** The names of the `noop` and the class must be identical after their
|
||||
respective prefixes. The common part (here, `handover_to_mgmt`) becomes the name
|
||||
of the merged resource.
|
||||
|
||||
### Mixed graph example 3 - Multiple merges
|
||||
|
||||
In most scenarios, it will not be possible to define a single handover
|
||||
point as in the previous example. For example, if some Puppet resources
|
||||
need to run in between two stages of native resources, you need at least
|
||||
two merged vertices:
|
||||
|
||||
```mcl
|
||||
# lang
|
||||
noop "puppet_handover" {}
|
||||
noop "puppet_handback" {}
|
||||
file "/tmp/mgmt_dir/" { state => "present" }
|
||||
file "/tmp/mgmt_dir/a" { state => "present" }
|
||||
file "/tmp/mgmt_dir/puppet_subtree/state-file" { state => "present" }
|
||||
|
||||
File["/tmp/mgmt_dir/"] -> Noop["puppet_handover"]
|
||||
Noop["puppet_handback"] -> File["/tmp/mgmt_dir/puppet_subtree/state-file"]
|
||||
```
|
||||
|
||||
```puppet
|
||||
# puppet
|
||||
class mgmt_handover {}
|
||||
class mgmt_handback {}
|
||||
|
||||
include mgmt_handover, mgmt_handback
|
||||
|
||||
class important_stuff {
|
||||
file { "/tmp/mgmt_dir/puppet_subtree":
|
||||
ensure => "directory"
|
||||
}
|
||||
# ...
|
||||
}
|
||||
|
||||
Class["mgmt_handover"] -> Class["important_stuff"] -> Class["mgmt_handback"]
|
||||
```
|
||||
|
||||
The resulting graph looks roughly like this:
|
||||
|
||||
```
|
||||
File[/tmp/mgmt_dir/] -> File[/tmp/mgmt_dir/a]
|
||||
|
|
||||
V
|
||||
Noop[handover] -> ( class important_stuff resources )
|
||||
|
|
||||
V
|
||||
Noop[handback]
|
||||
|
|
||||
V
|
||||
File[/tmp/mgmt_dir/puppet_subtree/state-file]
|
||||
```
|
||||
|
||||
You can add arbitrary numbers of merge pairs to your code bases,
|
||||
with relationships as needed. From our limited experience, code
|
||||
readability suffers quite a lot from these, however. We advise
|
||||
keeping these structures simple.
|
||||
@@ -21,8 +21,6 @@ to build your own.
|
||||
|
||||
### Downloading a pre-built release:
|
||||
|
||||
This method is not recommended because those packages are now very old.
|
||||
|
||||
The latest releases can be found [here](https://github.com/purpleidea/mgmt/releases/).
|
||||
An alternate mirror is available [here](https://dl.fedoraproject.org/pub/alt/purpleidea/mgmt/releases/).
|
||||
|
||||
@@ -39,7 +37,7 @@ You'll need some dependencies, including `golang`, and some associated tools.
|
||||
|
||||
#### Installing golang
|
||||
|
||||
* You need golang version 1.20 or greater installed.
|
||||
* You need a modern golang version installed.
|
||||
* To install on rpm style systems: `sudo dnf install golang`
|
||||
* To install on apt style systems: `sudo apt install golang`
|
||||
* To install on macOS systems install [Homebrew](https://brew.sh)
|
||||
@@ -103,13 +101,14 @@ This method avoids polluting your workstation with the dependencies for the
|
||||
build. Here is an example using Fedora, Podman and Buildah:
|
||||
|
||||
```shell
|
||||
git clone --recursive https://github.com/purpleidea/mgmt/ ~/mgmt/
|
||||
cd ~/mgmt/docker
|
||||
buildah build -f Dockerfile-fedora.build -t mgmt_build
|
||||
podman run -d -it --name mgmt_build localhost/mgmt_build
|
||||
podman cp mgmt_build:/src/github.com/purpleidea/mgmt/mgmt /tmp/mgmt
|
||||
sudo mv /tmp/mgmt /usr/local/bin # be sure this is in your $PATH
|
||||
sudo chown root:root /usr/local/bin/mgmt
|
||||
git clone --recursive https://github.com/purpleidea/mgmt/
|
||||
cd mgmt
|
||||
docker build -t mgmt -f docker/Dockerfile .
|
||||
docker run --rm --entrypoint cat mgmt mgmt > mgmt
|
||||
chmod +x mgmt
|
||||
./mgmt --version
|
||||
# you could now copy the mgmt binary somewhere into your $PATH
|
||||
# e.g., /usr/local/bin/ to make it accessible from anywhere
|
||||
```
|
||||
|
||||
## Running mgmt
|
||||
|
||||
@@ -62,7 +62,7 @@ status-quo of using your own etcd cluster is stable, and you can even
|
||||
use the embedded etcd server in standalone mode...
|
||||
|
||||
* This means you can run `mgmt etcd` and get the standard etcd binary
|
||||
behviour that you'd get from running `etcd` normally. This makes it
|
||||
behaviour that you'd get from running `etcd` normally. This makes it
|
||||
easy to use both together since you only need to transport one binary
|
||||
around. (And maybe mgmt will do that for you!)
|
||||
|
||||
|
||||
142
docs/release-notes/0.0.26
Normal file
@@ -0,0 +1,142 @@
|
||||
I've just released version 0.0.26 of mgmt!
|
||||
|
||||
> 16 files changed, 869 insertions(+), 181 deletions(-)
|
||||
|
||||
Hot on the heels of the recent large release (0.0.25), I've just
|
||||
released an incremental update...
|
||||
|
||||
See more here:
|
||||
|
||||
https://purpleidea.com/blog/2024/03/27/a-new-provisioning-tool/
|
||||
|
||||
With that, here are a few highlights from the release:
|
||||
|
||||
* We have a new mgmt partner program. Please sign-up for early access
|
||||
to these release notes, along with other special privileges. Details
|
||||
at: https://bit.ly/mgmt-partner-program
|
||||
|
||||
* Type unification for the provisioning tool is about 40x faster.
|
||||
|
||||
* We fixed a small bug related to the upcoming Fedora 40 release.
|
||||
|
||||
And much more...
|
||||
|
||||
|
||||
DOWNLOAD
|
||||
|
||||
Prebuilt binaries are available here for this release:
|
||||
https://github.com/purpleidea/mgmt/releases/tag/0.0.26
|
||||
|
||||
They can also be found on the Fedora mirror:
|
||||
https://dl.fedoraproject.org/pub/alt/purpleidea/mgmt/releases/0.0.26/
|
||||
|
||||
|
||||
NEWS
|
||||
|
||||
* Added old release notes into git
|
||||
|
||||
* We now skip over unreleased Fedora versions (like "40 Beta") when
|
||||
trying to automatically determine the latest stable release.
|
||||
|
||||
* Type unification was structurally refactored to make way for a bunch
|
||||
of future improvements and generally to modernize the code.
|
||||
|
||||
* Added some unification optimizations and a unification flag
|
||||
optimization system to allow solvers to support special flags. One of
|
||||
these new flags was used for the provisioner code with a substantial
|
||||
improvement in type unification time by about 40x.
|
||||
|
||||
* New cli args are also available for using these flags.
|
||||
|
||||
* We're looking for help writing Amazon, Google, DigitalOcean, Hetzner,
|
||||
etc, resources. If anyone is interested, reach out to us, particularly
|
||||
if there is support from those organizations as well.
|
||||
|
||||
* Many other bug fixes, changes, etc...
|
||||
|
||||
* See the git log for more NEWS, and for anything notable I left out!
|
||||
|
||||
|
||||
BUGS/TODO
|
||||
|
||||
* Function values getting _passed_ to resources doesn't work yet;
|
||||
it's not a blocker, but it would definitely be useful. We're looking
|
||||
into it.
|
||||
|
||||
* Function graphs are unnecessarily dynamic. We might make them more
|
||||
static so that we don't need as many transactions. This is really a
|
||||
compiler optimization and not a bug, but it's something important we'd
|
||||
like to have.
|
||||
|
||||
* Running two Txn's during the same pause would be really helpful. I'm
|
||||
not sure how much of a performance improvement we'd get from this, but
|
||||
it would sure be interesting to build. If you want to build a fancy
|
||||
synchronization primitive, then let us know! Again this is not a bug.
|
||||
|
||||
* General type unification performance can be improved drastically. I
|
||||
will have to implement the fast algorithm so that we can scale to very
|
||||
large mcl programs. Help is wanted if you are familiar with "unionfind"
|
||||
and/or type unification.
|
||||
|
||||
|
||||
TALKS
|
||||
|
||||
I don't have anything planned until CfgMgmtCamp 2025. If you'd like to
|
||||
book me for a private event, or sponsor my travel for your conference,
|
||||
please let me know.
|
||||
|
||||
I recently gave two talks: one at CfgMgmtCamp 2024, and one at FOSDEM
|
||||
in the golang room. Both are available online and demonstrated an
|
||||
earlier version of the provisioning tool which is fully available
|
||||
today. The talks can be found here: https://purpleidea.com/talks/
|
||||
|
||||
|
||||
PARTNER PROGRAM
|
||||
|
||||
We have a new mgmt partner program which gets you early access to
|
||||
releases, bug fixes, support, and many other goodies. Please sign-up
|
||||
today: https://bit.ly/mgmt-partner-program
|
||||
|
||||
|
||||
MISC
|
||||
|
||||
Our mailing list host (Red Hat) is no longer letting non-Red Hat
|
||||
employees use their infrastructure. We're looking for a new home. I've
|
||||
opened a ticket with Freedesktop. If you have any sway with them or
|
||||
other recommendations, please let me know:
|
||||
https://gitlab.freedesktop.org/freedesktop/freedesktop/-/issues/1082
|
||||
|
||||
We're still looking for new contributors, and there are easy, medium
|
||||
and hard issues available! You're also welcome to suggest your own!
|
||||
Please join us in #mgmtconfig on Libera IRC or Matrix (preferred) and
|
||||
ping us if you'd like help getting started! For details please see:
|
||||
|
||||
https://github.com/purpleidea/mgmt/blob/master/docs/faq.md#how-do-i-contribute-to-the-project-if-i-dont-know-golang
|
||||
|
||||
Many tagged #mgmtlove issues exist:
|
||||
https://github.com/purpleidea/mgmt/issues?q=is%3Aissue+is%3Aopen+label%3Amgmtlove
|
||||
|
||||
Although asking in IRC/matrix is the best way to find something to work
|
||||
on.
|
||||
|
||||
|
||||
MENTORING
|
||||
|
||||
We offer mentoring for new golang/mgmt hackers who want to get
|
||||
involved. This is fun and friendly! You get to improve your skills,
|
||||
and we get some patches in return. Ping me off-list for details.
|
||||
|
||||
|
||||
THANKS
|
||||
|
||||
Thanks (alphabetically) to everyone who contributed to the latest
|
||||
release:
|
||||
James Shubin
|
||||
We had 1 unique committer since 0.0.25, and have had 90 overall.
|
||||
|
||||
|
||||
Happy hacking,
|
||||
James
|
||||
@purpleidea
|
||||
205
docs/release-notes/0.0.27
Normal file
@@ -0,0 +1,205 @@
|
||||
I've just released version 0.0.27 of mgmt!
|
||||
|
||||
> 854 files changed, 28882 insertions(+), 16049 deletions(-)
|
||||
|
||||
This is a rather large release, as I'm not making regular releases unless there's
|
||||
a specific ask. Most folks that are playing with mgmt are using `git master`.
|
||||
|
||||
With that, here are a few highlights from the release:
|
||||
|
||||
* Type unification is now extremely fast for all scenarios.
|
||||
|
||||
* Added a modules/ directory with shared mcl code for everyone to use. This
|
||||
includes code for virtualization, cups, shorewall, dhcp, routers, and more!
|
||||
|
||||
* New core mgmt commands including setup, firstboot, and docs were added!
|
||||
|
||||
* The provisioner got lots of improvements including handoff, and iPXE support.
|
||||
|
||||
And much more...
|
||||
|
||||
|
||||
DOWNLOAD
|
||||
|
||||
Prebuilt binaries are available here for this release:
|
||||
https://github.com/purpleidea/mgmt/releases/tag/0.0.27
|
||||
|
||||
They can also be found on the Fedora mirror:
|
||||
https://dl.fedoraproject.org/pub/alt/purpleidea/mgmt/releases/0.0.27/
|
||||
|
||||
|
||||
NEWS
|
||||
|
||||
* Primary community channel is now on Matrix. IRC is deprecated until someone
|
||||
wants to run a bridge for us.
|
||||
|
||||
* Type unification is now textbook, and blazingly (linearly) fast. The large
|
||||
programs I'm writing now unify in under 200ms. Most small programs typically
|
||||
unify in ~5ms.
|
||||
|
||||
* Resource and edge names are always lists of strings now unless they're static.
|
||||
|
||||
* We're up to golang 1.23 now. Older versions may still work.
|
||||
|
||||
* Our type system now supports unification variables like ?1, ?2 and so on.
|
||||
|
||||
* I fixed a bug in my contrib.sh script which omitted the Co-authored-by people!
|
||||
This means Samuel Gélineau might have previously been missed in past release
|
||||
notes, which is tragic, since he has been by far the most important contributor
|
||||
to mgmt.
|
||||
|
||||
* Made toposort deterministic which fixes some spurious non-determinism.
|
||||
|
||||
* Added the iterator filter function. (An important core primitive.)
|
||||
|
||||
* Cleaned up the output of many resources to make logs more useful / less noisy.
|
||||
|
||||
* Added constants, although I plan to change this to a `const` import package.
|
||||
|
||||
* Added the list and map core packages.
|
||||
|
||||
* Catch $ in metaparams to make the obvious bug cases easier for users to avoid.
|
||||
|
||||
* Consul is behind a build tag for now, since it's non-free. We'll remove it
|
||||
eventually if there isn't a suitable free replacement.
|
||||
|
||||
* Added mcl modules directory with a good initial set of interesting code.
|
||||
|
||||
* Added the "vardir" API to our "local" package. This is a helpful primitive
|
||||
which I use in almost every module that I write.
|
||||
|
||||
* Added a gzip resource!
|
||||
|
||||
* Added a tar resource!
|
||||
|
||||
* We moved the template() function to the golang.template namespace. This makes
|
||||
it clear what kind of template it is and de-emphasizes our "love" for it as the
|
||||
blessed template engine at least for now.
|
||||
|
||||
* Added a sysctl resource!
|
||||
|
||||
* Added a virt-builder resource for building images. We can now automate virtual
|
||||
machines really elegantly.
|
||||
|
||||
* A bunch of core functions were added including stuff in net, strings, deploy,
|
||||
and more!
|
||||
|
||||
* The local package got a neat "pool" function. There are lots of possibilities
|
||||
to use this in creative ways!
|
||||
|
||||
* The GAPI/deploy code got more testing and we found some edge cases and patched
|
||||
them. You can now deploy in all sorts of creative ways and things should work
|
||||
as expected!
|
||||
|
||||
* Added a resource for archiving a deploy. This is deploy:tar and helps with
|
||||
bootstrapping new machines.
|
||||
|
||||
* Found a sneaky DHCP bug and fixed it!
|
||||
|
||||
* Added mgmt setup and firstboot commands! This helps bootstrap things without
|
||||
needing to re-implement that logic everywhere as bash too!
|
||||
|
||||
* Added a "docs" command for generating resources and function documentation!
|
||||
|
||||
* The provisioner got lots of improvements including handoff, and iPXE support.
|
||||
|
||||
* New mcl modules include shorewall, dhcp, cups, some meta modules, misc modules
|
||||
and more!
|
||||
|
||||
* Added a BMC resource in case you want to automate your server hardware.
|
||||
|
||||
* We now allow multiple star (*) imports although it's not recommended.
|
||||
|
||||
* Hostname handoff is now also part of the provisioner.
|
||||
|
||||
* Fixed two type unification corner cases with magic struct functions.
|
||||
|
||||
* Added iPXE support to the provisioner.
|
||||
|
||||
* Added pprof support to make it easy to generate performance information.
|
||||
|
||||
* Added anonymous function calling. These are occasionally useful, and now the
|
||||
language has them. They were fun and concise to implement!
|
||||
|
||||
* We're looking for help writing Amazon, Google, DigitalOcean, Hetzner, etc,
|
||||
resources. If anyone is interested, reach out to us, particularly if there is
|
||||
support from those organizations as well.
|
||||
|
||||
* Many other bug fixes, changes, etc...
|
||||
|
||||
* See the git log for more NEWS, and for anything notable I left out!
|
||||
|
||||
|
||||
BUGS/TODO
|
||||
|
||||
* Function values getting _passed_ to resources doesn't work yet; it's not a
|
||||
blocker, but it would definitely be useful. We're looking into it.
|
||||
|
||||
* Function graphs are unnecessarily dynamic. We might make them more static so
|
||||
that we don't need as many transactions. This is really a compiler optimization
|
||||
and not a bug, but it's something important we'd like to have.
|
||||
|
||||
* Running two Txn's during the same pause would be really helpful. I'm not sure
|
||||
how much of a performance improvement we'd get from this, but it would sure be
|
||||
interesting to build. If you want to build a fancy synchronization primitive,
|
||||
then let us know! Again this is not a bug.
|
||||
|
||||
* The arm64 version doesn't support augeas, so it was built with:
|
||||
GOTAGS='noaugeas' to get the build out.
|
||||
|
||||
|
||||
TALKS
|
||||
|
||||
After FOSDEM/CfgMgmtCamp 2025, I don't have anything planned until CfgMgmtCamp
|
||||
2026. If you'd like to book me for a private event, or sponsor my travel for
|
||||
your conference, please let me know.
|
||||
|
||||
|
||||
PARTNER PROGRAM
|
||||
|
||||
Interest in the partner program has been limited to individuals with no
|
||||
real corporate backing, so it's been officially discontinued for now. If you're
|
||||
interested in partnering with us and receiving support, mgmt products, early
|
||||
access to releases, bug fixes, and many other goodies, please sign-up
|
||||
today: https://bit.ly/mgmt-partner-program
|
||||
|
||||
|
||||
MISC
|
||||
|
||||
Our mailing list host (Red Hat) is no longer letting non-Red Hat employees use
|
||||
their infrastructure. We're looking for a new home. I've opened a ticket with
|
||||
Freedesktop. If you have any sway with them or other recommendations, please let
|
||||
me know:
|
||||
https://gitlab.freedesktop.org/freedesktop/freedesktop/-/issues/1082
|
||||
|
||||
We're still looking for new contributors, and there are easy, medium and
|
||||
hard issues available! You're also welcome to suggest your own! Please join us
|
||||
in #mgmtconfig on Libera IRC or Matrix (preferred) and ping us if you'd like
|
||||
help getting started! For details please see:
|
||||
|
||||
https://github.com/purpleidea/mgmt/blob/master/docs/faq.md#how-do-i-contribute-to-the-project-if-i-dont-know-golang
|
||||
|
||||
Many tagged #mgmtlove issues exist:
|
||||
https://github.com/purpleidea/mgmt/issues?q=is%3Aissue+is%3Aopen+label%3Amgmtlove
|
||||
|
||||
Although asking in matrix is the best way to find something to work on.
|
||||
|
||||
|
||||
MENTORING
|
||||
|
||||
We offer mentoring for new golang/mgmt hackers who want to get involved. This is
|
||||
fun and friendly! You get to improve your skills, and we get some patches in
|
||||
return. Ping me off-list for details.
|
||||
|
||||
|
||||
THANKS
|
||||
|
||||
Thanks (alphabetically) to everyone who contributed to the latest release:
|
||||
Cian Yong Leow, Felix Frank, James Shubin, Joe Groocock, Julian Rüth, Omar Al-Shuha, Samuel Gélineau, xlai89
|
||||
We had 8 unique committers since 0.0.26, and have had 96 overall.
|
||||
Run 'git log 0.0.26..0.0.27' to see what has changed since 0.0.26
|
||||
|
||||
|
||||
Happy hacking,
|
||||
James
|
||||
@purpleidea
|
||||
280
docs/release-notes/1.0.0
Normal file
@@ -0,0 +1,280 @@
|
||||
I've just released version 1.0.0 of mgmt!
|
||||
|
||||
> 614 files changed, 30199 insertions(+), 11916 deletions(-)
|
||||
|
||||
This is a very important and large release. It's been 10 years since I first
|
||||
publicly released this project, and I might as well stop confusing new users.
|
||||
I've been happily using it in production for some time now, and I love writing `mcl`
|
||||
every day! I am doing customer work in mgmt, and I have happy users.
|
||||
|
||||
With that, here are a few highlights from the release:
|
||||
|
||||
* There is a new function engine which is significantly faster on large graphs.
|
||||
It could be improved further, but the optimizations aren't needed for now.
|
||||
|
||||
* The "automatic embedded etcd clustering" should be considered deprecated. You
|
||||
can run with --no-magic to ensure it's off. It was buggy and we will possibly
|
||||
rewrite it in mcl anyway. Expect it to be removed soon.
|
||||
|
||||
* Type unification errors have context and line numbers! Many other error
|
||||
scenarios have this too! This isn't perfect, and there are still some remaining
|
||||
places where you don't get this information. Please help us find and expand
|
||||
these.
|
||||
|
||||
* The function API has been overhauled which now makes writing most functions
|
||||
significantly easier and simpler. They'll also use less memory. This is a
|
||||
benefit of the new function engine.
|
||||
|
||||
* We have added *declarative* for and forkv statements to the language.
|
||||
|
||||
* Exported resources are merged and gorgeous! They work how I've always wanted.
|
||||
You can actually see my experiment in the very first demo of mgmt, and I finally
|
||||
wrote them to work with the language how I've always wanted.
|
||||
|
||||
* There's an http:server:ui set of resources that have been added. Check out:
|
||||
https://www.youtube.com/watch?v=8vz1MMGkuik for some examples of that in action
|
||||
and more!
|
||||
|
||||
And much more...
|
||||
|
||||
|
||||
SPONSORS
|
||||
|
||||
The `mgmt` project is generously sponsored by:
|
||||
|
||||
m9rx corporation - https://m9rx.com/
|
||||
|
||||
Please reach out if you'd like to sponsor!
|
||||
|
||||
|
||||
DOWNLOAD
|
||||
|
||||
Prebuilt binaries are available here for this release:
|
||||
https://github.com/purpleidea/mgmt/releases/tag/1.0.0
|
||||
|
||||
They can also be found on the Fedora mirror:
|
||||
https://dl.fedoraproject.org/pub/alt/purpleidea/mgmt/releases/1.0.0/
|
||||
|
||||
|
||||
NEWS
|
||||
|
||||
* A bunch of misc mcl code has been added to modules/ for you to see.
|
||||
|
||||
* The user resource has been improved following feedback from cloudflare.
|
||||
|
||||
* Detect self-referential frags when building files that way.
|
||||
|
||||
* Added a new function for URL parsing.
|
||||
|
||||
* Type unification errors have context and line numbers!
|
||||
|
||||
* There's a "baddev" feature branch which gets rebased, and which you can use if
|
||||
you don't want to install the tools to compile the lexer/parser stuff. We do the
|
||||
ugly commit for you if that's easier for development.
|
||||
|
||||
* We have added *declarative* for and forkv statements to the language. If you
|
||||
know of a better name than "forkv" we're happy to hear it, but a small poll
|
||||
didn't produce a more convincing suggestion.
|
||||
|
||||
* Waiting for a deploy just happens automatically with the "empty" frontend.
|
||||
|
||||
* Waiting to run a deploy just waits automatically until etcd is online.
|
||||
|
||||
* Automatic mgmt deploying after virt provisioning works with a seeds field.
|
||||
|
||||
* There's a global flag to skip autoedges to improve performance.
|
||||
|
||||
* The docker resource has been modernized and supports running on a docker host
|
||||
that we're bootstrapping.
|
||||
|
||||
* Docker ports were built backwards and these have been corrected.
|
||||
|
||||
* The "world" interface has been cleaned up dramatically. This will make life
|
||||
easier for someone who wants to add a new backend there. Filesystem, scheduler,
|
||||
deployer, and more are all split.
|
||||
|
||||
* We can run our etcd connection over SSH. That's one of the new backends.
|
||||
There's actually a reconnect issue, but it's an easy fix and it should likely
|
||||
come out in the next release.
|
||||
|
||||
* We have an is_virtual function to detect where mgmt is running!
|
||||
|
||||
* Virtualization modules moved to qcow2 by default. It's solid.
|
||||
|
||||
* Improved a lot of user-facing logging so it's clearer what's happening.
|
||||
|
||||
* Exported resources have been implemented ... and they're glorious. They work
|
||||
how I've always dreamed, and are such a breath of fresh air from the Puppet
|
||||
days. There's an export/collect system. Export works by metaparam, not a special
|
||||
language feature, and collect works with core functions. It runs when the
|
||||
resource in the graph actually runs, as opposed to "all at once, even if you
|
||||
fail" like the old days. Yay!
|
||||
|
||||
* fmt.printf handles more cases!
|
||||
|
||||
* The file resource now has a symlink param. Someone test it and find issues.
|
||||
|
||||
* We have an iter.range function which is helpful with `for` statements.
|
||||
|
||||
* We do some speculation which drastically reduces the shape of the function
|
||||
graphs in a lot of constant scenarios. This also reduces the need to change the
|
||||
shape, which brings a huge performance boost.
|
||||
|
||||
* Don't reuse fact pointers. There was a bug around those. In fact get rid of
|
||||
the fact API since it's pointless really.
|
||||
|
||||
* There's some new stuff in the convert package.
|
||||
|
||||
* We added an http:server:ui resource. This is kind of a prototype, but you can
|
||||
see it in action here: https://www.youtube.com/watch?v=8vz1MMGkuik
|
||||
|
||||
* Fix some send/recv corner cases. I wish I had more tests for this. Hint!
|
||||
|
||||
* There's an os.readfilewait() function in temporarily. This will go away when
|
||||
we get the <|> operator.
|
||||
|
||||
* A WatchFiles field was added to the exec resource. Very handy.
|
||||
|
||||
* We have a new "line" resource. It supports "trim"-ing too.
|
||||
|
||||
* There are some new functions that have been added.
|
||||
|
||||
* The modules/ directory got some ssh key things.
|
||||
|
||||
* Automatic grouping logic improved, thanks to http:server:ui stuff.
|
||||
|
||||
* Hierarchical grouping works very reliably as far as I can tell.
|
||||
|
||||
* A bunch of ctx's were added all over where they never were. Legacy code!
|
||||
|
||||
* A bunch of network/NetworkManager/networkd and related mcl code was added. The
|
||||
interfaces are really ugly; what is the correct upstream network config thing?
|
||||
|
||||
* We have a modinfo function.
|
||||
|
||||
* We built in some ulimit settings for modern machines.
|
||||
|
||||
* We have an mcl class for copr setup.
|
||||
|
||||
* We added SSH hostkey logic into our core etcd ssh connection tooling.
|
||||
|
||||
* The provisioner supports exec handoff. It can also handle more scenarios, eg
|
||||
booting from an ipxe usb key and not installing on it.
|
||||
|
||||
* The provisioner supports encrypting machines with LUKS. It does this in a very
|
||||
clever way to allow creation of secure passwords after first boot. Many kudos to
|
||||
the systemd and other authors who built all the needed pieces for this to just
|
||||
work fairly well.
|
||||
|
||||
* We improved a graph function from O(n) to O(1). Woops =D
|
||||
|
||||
* We removed the secret channels from the function graphs. This is much simpler
|
||||
now!
|
||||
|
||||
* ExprIf and StmtIf both do the more correct thing. I guess the bigger graph was
|
||||
eventually going to need to get killed. This was a good choice that I didn't
|
||||
make soon enough.
|
||||
|
||||
* A ton of races were killed. We're building by default with the race checker.
|
||||
I don't know why I didn't do this ten years ago. Performance is not so terrible
|
||||
these days, and it catches so much. Woops. Good lesson to share with others.
|
||||
|
||||
* The language has a nil type, but don't worry, this is only for internal
|
||||
plumbing, and we will NOT let it be user facing!
|
||||
|
||||
* The langpuppet stuff had to be removed again for now. If it's used, patch in.
|
||||
|
||||
* The GAPI stuff got a major cleanup. It was early code that was bad. Now it's a
|
||||
lot better.
|
||||
|
||||
* The new function engine is the really big story. Have a look if you're an
|
||||
algorithmist. We'd love to have people work on improving it further. It's most
|
||||
likely glitch free now too!
|
||||
|
||||
* The virt resource code got a big cleanup. It runs hotplug again, which had rotted
|
||||
due to libvirt api changes I think.
|
||||
|
||||
* The qemu guest agent works automatically again.
|
||||
|
||||
* The svc resource (one of the earliest) has been overhauled since it had old
|
||||
buggy code which has now been fixed.
|
||||
|
||||
* We're looking for help writing Amazon, Google, DigitalOcean, Hetzner, etc,
|
||||
resources. If anyone is interested, reach out to us, particularly if there is
|
||||
support from those organizations as well.
|
||||
|
||||
* Many other bug fixes, changes, etc...
|
||||
|
||||
* See the git log for more NEWS, and for anything notable I left out!
|
||||
|
||||
|
||||
BUGS/TODO
|
||||
|
||||
* Function values getting _passed_ to resources doesn't work yet; it's not a
|
||||
blocker, but it would definitely be useful. We're looking into it.
|
||||
|
||||
* The arm64 version doesn't support augeas, so it was built with:
|
||||
GOTAGS='noaugeas' to get the build out.
|
||||
|
||||
* We don't have the <|> operator merged yet. Expect that when we do this, we'll
|
||||
consider removing the || (default) operator. This is the only pending language
|
||||
change that I know of, and these cases are easily caught by the compiler and can
|
||||
be easily patched.
|
||||
|
||||
* Autoedge performance isn't great. It can easily be disabled. Most of the time
|
||||
I just specify my edges, so this is really a convenience feature, but it should
|
||||
be looked into when we have a chance.
|
||||
|
||||
* There's a subtle ssh reconnect issue which can occur. It should be easy to
|
||||
fix and I have a patch in testing.
|
||||
|
||||
* Our wasm code input fields grew tick marks, but I think this disturbed the
|
||||
buggy wasm code. If someone is an expert here, please have at it.
|
||||
|
||||
|
||||
TALKS
|
||||
|
||||
After FOSDEM/CfgMgmtCamp 2026, I don't have anything planned until CfgMgmtCamp
|
||||
2027. If you'd like to book me for a private event, or sponsor my travel for
|
||||
your conference, please let me know.
|
||||
|
||||
|
||||
MISC
|
||||
|
||||
Our mailing list host (Red Hat) is no longer letting non-Red Hat employees use
|
||||
their infrastructure. We're looking for a new home. I've opened a ticket with
|
||||
Freedesktop. If you have any sway with them or other recommendations, please let
|
||||
me know:
|
||||
https://gitlab.freedesktop.org/freedesktop/freedesktop/-/issues/1082
|
||||
|
||||
We're still looking for new contributors, and there are easy, medium and
|
||||
hard issues available! You're also welcome to suggest your own! Please join us
|
||||
in #mgmtconfig on Libera IRC or Matrix (preferred) and ping us if you'd like
|
||||
help getting started! For details please see:
|
||||
|
||||
https://github.com/purpleidea/mgmt/blob/master/docs/faq.md#how-do-i-contribute-to-the-project-if-i-dont-know-golang
|
||||
|
||||
Many tagged #mgmtlove issues exist:
|
||||
https://github.com/purpleidea/mgmt/issues?q=is%3Aissue+is%3Aopen+label%3Amgmtlove
|
||||
|
||||
Although asking in matrix is the best way to find something to work on.
|
||||
|
||||
|
||||
MENTORING
|
||||
|
||||
We offer mentoring for new golang/mgmt hackers who want to get involved. This is
|
||||
fun and friendly! You get to improve your skills, and we get some patches in
|
||||
return. Ping me off-list for details.
|
||||
|
||||
|
||||
THANKS
|
||||
|
||||
Thanks (alphabetically) to everyone who contributed to the latest release:
|
||||
Ahmad Abuziad, Edward Toroshchyn, Felix Frank, hades, James Shubin, Karpfen, Lourenço, Lourenço Vales, Samuel Gélineau
|
||||
We had 10 unique committers since 0.0.27, and have had 103 overall.
|
||||
Run 'git log 0.0.27..1.0.0' to see what has changed since 0.0.27
|
||||
|
||||
|
||||
Happy hacking,
|
||||
James
|
||||
@purpleidea
|
||||
@@ -60,7 +60,10 @@ it is not specified, but others cannot, and some might poorly infer if the
|
||||
struct name is ambiguous.
|
||||
|
||||
If you'd like your resource to be accessible by the `YAML` graph API (GAPI),
|
||||
then you'll need to include the appropriate YAML fields as shown below.
|
||||
then you'll need to include the appropriate YAML fields as shown below. This is
|
||||
used by the `puppet` compiler as well, so make sure you include these struct
|
||||
tags if you want existing `puppet` code to be able to run using the `mgmt`
|
||||
engine.
|
||||
|
||||
#### Example
|
||||
|
||||
@@ -358,14 +361,14 @@ func (obj *FooRes) Watch(ctx context.Context) error {
|
||||
// notify engine that we're running
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
// the actual events!
|
||||
case event := <-obj.foo.Events:
|
||||
if is_an_event {
|
||||
send = true
|
||||
if !is_an_event {
|
||||
continue // skip event
|
||||
}
|
||||
// send below...
|
||||
|
||||
// event errors
|
||||
case err := <-obj.foo.Errors:
|
||||
@@ -375,11 +378,7 @@ func (obj *FooRes) Watch(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
obj.init.Event()
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -520,9 +519,10 @@ graph edges from another resource. These values are consumed during the
|
||||
any resource that has an appropriate value and that has the `Sendable` trait.
|
||||
You can read more about this in the Send/Recv section below.
|
||||
|
||||
### Collectable
|
||||
### Exportable
|
||||
|
||||
This is currently a stub and will be updated once the DSL is further along.
|
||||
Exportable allows a resource to tell the exporter what subset of its data it
|
||||
wishes to export when that occurs. It is rare that you will need to use this.
|
||||
|
||||
## Resource Initialization
|
||||
|
||||
@@ -620,7 +620,7 @@ func init() { // special golang method that runs once
|
||||
|
||||
To support YAML unmarshalling for your resource, you must implement an
|
||||
additional method. It is recommended if you want to use your resource with the
|
||||
`yaml` compiler.
|
||||
`puppet` compiler.
|
||||
|
||||
```golang
|
||||
UnmarshalYAML(unmarshal func(interface{}) error) error // optional
|
||||
@@ -684,8 +684,41 @@ if val, exists := obj.init.Recv()["some_key"]; exists {
|
||||
}
|
||||
```
|
||||
|
||||
The specifics of resource sending are not currently documented. Please send a
|
||||
patch here!
|
||||
A resource can send a value during CheckApply by running the `obj.init.Send()`
|
||||
method. It must always send a value if (1) it's not erroring in CheckApply, and
|
||||
(2) the `obj.SendActive()` method inside of CheckApply returns true. It is
|
||||
not harmful to run the Send method if CheckApply is going to error, or if
|
||||
`obj.SendActive()` returns false, just unnecessary. In the `!apply` case where
|
||||
we're running in "noop" mode, and where the state is not correct, then you
|
||||
should still attempt to send a value, but it is a bit ambiguous which value to
|
||||
send. This behaviour may be specified in the future, but at the moment it's
|
||||
mostly inconsequential. At the moment, `obj.SendActive()` is disabled at compile
|
||||
time, but can be enabled if you have a legitimate use-case for it.
|
||||
|
||||
```golang
|
||||
// inside CheckApply, somewhere near the end usually
|
||||
if err := obj.init.Send(&ExecSends{ // send the special data structure
|
||||
Output: obj.output,
|
||||
Stdout: obj.stdout,
|
||||
Stderr: obj.stderr,
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
```
|
||||
|
||||
You must also implement the `Sends()` method which should return the above
|
||||
sending struct with all of the fields containing their default values. Please
|
||||
note that those fields must have their struct tags set appropriately.
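
For example, a `Sends()` implementation to pair with the `ExecSends` struct
used above might look roughly like this (a sketch; the exact struct tag names
here are assumptions):

```golang
// ExecSends is the struct of data which is sent after a successful Apply.
type ExecSends struct {
	// Output is the combined stdout and stderr of the command.
	Output *string `lang:"output"`

	// Stdout is the stdout of the command.
	Stdout *string `lang:"stdout"`

	// Stderr is the stderr of the command.
	Stderr *string `lang:"stderr"`
}

// Sends returns the struct of values which this resource can send via Send/Recv.
func (obj *ExecRes) Sends() interface{} {
	return &ExecSends{
		Output: nil,
		Stdout: nil,
		Stderr: nil,
	}
}
```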
|
||||
|
||||
### Safety
|
||||
|
||||
Lastly, please note that in order for a resource to send a useful value, even
|
||||
when its state is already correct (it may have run earlier for example) then it
|
||||
may require the implementation of CheckApply to cache a return value for later
|
||||
use. Keep in mind that you should store this securely should there be a chance
|
||||
that sensitive info is contained within, and that an untrusted user could put
|
||||
malicious data in the cache if you are not careful. It's best to make sure the
|
||||
users of your resource are aware of its implementation details here.
|
||||
|
||||
## Composite resources
|
||||
|
||||
@@ -713,7 +746,7 @@ Higher level resource collections will be possible once the `mgmt` DSL is ready.
|
||||
### Why does the resource API have `CheckApply` instead of two separate methods?
|
||||
|
||||
In an early version we actually had both "parts" as separate methods, namely:
|
||||
`StateOK` (Check) and `Apply`, but the [decision](58f41eddd9c06b183f889f15d7c97af81b0331cc)
|
||||
`StateOK` (Check) and `Apply`, but the [decision](https://github.com/purpleidea/mgmt/commit/58f41eddd9c06b183f889f15d7c97af81b0331cc)
|
||||
was made to merge the two into a single method. There are two reasons for this:
|
||||
|
||||
1. Many situations would involve the engine running both `Check` and `Apply`. If
|
||||
|
||||
@@ -56,7 +56,7 @@ It has the following properties:
|
||||
* `image`: docker `image` or `image:tag`
|
||||
* `cmd`: a command or list of commands to run on the container
|
||||
* `env`: a list of environment variables, e.g. `["VAR=val",],`
|
||||
* `ports`: a map of portmappings, e.g. `{"tcp" => {80 => 8080, 443 => 8443,},},`
|
||||
* `ports`: a map of portmappings, e.g. `{"tcp" => {8080 => 80, 8443 => 443,},},`
|
||||
* `apiversion:` override the host's default docker version, e.g. `"v1.35"`
|
||||
* `force`: destroy and rebuild the container instead of erroring on wrong image
|
||||
|
||||
|
||||
145
docs/service-guide.md
Normal file
@@ -0,0 +1,145 @@
|
||||
# Service API design guide
|
||||
|
||||
This document is intended as a short instructional design guide in building a
|
||||
service management API. It is certainly intended for someone who wishes to use
|
||||
`mgmt` resources and functions to interact with their facilities, however it may
|
||||
be of more general use as well. Hopefully this will help you make smarter design
|
||||
considerations early on, and prevent some amount of unnecessary technical debt.
|
||||
|
||||
## Main aspects
|
||||
|
||||
What follows are some of the most common considerations which you may wish to
|
||||
take into account when building your service. This list is non-exhaustive. Of
|
||||
particular note, as of the writing of this document, many of these designs are
|
||||
not taken into account or not well-handled or implemented by the major API
|
||||
("cloud") providers.
|
||||
|
||||
### Authentication
|
||||
|
||||
#### The status-quo
|
||||
|
||||
Many services naturally require you to authenticate yourself. Usually the
|
||||
initial user who sets up the account and provides credit card details will need
|
||||
to download secret credentials in order to access the service. The onus is on
|
||||
the user to keep those credentials private, and to prevent leaking them. It is
|
||||
convenient (and insecure) to store them in `git` repositories containing scripts
|
||||
and configuration management code. Since it's likely you will use multiple
|
||||
different services, it also means you will have a ton of different credentials
|
||||
to guard.
|
||||
|
||||
#### An alternative
|
||||
|
||||
Instead, build your service to accept a public key that you store in the user's
|
||||
account. Only consumers that can correctly sign messages matching this public
|
||||
key should be authorized. This mechanism is well-understood by anyone who has
|
||||
ever uploaded their public SSH key to a server. You can use SSH keys, GPG keys,
|
||||
or even get into Kerberos if that's appropriate. Best of all, if you and other
|
||||
services use a standardized mechanism like GPG, a user might only need to keep
|
||||
track of their single key-pair, even when they're using multiple services!
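
As a minimal sketch of this idea (not tied to any particular service, and using
ed25519 purely as an example), the verification on the service side can be as
small as this:

```golang
package main

import (
	"crypto/ed25519"
	"fmt"
)

// authorized returns true if the request body was signed by the private key
// matching the public key stored in the user's account.
func authorized(storedPubKey ed25519.PublicKey, body, signature []byte) bool {
	return ed25519.Verify(storedPubKey, body, signature)
}

func main() {
	// In a real service, the public key is uploaded once by the user. Here we
	// generate a throwaway key pair just to exercise the check.
	pub, priv, err := ed25519.GenerateKey(nil) // nil means use crypto/rand
	if err != nil {
		panic(err)
	}
	body := []byte(`{"action": "create", "name": "example"}`)
	sig := ed25519.Sign(priv, body)
	fmt.Println(authorized(pub, body, sig)) // true
}
```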
|
||||
|
||||
### Events
|
||||
|
||||
#### The problem
|
||||
|
||||
People have been building "[CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete)"
|
||||
and "[REST](https://en.wikipedia.org/wiki/REST)"ful API's for years. The biggest
|
||||
missing part that most of them don't provide is events. If users want to know
|
||||
when a resource changes, they have to repeatedly poll the server, which is both
|
||||
network intensive, and introduces latency. When services were simpler, this
|
||||
wasn't as much of a consideration, but these days it matters. An embarrassingly
|
||||
small number of major software vendors implement these correctly, if at all.
|
||||
|
||||
#### Why events?
|
||||
|
||||
The `mgmt` tool is different from most other static tools in that it allows
|
||||
reading streams of incoming data, and streams of change events from resources we
|
||||
are managing. If an event API is not available, we can still poll, but this is
|
||||
not as desirable. An event-capable API doesn't prevent polling if that's
|
||||
preferred; you can always repeat a read request periodically.
|
||||
|
||||
#### Variants
|
||||
|
||||
The two common mechanisms for receiving events are "callbacks" and
|
||||
"long-polling". In the former, the service contacts the consumer when something
|
||||
happens. In the latter, the consumer opens a connection, and the service either
|
||||
closes the connection or sends the reply, when it's ready. Long-polling is often
|
||||
preferred since it doesn't require an open firewall on the consumer's side.
|
||||
Callbacks are sometimes preferred by services because they're often cheaper to
|
||||
implement on their end. The callback approach is also less reliable, since it's
hard to know if a callback message
|
||||
wasn't received because it was dropped, or if there just wasn't an event. And it
|
||||
requires static timeouts when retrying a callback message, and so on. It's best
|
||||
to implement long-polling or something equivalent at a minimum.
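
Here is a rough, hypothetical sketch of the service side of a long-polling
endpoint in golang; the route, payload, and timeout are all made up:

```golang
package main

import (
	"fmt"
	"net/http"
	"time"
)

// events is where the rest of the service publishes change notifications.
var events = make(chan string)

// watchHandler holds the connection open until an event arrives, the client
// goes away, or a server-side timeout fires. On timeout, the client simply
// re-issues the request.
func watchHandler(w http.ResponseWriter, r *http.Request) {
	select {
	case ev := <-events:
		fmt.Fprintf(w, "event: %s\n", ev)
	case <-r.Context().Done():
		// The client disconnected; nothing to do.
	case <-time.After(55 * time.Second):
		w.WriteHeader(http.StatusNoContent) // no event yet; poll again
	}
}

func main() {
	http.HandleFunc("/v1/watch", watchHandler)
	http.ListenAndServe(":8080", nil) // error handling omitted for brevity
}
```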
|
||||
|
||||
#### "Since" requests
|
||||
|
||||
When making an event request, some API's will let you tack on a "since" style
|
||||
parameter that tells the endpoint that we're interested in all of the events
|
||||
_since_ a particular timestamp, or _since_ a particular sequence ID. This can be
|
||||
very useful if missing an intermediate event is a concern. Implement this if you
|
||||
can, but it's better for all concerned if purely declarative facilities are all
|
||||
that is required. It also forces the endpoint to maintain some state, which may
|
||||
be undesirable for them.
|
||||
|
||||
#### Out of band
|
||||
|
||||
Some providers have the event system tacked on to a separate facility. If it's
|
||||
not part of the core API, then it's not useful. You shouldn't have to configure
|
||||
a separate system in order to start getting events.
|
||||
|
||||
### Batching
|
||||
|
||||
With so many resources, you might expect to have 1000's of long-polling
|
||||
connections all sitting open and idle. That can't be efficient! It's not, which
|
||||
is why good API's need a batching facility. This lets the consumer group
|
||||
together many watches (all waiting on a long-poll) inside of a single call. That
|
||||
way, only a single connection might be needed for a large amount of information.
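
Continuing the hypothetical sketch from the previous section, a batched watch
might simply accept a list of keys in one request, and answer as soon as any one
of them changes:

```golang
// WatchRequest groups many individual watches into a single long-poll call.
type WatchRequest struct {
	Keys []string `json:"keys"` // e.g. ["vm/web1", "dns/example.net", ...]
}

// WatchResponse reports which of the requested keys changed, so the consumer
// needs one held connection in total, instead of one per key.
type WatchResponse struct {
	Changed []string `json:"changed"`
}
```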
|
||||
|
||||
### Don't auto-generate junk
|
||||
|
||||
Please build an elegant API. Many services auto-generate a "phone book" SDK of
|
||||
junk. It might seem inevitable, so if you absolutely need to do this, then put
|
||||
some extra effort into making it idiomatic. If I'm using an SDK generated for
|
||||
`golang` and I see an internal `foo.String` wrapper, then chances are you have
|
||||
designed your API and code to be easier to maintain for you, instead of
|
||||
prioritizing your customers. Surely the total volume of all customer code is
|
||||
more than your own, so why optimize for that instead of putting the customer
|
||||
first?
|
||||
|
||||
### Resources and functions
|
||||
|
||||
`Mgmt` has a concept of "resources" and "functions". Resources are used in an
|
||||
idempotent model to express desired state and perform that work, and "functions"
|
||||
are used to receive and pull data into the system. That separation has proven to
|
||||
be an elegant one. Consider it when designing your API's. For example, if some
|
||||
vital information can only be obtained after performing a modifying operation,
|
||||
then it might signal that you're missing some sort of a lookup or event-log
|
||||
system. Design your API's to be idempotent; this solves many distributed-system
|
||||
problems involving receiving duplicate messages, and so on.
|
||||
|
||||
## Using mgmt as a library
|
||||
|
||||
Instead of building a new service from scratch, and re-inventing the typical
|
||||
management and CLI layer, consider using `mgmt` as a library, and directly
|
||||
benefiting from that work. This has not been done for a large production
|
||||
service, but the author believes it would be quite efficient, particularly if
|
||||
your application is written in golang. It's equivalently easy to do it for other
|
||||
languages as well; you just end up with two binaries instead of one. (Or you can
|
||||
embed the other binary into the new golang management tool.)
|
||||
|
||||
## Cloud API considerations
|
||||
|
||||
Many "cloud" companies have a lot of technical debt and a lot of customers. As a
|
||||
result, it might be very hard for them to improve their API's, particularly
|
||||
without breaking compatibility promises for their existing customers. As a
|
||||
result, they should either add a versioned API, which lets newer consumers get
|
||||
the benefit, or add new parallel services which offer the modern features. If
|
||||
they don't, the only solution is for new competitors to build in these better
|
||||
efficiencies, eventually offering better value to cost ratios, which will then
|
||||
make legacy products less lucrative and therefore unmaintainable as compared to
|
||||
their competitors.
|
||||
|
||||
## Suggestions
|
||||
|
||||
If you have any ideas for suggestions or other improvements to this guide,
|
||||
please let us know! I hope this was helpful. Please reach out if you are
|
||||
building an API that you might like to have `mgmt` consume!
|
||||
@@ -67,6 +67,37 @@ Whenever a constant or function parameter is defined, try and have the safer or
|
||||
default value be the `zero` value. For example, instead of `const NoDanger`, use
|
||||
`const AllowDanger` so that the `false` value is the safe scenario.
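
An illustrative sketch of the same idea with a function parameter (the names
here are invented for this example):

```golang
// Not ideal: the caller must remember to pass `true` to get the safe path.
func CleanupNotIdeal(path string, noDanger bool) error {
	// ...
	return nil
}

// Better: the `false` zero value is the safe scenario by default.
func Cleanup(path string, allowDanger bool) error {
	// ...
	return nil
}
```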
|
||||
|
||||
### Method receiver pointers
|
||||
|
||||
You almost always want any method receivers to be declared on the pointer to the
|
||||
struct. There are only a few rare situations where this is not the case. This
|
||||
makes it easier to merge future changes that mutate the state without wondering
|
||||
why you now have two different copies of a struct. When you do need to copy a
|
||||
struct, you can add a `Copy()` method to it. It's true that in many situations
|
||||
adding the pointer adds a small performance penalty, but we haven't found them
|
||||
to be significant in practice. If you do have a performance sensitive patch
|
||||
which benefits from skipping the pointer, please demonstrate this need with
|
||||
data first.
|
||||
|
||||
#### Example
|
||||
|
||||
```golang
|
||||
type Foo struct {
|
||||
Whatever string
|
||||
// ...
|
||||
}
|
||||
|
||||
// Bar is implemented correctly as a pointer on Foo.
|
||||
func (obj *Foo) Bar(baz string) int {
|
||||
// ...
|
||||
}
|
||||
|
||||
// Bar is implemented *incorrectly* without a pointer to Foo.
|
||||
func (obj Foo) Bar(baz string) int {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
### Method receiver naming
|
||||
|
||||
[Contrary](https://github.com/golang/go/wiki/CodeReviewComments#receiver-names)
|
||||
@@ -98,9 +129,9 @@ For example, in a short string snippet you can use `s` instead of `myString`, as
|
||||
well as other common choices. `i` is a common `int` counter, `f` for files, `fn`
|
||||
for functions, `x` for something else and so on.
|
||||
|
||||
### Variable re-use
|
||||
### Variable reuse
|
||||
|
||||
Feel free to create and use new variables instead of attempting to re-use the
|
||||
Feel free to create and use new variables instead of attempting to reuse the
|
||||
same string. For example, if a function input arg is named `s`, you can use a
|
||||
new variable to receive the first computation result on `s` instead of storing
|
||||
it back into the original `s`. This avoids confusion if a different part of the
|
||||
@@ -114,7 +145,7 @@ MyNotIdealFunc(s string, b bool) string {
|
||||
if !b {
|
||||
return s + "hey"
|
||||
}
|
||||
s = strings.Replace(s, "blah", "", -1) // not ideal (re-use of `s` var)
|
||||
s = strings.Replace(s, "blah", "", -1) // not ideal (reuse of `s` var)
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -122,7 +153,7 @@ MyOkayFunc(s string, b bool) string {
|
||||
if !b {
|
||||
return s + "hey"
|
||||
}
|
||||
s2 := strings.Replace(s, "blah", "", -1) // doesn't re-use `s` variable
|
||||
s2 := strings.Replace(s, "blah", "", -1) // doesn't reuse `s` variable
|
||||
return s2
|
||||
}
|
||||
|
||||
@@ -225,6 +256,15 @@ like: `import "https://github.com/purpleidea/mgmt-banana/"` and namespace it as
|
||||
`import "https://github.com/purpleidea/mgmt-banana/" as tomato` or something
|
||||
similar.
|
||||
|
||||
### Imports
|
||||
|
||||
When importing "golang" modules such as "golang/strings" it's recommended to use
|
||||
the `import "golang/strings" as golang_strings` format. This is to avoid
|
||||
confusion with the normal core package you get from `import "strings"`.
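
For example (the specific function names used here are only illustrative, and
may differ from the actual package contents):

```mcl
import "strings"                          # mgmt core package
import "golang/strings" as golang_strings # golang stdlib wrapper

$parts = strings.split("a,b,c", ",")            # core function (illustrative)
$lower = golang_strings.to_lower("HELLO WORLD") # golang wrapper (illustrative)
```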
|
||||
|
||||
In the long-term, we expect to remove the `"golang/"` namespace when our own
|
||||
standard library is complete enough.
|
||||
|
||||
### Licensing
|
||||
|
||||
We believe that sharing code helps reduce unnecessary re-invention, so that we
|
||||
|
||||
103
docs/util/metadata.go
Normal file
@@ -0,0 +1,103 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
// Package util handles metadata for documentation generation.
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var (
|
||||
registeredResourceMetadata = make(map[string]*Metadata) // must initialize
|
||||
registeredFunctionMetadata = make(map[string]*Metadata) // must initialize
|
||||
)
|
||||
|
||||
// RegisterResource records the metadata for a resource of this kind.
|
||||
func RegisterResource(kind string, metadata *Metadata) error {
|
||||
if _, exists := registeredResourceMetadata[kind]; exists {
|
||||
return fmt.Errorf("metadata kind %s is already registered", kind)
|
||||
}
|
||||
|
||||
registeredResourceMetadata[kind] = metadata
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LookupResource looks up the metadata for a resource of this kind.
|
||||
func LookupResource(kind string) (*Metadata, error) {
|
||||
metadata, exists := registeredResourceMetadata[kind]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("not found")
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
// RegisterFunction records the metadata for a function of this name.
|
||||
func RegisterFunction(name string, metadata *Metadata) error {
|
||||
if _, exists := registeredFunctionMetadata[name]; exists {
|
||||
return fmt.Errorf("metadata named %s is already registered", name)
|
||||
}
|
||||
|
||||
registeredFunctionMetadata[name] = metadata
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LookupFunction looks up the metadata for a function of this name.
|
||||
func LookupFunction(name string) (*Metadata, error) {
|
||||
metadata, exists := registeredFunctionMetadata[name]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("not found")
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
// Metadata stores some additional information about the function or resource.
|
||||
// This is used to automatically generate documentation.
|
||||
type Metadata struct {
|
||||
// Filename is the filename (without any base dir path) that this is in.
|
||||
Filename string
|
||||
|
||||
// Typename is the string name of the main resource struct or function.
|
||||
Typename string
|
||||
}
|
||||
|
||||
// GetMetadata returns some metadata about the func. It can be called at any
|
||||
// time. This must not be named the same as the struct it's on, or using it as an
|
||||
// anonymous embedded struct will stop us from being able to call this method.
|
||||
func (obj *Metadata) GetMetadata() *Metadata {
|
||||
//if obj == nil { // TODO: Do I need this?
|
||||
// return nil
|
||||
//}
|
||||
return &Metadata{
|
||||
Filename: obj.Filename,
|
||||
Typename: obj.Typename,
|
||||
}
|
||||
}
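
A small usage sketch of the above registration API (assuming the import path
follows the package location in the tree, and using made-up values):

```golang
package main

import (
	"fmt"

	"github.com/purpleidea/mgmt/docs/util"
)

func main() {
	// Register the metadata for a hypothetical "noop" resource.
	md := &util.Metadata{
		Filename: "noop.go",
		Typename: "NoopRes",
	}
	if err := util.RegisterResource("noop", md); err != nil {
		fmt.Printf("error: %v\n", err)
		return
	}

	// Look it back up, eg when generating documentation.
	found, err := util.LookupResource("noop")
	if err != nil {
		fmt.Printf("error: %v\n", err)
		return
	}
	fmt.Printf("%s: %s\n", found.Typename, found.Filename)
}
```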
@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

||||
|
||||
@@ -52,19 +52,27 @@ func (obj *Engine) OKTimestamp(vertex pgraph.Vertex) bool {
// BadTimestamps returns the list of vertices that are causing our timestamp to
// be bad.
func (obj *Engine) BadTimestamps(vertex pgraph.Vertex) []pgraph.Vertex {
obj.tlock.RLock()
state := obj.state[vertex]
obj.tlock.RUnlock()

vs := []pgraph.Vertex{}
obj.state[vertex].mutex.RLock() // concurrent read start
ts := obj.state[vertex].timestamp // race
obj.state[vertex].mutex.RUnlock() // concurrent read end
state.mutex.RLock() // concurrent read start
ts := state.timestamp // race
state.mutex.RUnlock() // concurrent read end
// these are all the vertices pointing TO vertex, eg: ??? -> vertex
for _, v := range obj.graph.IncomingGraphVertices(vertex) {
obj.tlock.RLock()
state := obj.state[v]
obj.tlock.RUnlock()

// If the vertex has a greater timestamp than any prerequisite,
// then we can't run right now. If they're equal (eg: initially
// with a value of 0) then we also can't run because we should
// let our pre-requisites go first.
obj.state[v].mutex.RLock() // concurrent read start
t := obj.state[v].timestamp // race
obj.state[v].mutex.RUnlock() // concurrent read end
state.mutex.RLock() // concurrent read start
t := state.timestamp // race
state.mutex.RUnlock() // concurrent read end
if obj.Debug {
obj.Logf("OKTimestamp: %d >= %d (%s): !%t", ts, t, v.String(), ts >= t)
}
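
The refactor above fetches the per-vertex state handle once under the engine-wide tlock and then relies on that state's own mutex. A minimal sketch of the same two-level locking pattern, with simplified stand-in types (vertexState is not the engine's real *State):

package sketch

import "sync"

// vertexState is a simplified stand-in for the engine's per-vertex *State.
type vertexState struct {
	mutex     sync.RWMutex
	timestamp int64
}

// engineSketch mirrors the pattern above: tlock guards the state map itself,
// while each state's own mutex guards that state's fields.
type engineSketch struct {
	tlock sync.RWMutex
	state map[string]*vertexState // keyed by a vertex name in this sketch
}

func (e *engineSketch) timestampOf(name string) int64 {
	e.tlock.RLock()
	st := e.state[name] // fetch the handle exactly once...
	e.tlock.RUnlock()

	st.mutex.RLock() // ...then lock only that one state
	ts := st.timestamp
	st.mutex.RUnlock()
	return ts
}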
|
||||
@@ -83,6 +91,10 @@ func (obj *Engine) Process(ctx context.Context, vertex pgraph.Vertex) error {
|
||||
return fmt.Errorf("vertex is not a Res")
|
||||
}
|
||||
|
||||
obj.tlock.RLock()
|
||||
state := obj.state[vertex]
|
||||
obj.tlock.RUnlock()
|
||||
|
||||
// backpoke! (can be async)
|
||||
if vs := obj.BadTimestamps(vertex); len(vs) > 0 {
|
||||
// back poke in parallel (sync b/c of waitgroup)
|
||||
@@ -129,12 +141,80 @@ func (obj *Engine) Process(ctx context.Context, vertex pgraph.Vertex) error {
|
||||
// sendrecv!
|
||||
// connect any senders to receivers and detect if values changed
|
||||
// this actually checks and sends into resource trees recursively...
|
||||
|
||||
// XXX: This code is duplicated in the fancier autogrouping code below!
|
||||
//if res, ok := vertex.(engine.RecvableRes); ok {
|
||||
// if obj.Debug {
|
||||
// obj.Logf("SendRecv: %s", res) // receiving here
|
||||
// }
|
||||
// if updated, err := SendRecv(res, nil); err != nil {
|
||||
// return errwrap.Wrapf(err, "could not SendRecv")
|
||||
// } else if len(updated) > 0 {
|
||||
// //for _, s := range graph.UpdatedStrings(updated) {
|
||||
// // obj.Logf("SendRecv: %s", s)
|
||||
// //}
|
||||
// for r, m := range updated { // map[engine.RecvableRes]map[string]*engine.Send
|
||||
// v, ok := r.(pgraph.Vertex)
|
||||
// if !ok {
|
||||
// continue
|
||||
// }
|
||||
// _, stateExists := obj.state[v] // autogrouped children probably don't have a state
|
||||
// if !stateExists {
|
||||
// continue
|
||||
// }
|
||||
// for s, send := range m {
|
||||
// if !send.Changed {
|
||||
// continue
|
||||
// }
|
||||
// obj.Logf("Send/Recv: %v.%s -> %v.%s", send.Res, send.Key, r, s)
|
||||
// // if send.Changed == true, at least one was updated
|
||||
// // invalidate cache, mark as dirty
|
||||
// obj.state[v].setDirty()
|
||||
// //break // we might have more vertices now
|
||||
// }
|
||||
//
|
||||
// // re-validate after we change any values
|
||||
// if err := engine.Validate(r); err != nil {
|
||||
// return errwrap.Wrapf(err, "failed Validate after SendRecv")
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
|
||||
// Send/Recv *can* receive from someone that was grouped! The sender has
|
||||
// to use *their* send/recv handle/implementation, which has to be setup
|
||||
// properly by the parent resource during Init(). See: http:server:flag.
|
||||
collectSendRecv := []engine.Res{} // found resources
|
||||
|
||||
if res, ok := vertex.(engine.RecvableRes); ok {
|
||||
if obj.Debug {
|
||||
obj.Logf("SendRecv: %s", res) // receiving here
|
||||
collectSendRecv = append(collectSendRecv, res)
|
||||
}
|
||||
|
||||
// If we contain grouped resources, maybe someone inside wants to recv?
|
||||
// This code is similar to the above and was added for http:server:ui.
|
||||
// XXX: Maybe this block isn't needed, as mentioned we need to check!
|
||||
if res, ok := vertex.(engine.GroupableRes); ok {
|
||||
process := res.GetGroup() // look through these
|
||||
for len(process) > 0 { // recurse through any nesting
|
||||
var x engine.GroupableRes
|
||||
x, process = process[0], process[1:] // pop from front!
|
||||
|
||||
for _, g := range x.GetGroup() {
|
||||
collectSendRecv = append(collectSendRecv, g.(engine.Res))
|
||||
}
|
||||
}
|
||||
if updated, err := SendRecv(res, nil); err != nil {
|
||||
return errwrap.Wrapf(err, "could not SendRecv")
|
||||
}
|
||||
|
||||
//for _, g := res.GetGroup() // non-recursive, one-layer method
|
||||
for _, g := range collectSendRecv { // recursive method!
|
||||
r, ok := g.(engine.RecvableRes)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// This section looks almost identical to the above one!
|
||||
if updated, err := SendRecv(r, nil); err != nil {
|
||||
return errwrap.Wrapf(err, "could not grouped SendRecv")
|
||||
} else if len(updated) > 0 {
|
||||
//for _, s := range graph.UpdatedStrings(updated) {
|
||||
// obj.Logf("SendRecv: %s", s)
|
||||
@@ -161,11 +241,13 @@ func (obj *Engine) Process(ctx context.Context, vertex pgraph.Vertex) error {
|
||||
|
||||
// re-validate after we change any values
|
||||
if err := engine.Validate(r); err != nil {
|
||||
return errwrap.Wrapf(err, "failed Validate after SendRecv")
|
||||
return errwrap.Wrapf(err, "failed grouped Validate after SendRecv")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
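
The collectSendRecv block above walks grouped resources with a small worklist that pops from the front. A generic sketch of that traversal, generalized to also queue nested children (the groupable interface here is a simplified stand-in for engine.GroupableRes, not the real API):

package sketch

// groupable is a simplified stand-in for engine.GroupableRes.
type groupable interface {
	Name() string
	GetGroup() []groupable
}

// flattenGroups collects every resource nested inside root's group, using the
// same pop-from-front worklist shape as the loop above.
func flattenGroups(root groupable) []groupable {
	out := []groupable{}
	process := root.GetGroup() // look through these
	for len(process) > 0 {     // recurse through any nesting
		var x groupable
		x, process = process[0], process[1:] // pop from front!
		out = append(out, x)
		process = append(process, x.GetGroup()...) // queue children too
	}
	return out
}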
|
||||
// XXX: this might not work with two merged "CompatibleRes" resources...
|
||||
// XXX: fix that so we can have the mappings to do it in lang/interpret.go ?
|
||||
|
||||
var ok = true
|
||||
var applied = false // did we run an apply?
|
||||
@@ -181,21 +263,51 @@ func (obj *Engine) Process(ctx context.Context, vertex pgraph.Vertex) error {
|
||||
refreshableRes.SetRefresh(refresh) // tell the resource
|
||||
}
|
||||
|
||||
// Run the exported resource exporter!
|
||||
var exportOK bool
|
||||
var exportErr error
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
// (Run this concurrently with the CheckApply related stuff below...)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
// doesn't really need to be in parallel, but we can...
|
||||
exportOK, exportErr = obj.Exporter.Export(ctx, res)
|
||||
}()
|
||||
|
||||
// Check cached state, to skip CheckApply, but can't skip if refreshing!
|
||||
// If the resource doesn't implement refresh, skip the refresh test.
|
||||
// FIXME: if desired, check that we pass through refresh notifications!
|
||||
if (!refresh || !isRefreshableRes) && obj.state[vertex].isStateOK.Load() { // mutex RLock/RUnlock
|
||||
if (!refresh || !isRefreshableRes) && state.isStateOK.Load() { // mutex RLock/RUnlock
|
||||
checkOK, err = true, nil
|
||||
|
||||
} else if noop && (refresh && isRefreshableRes) { // had a refresh to do w/ noop!
|
||||
checkOK, err = false, nil // therefore the state is wrong
|
||||
|
||||
} else if res.MetaParams().Hidden {
|
||||
// We're not running CheckApply
|
||||
if obj.Debug {
|
||||
obj.Logf("%s: Hidden", res)
|
||||
}
|
||||
checkOK, err = true, nil // default
|
||||
|
||||
} else {
|
||||
// run the CheckApply!
|
||||
obj.Logf("%s: CheckApply(%t)", res, !noop)
|
||||
if obj.Debug {
|
||||
obj.Logf("%s: CheckApply(%t)", res, !noop)
|
||||
}
|
||||
// if this fails, don't UpdateTimestamp()
|
||||
checkOK, err = res.CheckApply(ctx, !noop)
|
||||
obj.Logf("%s: CheckApply(%t): Return(%t, %s)", res, !noop, checkOK, engineUtil.CleanError(err))
|
||||
if !checkOK && obj.Debug { // don't log on (checkOK == true)
|
||||
obj.Logf("%s: CheckApply(%t): Return(%t, %s)", res, !noop, checkOK, engineUtil.CleanError(err))
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
checkOK = checkOK && exportOK // always combine
|
||||
if err == nil { // If CheckApply didn't error, look at exportOK.
|
||||
// This is because if CheckApply errors we don't need to care or
|
||||
// tell anyone about an exporting error.
|
||||
err = exportErr
|
||||
}
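
The exporter block above runs concurrently with CheckApply and the two results are merged afterwards. A self-contained sketch of that pattern (the two function parameters are placeholders, not real engine APIs):

package sketch

import (
	"context"
	"sync"
)

// checkApplyAndExport shows the concurrency pattern used above: the export
// runs in parallel with the main check, both boolean results are combined,
// and an export error is only surfaced if the main check did not already fail.
func checkApplyAndExport(
	ctx context.Context,
	checkApply func(context.Context) (bool, error),
	export func(context.Context) (bool, error),
) (bool, error) {
	var exportOK bool
	var exportErr error
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		exportOK, exportErr = export(ctx)
	}()

	checkOK, err := checkApply(ctx)

	wg.Wait()
	checkOK = checkOK && exportOK // always combine
	if err == nil {               // only report the export error if the check was fine
		err = exportErr
	}
	return checkOK, err
}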
|
||||
|
||||
if checkOK && err != nil { // should never return this way
|
||||
@@ -203,7 +315,7 @@ func (obj *Engine) Process(ctx context.Context, vertex pgraph.Vertex) error {
|
||||
}
|
||||
|
||||
if !checkOK { // something changed, restart timer
|
||||
obj.state[vertex].cuid.ResetTimer() // activity!
|
||||
state.cuid.ResetTimer() // activity!
|
||||
if obj.Debug {
|
||||
obj.Logf("%s: converger: reset timer", res)
|
||||
}
|
||||
@@ -211,10 +323,10 @@ func (obj *Engine) Process(ctx context.Context, vertex pgraph.Vertex) error {
|
||||
|
||||
// if CheckApply ran without noop and without error, state should be good
|
||||
if !noop && err == nil { // aka !noop || checkOK
|
||||
obj.state[vertex].tuid.StartTimer()
|
||||
//obj.state[vertex].mutex.Lock()
|
||||
obj.state[vertex].isStateOK.Store(true) // reset
|
||||
//obj.state[vertex].mutex.Unlock()
|
||||
state.tuid.StartTimer()
|
||||
//state.mutex.Lock()
|
||||
state.isStateOK.Store(true) // reset
|
||||
//state.mutex.Unlock()
|
||||
if refresh {
|
||||
obj.SetUpstreamRefresh(vertex, false) // refresh happened, clear the request
|
||||
if isRefreshableRes {
|
||||
@@ -251,9 +363,9 @@ func (obj *Engine) Process(ctx context.Context, vertex pgraph.Vertex) error {
|
||||
wg := &sync.WaitGroup{}
|
||||
// update this timestamp *before* we poke or the poked
|
||||
// nodes might fail due to having a too old timestamp!
|
||||
obj.state[vertex].mutex.Lock() // concurrent write start
|
||||
obj.state[vertex].timestamp = time.Now().UnixNano() // update timestamp (race)
|
||||
obj.state[vertex].mutex.Unlock() // concurrent write end
|
||||
state.mutex.Lock() // concurrent write start
|
||||
state.timestamp = time.Now().UnixNano() // update timestamp (race)
|
||||
state.mutex.Unlock() // concurrent write end
|
||||
for _, v := range obj.graph.OutgoingGraphVertices(vertex) {
|
||||
if !obj.OKTimestamp(v) {
|
||||
// there is at least another one that will poke this...
|
||||
@@ -264,7 +376,7 @@ func (obj *Engine) Process(ctx context.Context, vertex pgraph.Vertex) error {
|
||||
// so that the graph doesn't go on running forever until
|
||||
// it's completely done. This is an optional feature and
|
||||
// we can select it via ^C on user exit or via the GAPI.
|
||||
if obj.fastPause {
|
||||
if obj.fastPause.Load() {
|
||||
obj.Logf("%s: fast pausing, poke skipped", res)
|
||||
continue
|
||||
}
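
The fastPause field changes from a plain bool to an atomic.Bool here, because it is set by the pause path and read from many Worker goroutines. A minimal sketch of that change (the names are illustrative):

package sketch

import "sync/atomic"

// pauseFlag illustrates replacing a plain bool with atomic.Bool: the flag is
// written by whoever requests a fast pause and read concurrently elsewhere,
// so the accesses must be synchronized without needing a full mutex.
type pauseFlag struct {
	fastPause atomic.Bool
}

func (p *pauseFlag) requestFastPause() { p.fastPause.Store(true) }
func (p *pauseFlag) clear()            { p.fastPause.Store(false) }

func (p *pauseFlag) shouldSkipPoke() bool {
	return p.fastPause.Load() // safe to call from any goroutine
}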
|
||||
@@ -294,57 +406,71 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
return fmt.Errorf("vertex is not a resource")
|
||||
}
|
||||
|
||||
obj.tlock.RLock()
|
||||
state := obj.state[vertex]
|
||||
obj.tlock.RUnlock()
|
||||
|
||||
// bonus safety check
|
||||
if res.MetaParams().Burst == 0 && !(res.MetaParams().Limit == rate.Inf) { // blocked
|
||||
return fmt.Errorf("permanently limited (rate != Inf, burst = 0)")
|
||||
}
|
||||
|
||||
// initialize or reinitialize the meta state for this resource uid
|
||||
obj.mlock.Lock()
|
||||
if _, exists := obj.metas[engine.PtrUID(res)]; !exists || res.MetaParams().Reset {
|
||||
obj.metas[engine.PtrUID(res)] = &engine.MetaState{
|
||||
CheckApplyRetry: res.MetaParams().Retry, // lookup the retry value
|
||||
}
|
||||
// if we're using a Hidden resource, we don't support this feature
|
||||
// TODO: should we consider supporting it? is it really necessary?
|
||||
// XXX: to support this for Hidden, we'd need to handle dupe names
|
||||
metas := &engine.MetaState{
|
||||
CheckApplyRetry: res.MetaParams().Retry, // lookup the retry value
|
||||
}
|
||||
if !res.MetaParams().Hidden {
|
||||
// Skip this if Hidden since we can have a hidden res that has
|
||||
// the same kind+name as a regular res, and this would conflict.
|
||||
obj.mlock.Lock()
|
||||
if _, exists := obj.metas[engine.PtrUID(res)]; !exists || res.MetaParams().Reset {
|
||||
obj.metas[engine.PtrUID(res)] = &engine.MetaState{
|
||||
CheckApplyRetry: res.MetaParams().Retry, // lookup the retry value
|
||||
}
|
||||
}
|
||||
metas = obj.metas[engine.PtrUID(res)] // handle
|
||||
obj.mlock.Unlock()
|
||||
}
|
||||
metas := obj.metas[engine.PtrUID(res)] // handle
|
||||
obj.mlock.Unlock()
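
The metas block above keeps retry state alive across graph swaps: look up the entry under a mutex, create or reset it if needed, and skip the shared map entirely for Hidden resources. A simplified sketch with a plain string key standing in for engine.PtrUID:

package sketch

import "sync"

// metaState is a simplified stand-in for engine.MetaState.
type metaState struct {
	checkApplyRetry int
}

type metaStore struct {
	mlock sync.Mutex
	metas map[string]*metaState // keyed by a uid string in this sketch
}

func newMetaStore() *metaStore {
	return &metaStore{metas: make(map[string]*metaState)}
}

// get returns the persistent meta state for uid, creating it (or recreating it
// when reset is true) under the lock, which mirrors the block above. Hidden
// resources would simply skip this and use a fresh, unshared value instead.
func (m *metaStore) get(uid string, retry int, reset bool) *metaState {
	m.mlock.Lock()
	defer m.mlock.Unlock()
	if _, exists := m.metas[uid]; !exists || reset {
		m.metas[uid] = &metaState{checkApplyRetry: retry}
	}
	return m.metas[uid]
}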
|
||||
|
||||
//defer close(obj.state[vertex].stopped) // done signal
|
||||
//defer close(state.stopped) // done signal
|
||||
|
||||
obj.state[vertex].cuid = obj.Converger.Register()
|
||||
obj.state[vertex].tuid = obj.Converger.Register()
|
||||
state.cuid = obj.Converger.Register()
|
||||
state.tuid = obj.Converger.Register()
|
||||
// must wait for all users of the cuid to finish *before* we unregister!
|
||||
// as a result, this defer happens *before* the below wait group Wait...
|
||||
defer obj.state[vertex].cuid.Unregister()
|
||||
defer obj.state[vertex].tuid.Unregister()
|
||||
defer state.cuid.Unregister()
|
||||
defer state.tuid.Unregister()
|
||||
|
||||
defer obj.state[vertex].wg.Wait() // this Worker is the last to exit!
|
||||
defer state.wg.Wait() // this Worker is the last to exit!
|
||||
|
||||
obj.state[vertex].wg.Add(1)
|
||||
state.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.state[vertex].wg.Done()
|
||||
defer close(obj.state[vertex].eventsChan) // we close this on behalf of res
|
||||
defer state.wg.Done()
|
||||
defer close(state.eventsChan) // we close this on behalf of res
|
||||
|
||||
// This is a close reverse-multiplexer. If any of the channels
|
||||
// close, then it will cause the doneCtx to cancel. That way,
|
||||
// multiple different folks can send a close signal, without
|
||||
// every worrying about duplicate channel close panics.
|
||||
obj.state[vertex].wg.Add(1)
|
||||
state.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.state[vertex].wg.Done()
|
||||
defer state.wg.Done()
|
||||
|
||||
// reverse-multiplexer: any close, causes *the* close!
|
||||
select {
|
||||
case <-obj.state[vertex].processDone:
|
||||
case <-obj.state[vertex].watchDone:
|
||||
case <-obj.state[vertex].limitDone:
|
||||
case <-obj.state[vertex].retryDone:
|
||||
case <-obj.state[vertex].removeDone:
|
||||
case <-obj.state[vertex].eventsDone:
|
||||
case <-state.processDone:
|
||||
case <-state.watchDone:
|
||||
case <-state.limitDone:
|
||||
case <-state.retryDone:
|
||||
case <-state.removeDone:
|
||||
case <-state.eventsDone:
|
||||
}
|
||||
|
||||
// the main "done" signal gets activated here!
|
||||
obj.state[vertex].doneCtxCancel() // cancels doneCtx
|
||||
state.doneCtxCancel() // cancels doneCtx
|
||||
}()
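
The goroutine above is the reverse-multiplexer: whichever of the done channels closes first cancels the shared doneCtx, so several owners can each signal shutdown without risking a double close panic. A small standalone variant of the same idea (one goroutine per input channel instead of a single select):

package sketch

import "context"

// reverseMux cancels the returned context as soon as any one of the given
// channels closes. Each producer owns exactly one channel, so nobody ever
// closes the same channel twice, yet they all share a single "done" signal.
func reverseMux(parent context.Context, chans ...<-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(parent)
	for _, ch := range chans {
		ch := ch
		go func() {
			select {
			case <-ch:
				cancel() // any close causes *the* close
			case <-ctx.Done():
			}
		}()
	}
	return ctx, cancel
}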
|
||||
|
||||
var err error
|
||||
@@ -356,14 +482,14 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
errDelayExpired := engine.Error("delay exit")
|
||||
err = func() error { // slim watch main loop
|
||||
timer := time.NewTimer(time.Duration(delay) * time.Millisecond)
|
||||
defer obj.state[vertex].init.Logf("the Watch delay expired!")
|
||||
defer state.init.Logf("the Watch delay expired!")
|
||||
defer timer.Stop() // it's nice to cleanup
|
||||
for {
|
||||
select {
|
||||
case <-timer.C: // the wait is over
|
||||
return errDelayExpired // special
|
||||
|
||||
case <-obj.state[vertex].doneCtx.Done():
|
||||
case <-state.doneCtx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -372,16 +498,27 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
delay = 0 // reset
|
||||
continue
|
||||
}
|
||||
|
||||
} else if res.MetaParams().Hidden {
|
||||
// We're not running Watch
|
||||
if obj.Debug {
|
||||
obj.Logf("%s: Hidden", res)
|
||||
}
|
||||
state.cuid.StartTimer() // TODO: Should we do this?
|
||||
err = state.hidden(state.doneCtx)
|
||||
state.cuid.StopTimer() // TODO: Should we do this?
|
||||
|
||||
} else if interval := res.MetaParams().Poll; interval > 0 { // poll instead of watching :(
|
||||
obj.state[vertex].cuid.StartTimer()
|
||||
err = obj.state[vertex].poll(obj.state[vertex].doneCtx, interval)
|
||||
obj.state[vertex].cuid.StopTimer() // clean up nicely
|
||||
state.cuid.StartTimer()
|
||||
err = state.poll(state.doneCtx, interval)
|
||||
state.cuid.StopTimer() // clean up nicely
|
||||
|
||||
} else {
|
||||
obj.state[vertex].cuid.StartTimer()
|
||||
state.cuid.StartTimer()
|
||||
if obj.Debug {
|
||||
obj.Logf("%s: Watch...", vertex)
|
||||
}
|
||||
err = res.Watch(obj.state[vertex].doneCtx) // run the watch normally
|
||||
err = res.Watch(state.doneCtx) // run the watch normally
|
||||
if obj.Debug {
|
||||
if s := engineUtil.CleanError(err); err != nil {
|
||||
obj.Logf("%s: Watch Error: %s", vertex, s)
|
||||
@@ -389,11 +526,14 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
obj.Logf("%s: Watch Exited...", vertex)
|
||||
}
|
||||
}
|
||||
obj.state[vertex].cuid.StopTimer() // clean up nicely
|
||||
state.cuid.StopTimer() // clean up nicely
|
||||
}
|
||||
if err == nil { // || err == engine.ErrClosed
|
||||
return // exited cleanly, we're done
|
||||
}
|
||||
if err == context.Canceled {
|
||||
return // we shutdown nicely on request
|
||||
}
|
||||
// we've got an error...
|
||||
delay = res.MetaParams().Delay
|
||||
|
||||
@@ -402,7 +542,7 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
}
|
||||
if retry > 0 { // don't decrement past 0
|
||||
retry--
|
||||
obj.state[vertex].init.Logf("retrying Watch after %.4f seconds (%d left)", float64(delay)/1000, retry)
|
||||
state.init.Logf("retrying Watch after %.4f seconds (%d left)", float64(delay)/1000, retry)
|
||||
continue
|
||||
}
|
||||
//if retry == 0 { // optional
|
||||
@@ -415,14 +555,14 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
// If the CheckApply loop exits and THEN the Watch fails with an
|
||||
// error, then we'd be stuck here if exit signal didn't unblock!
|
||||
select {
|
||||
case obj.state[vertex].eventsChan <- errwrap.Wrapf(err, "watch failed"):
|
||||
case state.eventsChan <- errwrap.Wrapf(err, "watch failed"):
|
||||
// send
|
||||
}
|
||||
}()
|
||||
|
||||
// If this exits cleanly, we must unblock the reverse-multiplexer.
|
||||
// I think this additional close is unnecessary, but it's not harmful.
|
||||
defer close(obj.state[vertex].eventsDone) // causes doneCtx to cancel
|
||||
defer close(state.eventsDone) // causes doneCtx to cancel
|
||||
limiter := rate.NewLimiter(res.MetaParams().Limit, res.MetaParams().Burst)
|
||||
var reserv *rate.Reservation
|
||||
var reterr error
|
||||
@@ -436,7 +576,7 @@ Loop:
|
||||
// This select is also the main event receiver and is also the
|
||||
// only place where we read from the poke channel.
|
||||
select {
|
||||
case err, ok := <-obj.state[vertex].eventsChan: // read from watch channel
|
||||
case err, ok := <-state.eventsChan: // read from watch channel
|
||||
if !ok {
|
||||
return reterr // we only return when chan closes
|
||||
}
|
||||
@@ -445,7 +585,7 @@ Loop:
|
||||
// we then save so we can return it to the caller of us.
|
||||
if err != nil {
|
||||
failed = true
|
||||
close(obj.state[vertex].watchDone) // causes doneCtx to cancel
|
||||
close(state.watchDone) // causes doneCtx to cancel
|
||||
reterr = errwrap.Append(reterr, err) // permanent failure
|
||||
continue
|
||||
}
|
||||
@@ -455,7 +595,7 @@ Loop:
|
||||
reserv = limiter.ReserveN(time.Now(), 1) // one event
|
||||
// reserv.OK() seems to always be true here!
|
||||
|
||||
case _, ok := <-obj.state[vertex].pokeChan: // read from buffered poke channel
|
||||
case _, ok := <-state.pokeChan: // read from buffered poke channel
|
||||
if !ok { // we never close it
|
||||
panic("unexpected close of poke channel")
|
||||
}
|
||||
@@ -464,9 +604,9 @@ Loop:
|
||||
}
|
||||
reserv = nil // we didn't receive a real event here...
|
||||
|
||||
case _, ok := <-obj.state[vertex].pauseSignal: // one message
|
||||
case _, ok := <-state.pauseSignal: // one message
|
||||
if !ok {
|
||||
obj.state[vertex].pauseSignal = nil
|
||||
state.pauseSignal = nil
|
||||
continue // this is not a new pause message
|
||||
}
|
||||
// NOTE: If we allowed a doneCtx below to let us out
|
||||
@@ -478,7 +618,7 @@ Loop:
|
||||
|
||||
// we are paused now, and waiting for resume or exit...
|
||||
select {
|
||||
case _, ok := <-obj.state[vertex].resumeSignal: // channel closes
|
||||
case _, ok := <-state.resumeSignal: // channel closes
|
||||
if !ok {
|
||||
closed = true
|
||||
}
|
||||
@@ -493,9 +633,9 @@ Loop:
|
||||
}
|
||||
|
||||
// drop redundant pokes
|
||||
for len(obj.state[vertex].pokeChan) > 0 {
|
||||
for len(state.pokeChan) > 0 {
|
||||
select {
|
||||
case <-obj.state[vertex].pokeChan:
|
||||
case <-state.pokeChan:
|
||||
default:
|
||||
// race, someone else read one!
|
||||
}
|
||||
@@ -512,7 +652,7 @@ Loop:
|
||||
d = reserv.DelayFrom(time.Now())
|
||||
}
|
||||
if reserv != nil && d > 0 { // delay
|
||||
obj.state[vertex].init.Logf("limited (rate: %v/sec, burst: %d, next: %dms)", res.MetaParams().Limit, res.MetaParams().Burst, d/time.Millisecond)
|
||||
state.init.Logf("limited (rate: %v/sec, burst: %d, next: %dms)", res.MetaParams().Limit, res.MetaParams().Burst, d/time.Millisecond)
|
||||
timer := time.NewTimer(time.Duration(d) * time.Millisecond)
|
||||
LimitWait:
|
||||
for {
|
||||
@@ -524,13 +664,13 @@ Loop:
|
||||
break LimitWait
|
||||
|
||||
// consume other events while we're waiting...
|
||||
case e, ok := <-obj.state[vertex].eventsChan: // read from watch channel
|
||||
case e, ok := <-state.eventsChan: // read from watch channel
|
||||
if !ok {
|
||||
return reterr // we only return when chan closes
|
||||
}
|
||||
if e != nil {
|
||||
failed = true
|
||||
close(obj.state[vertex].limitDone) // causes doneCtx to cancel
|
||||
close(state.limitDone) // causes doneCtx to cancel
|
||||
reterr = errwrap.Append(reterr, e) // permanent failure
|
||||
break LimitWait
|
||||
}
|
||||
@@ -541,13 +681,13 @@ Loop:
|
||||
limiter.ReserveN(time.Now(), 1) // one event
|
||||
|
||||
// this pause/resume block is the same as the upper main one
|
||||
case _, ok := <-obj.state[vertex].pauseSignal:
|
||||
case _, ok := <-state.pauseSignal:
|
||||
if !ok {
|
||||
obj.state[vertex].pauseSignal = nil
|
||||
state.pauseSignal = nil
|
||||
break LimitWait
|
||||
}
|
||||
select {
|
||||
case _, ok := <-obj.state[vertex].resumeSignal: // channel closes
|
||||
case _, ok := <-state.resumeSignal: // channel closes
|
||||
if !ok {
|
||||
closed = true
|
||||
}
|
||||
@@ -556,7 +696,7 @@ Loop:
|
||||
}
|
||||
}
|
||||
timer.Stop() // it's nice to cleanup
|
||||
obj.state[vertex].init.Logf("rate limiting expired!")
|
||||
state.init.Logf("rate limiting expired!")
|
||||
}
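
The limiter logic above reserves one token per event and then waits out the reservation's delay. A runnable sketch of the same golang.org/x/time/rate calls, with the engine's timer/select loop reduced to a plain sleep:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One event per second, burst of one: the same knobs as the
	// MetaParams().Limit and MetaParams().Burst used above.
	limiter := rate.NewLimiter(rate.Limit(1), 1)

	for i := 0; i < 3; i++ {
		reserv := limiter.ReserveN(time.Now(), 1) // one event
		if !reserv.OK() {
			fmt.Println("burst too small for this request")
			return
		}
		if d := reserv.DelayFrom(time.Now()); d > 0 {
			fmt.Printf("limited, waiting %v\n", d)
			time.Sleep(d) // the engine waits on a timer/select instead
		}
		fmt.Println("processing event", i)
	}
}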
|
||||
// don't Process anymore if we've already failed or shutdown...
|
||||
if failed || closed {
|
||||
@@ -583,13 +723,13 @@ Loop:
|
||||
break RetryWait
|
||||
|
||||
// consume other events while we're waiting...
|
||||
case e, ok := <-obj.state[vertex].eventsChan: // read from watch channel
|
||||
case e, ok := <-state.eventsChan: // read from watch channel
|
||||
if !ok {
|
||||
return reterr // we only return when chan closes
|
||||
}
|
||||
if e != nil {
|
||||
failed = true
|
||||
close(obj.state[vertex].retryDone) // causes doneCtx to cancel
|
||||
close(state.retryDone) // causes doneCtx to cancel
|
||||
reterr = errwrap.Append(reterr, e) // permanent failure
|
||||
break RetryWait
|
||||
}
|
||||
@@ -600,13 +740,13 @@ Loop:
|
||||
limiter.ReserveN(time.Now(), 1) // one event
|
||||
|
||||
// this pause/resume block is the same as the upper main one
|
||||
case _, ok := <-obj.state[vertex].pauseSignal:
|
||||
case _, ok := <-state.pauseSignal:
|
||||
if !ok {
|
||||
obj.state[vertex].pauseSignal = nil
|
||||
state.pauseSignal = nil
|
||||
break RetryWait
|
||||
}
|
||||
select {
|
||||
case _, ok := <-obj.state[vertex].resumeSignal: // channel closes
|
||||
case _, ok := <-state.resumeSignal: // channel closes
|
||||
if !ok {
|
||||
closed = true
|
||||
}
|
||||
@@ -616,7 +756,7 @@ Loop:
|
||||
}
|
||||
timer.Stop() // it's nice to cleanup
|
||||
delay = 0 // reset
|
||||
obj.state[vertex].init.Logf("the CheckApply delay expired!")
|
||||
state.init.Logf("the CheckApply delay expired!")
|
||||
}
|
||||
// don't Process anymore if we've already failed or shutdown...
|
||||
if failed || closed {
|
||||
@@ -627,7 +767,7 @@ Loop:
|
||||
obj.Logf("Process(%s)", vertex)
|
||||
}
|
||||
backPoke := false
|
||||
err = obj.Process(obj.state[vertex].doneCtx, vertex)
|
||||
err = obj.Process(state.doneCtx, vertex)
|
||||
if err == engine.ErrBackPoke {
|
||||
backPoke = true
|
||||
err = nil // for future code safety
|
||||
@@ -652,7 +792,7 @@ Loop:
|
||||
}
|
||||
if metas.CheckApplyRetry > 0 { // don't decrement past 0
|
||||
metas.CheckApplyRetry--
|
||||
obj.state[vertex].init.Logf(
|
||||
state.init.Logf(
|
||||
"retrying CheckApply after %.4f seconds (%d left)",
|
||||
float64(delay)/1000,
|
||||
metas.CheckApplyRetry,
|
||||
@@ -667,7 +807,7 @@ Loop:
|
||||
// this dies. If Process fails permanently, we ask it
|
||||
// to exit right here... (It happens when we loop...)
|
||||
failed = true
|
||||
close(obj.state[vertex].processDone) // causes doneCtx to cancel
|
||||
close(state.processDone) // causes doneCtx to cancel
|
||||
reterr = errwrap.Append(reterr, err) // permanent failure
|
||||
continue
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -39,7 +39,7 @@ import (
|
||||
|
||||
// AutoEdge adds the automatic edges to the graph.
|
||||
func AutoEdge(graph *pgraph.Graph, debug bool, logf func(format string, v ...interface{})) error {
|
||||
logf("adding autoedges...")
|
||||
logf("building...")
|
||||
|
||||
// initially get all of the autoedges to seek out all possible errors
|
||||
var err error
|
||||
@@ -56,14 +56,16 @@ func AutoEdge(graph *pgraph.Graph, debug bool, logf func(format string, v ...int
|
||||
sorted = append(sorted, res)
|
||||
}
|
||||
|
||||
for _, res := range sorted { // for each vertexes autoedges
|
||||
for _, res := range sorted { // for each vertices autoedges
|
||||
autoEdgeObj, e := res.AutoEdges()
|
||||
if e != nil {
|
||||
err = errwrap.Append(err, e) // collect all errors
|
||||
continue
|
||||
}
|
||||
if autoEdgeObj == nil {
|
||||
logf("no auto edges were found for: %s", res)
|
||||
if debug {
|
||||
logf("no auto edges were found for: %s", res)
|
||||
}
|
||||
continue // next vertex
|
||||
}
|
||||
autoEdgeObjMap[res] = autoEdgeObj // save for next loop
|
||||
@@ -86,9 +88,9 @@ func AutoEdge(graph *pgraph.Graph, debug bool, logf func(format string, v ...int
|
||||
break // inner loop
|
||||
}
|
||||
if debug {
|
||||
logf("autoedge: UIDS:")
|
||||
logf("UIDS:")
|
||||
for i, u := range uids {
|
||||
logf("autoedge: UID%d: %v", i, u)
|
||||
logf("UID%d: %v", i, u)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -129,7 +131,7 @@ func addEdgesByMatchingUIDS(res engine.EdgeableRes, uids []engine.ResUID, graph
|
||||
continue
|
||||
}
|
||||
if debug {
|
||||
logf("autoedge: Match: %s with UID: %s", r, uid)
|
||||
logf("match: %s with UID: %s", r, uid)
|
||||
}
|
||||
// we must match to an effective UID for the resource,
|
||||
// that is to say, the name value of a res is a helpful
|
||||
@@ -138,13 +140,13 @@ func addEdgesByMatchingUIDS(res engine.EdgeableRes, uids []engine.ResUID, graph
|
||||
if UIDExistsInUIDs(uid, r.UIDs()) {
|
||||
// add edge from: r -> res
|
||||
if uid.IsReversed() {
|
||||
txt := fmt.Sprintf("%s -> %s (autoedge)", r, res)
|
||||
logf("autoedge: adding: %s", txt)
|
||||
txt := fmt.Sprintf("%s -> %s", r, res)
|
||||
logf("adding: %s", txt)
|
||||
edge := &engine.Edge{Name: txt}
|
||||
graph.AddEdge(r, res, edge)
|
||||
} else { // edges go the "normal" way, eg: pkg resource
|
||||
txt := fmt.Sprintf("%s -> %s (autoedge)", res, r)
|
||||
logf("autoedge: adding: %s", txt)
|
||||
txt := fmt.Sprintf("%s -> %s", res, r)
|
||||
logf("adding: %s", txt)
|
||||
edge := &engine.Edge{Name: txt}
|
||||
graph.AddEdge(res, r, edge)
|
||||
}
|
||||
|
||||
|
||||
@@ -95,11 +95,19 @@ func (obj *wrappedGrouper) VertexCmp(v1, v2 pgraph.Vertex) error {
|
||||
return fmt.Errorf("one of the autogroup flags is false")
|
||||
}
|
||||
|
||||
if r1.IsGrouped() { // already grouped!
|
||||
return fmt.Errorf("already grouped")
|
||||
}
|
||||
if len(r2.GetGroup()) > 0 { // already has children grouped!
|
||||
return fmt.Errorf("already has groups")
|
||||
// We don't want to bail on these two conditions if the kinds are the
|
||||
// same. This prevents us from having a linear chain of pkg->pkg->pkg,
|
||||
// instead of flattening all of them into one arbitrary choice. But if
|
||||
// we are doing hierarchical grouping, then we want to allow this type
|
||||
// of grouping, or we won't end up building any hierarchies! This was
|
||||
// added for http:server:ui. Check this condition is really required.
|
||||
if r1.Kind() == r2.Kind() { // XXX: needed or do we unwrap the contents?
|
||||
if r1.IsGrouped() { // already grouped!
|
||||
return fmt.Errorf("already grouped")
|
||||
}
|
||||
if len(r2.GetGroup()) > 0 { // already has children grouped!
|
||||
return fmt.Errorf("already has groups")
|
||||
}
|
||||
}
|
||||
if err := r1.GroupCmp(r2); err != nil { // resource groupcmp failed!
|
||||
return errwrap.Wrapf(err, "the GroupCmp failed")
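
The relaxed check above only rejects regrouping when both resources have the same kind, which is what allows hierarchical kinds to nest. A tiny sketch of that decision with plain values (the function and its arguments are illustrative only):

package sketch

import "fmt"

// vertexCmpSketch captures the relaxed rule above: the "already grouped" and
// "already has groups" rejections now only apply when the two kinds are
// identical, so a child kind can still be folded into its parent to build a
// hierarchy.
func vertexCmpSketch(kind1, kind2 string, r1Grouped, r2HasChildren bool) error {
	if kind1 == kind2 { // only flat, same-kind chains are blocked
		if r1Grouped {
			return fmt.Errorf("already grouped")
		}
		if r2HasChildren {
			return fmt.Errorf("already has groups")
		}
	}
	return nil // the per-resource GroupCmp still gets the final say
}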
|
||||
|
||||
|
||||
@@ -59,11 +59,15 @@ func AutoGroup(ag engine.AutoGrouper, g *pgraph.Graph, debug bool, logf func(for
|
||||
if err := ag.VertexCmp(v, w); err != nil { // cmp ?
|
||||
if debug {
|
||||
logf("!GroupCmp for: %s into: %s", wStr, vStr)
|
||||
logf("!GroupCmp err: %+v", err)
|
||||
}
|
||||
|
||||
// remove grouped vertex and merge edges (res is safe)
|
||||
} else if err := VertexMerge(g, v, w, ag.VertexMerge, ag.EdgeMerge); err != nil { // merge...
|
||||
logf("!VertexMerge for: %s into: %s", wStr, vStr)
|
||||
if debug {
|
||||
logf("!VertexMerge err: %+v", err)
|
||||
}
|
||||
|
||||
} else { // success!
|
||||
logf("%s into %s", wStr, vStr)
|
||||
|
||||
|
||||
@@ -49,6 +49,13 @@ import (
|
||||
|
||||
func init() {
|
||||
engine.RegisterResource("nooptest", func() engine.Res { return &NoopResTest{} })
|
||||
engine.RegisterResource("nooptestkind:foo", func() engine.Res { return &NoopResTest{} })
|
||||
engine.RegisterResource("nooptestkind:foo:hello", func() engine.Res { return &NoopResTest{} })
|
||||
engine.RegisterResource("nooptestkind:foo:world", func() engine.Res { return &NoopResTest{} })
|
||||
engine.RegisterResource("nooptestkind:foo:world:big", func() engine.Res { return &NoopResTest{} })
|
||||
engine.RegisterResource("nooptestkind:foo:world:bad", func() engine.Res { return &NoopResTest{} })
|
||||
engine.RegisterResource("nooptestkind:foo:world:bazzz", func() engine.Res { return &NoopResTest{} })
|
||||
engine.RegisterResource("nooptestkind:this:is:very:long", func() engine.Res { return &NoopResTest{} })
|
||||
}
|
||||
|
||||
// NoopResTest is a no-op resource that groups strangely.
|
||||
@@ -108,19 +115,35 @@ func (obj *NoopResTest) GroupCmp(r engine.GroupableRes) error {
|
||||
}
|
||||
|
||||
// TODO: implement this in vertexCmp for *testGrouper instead?
|
||||
if strings.Contains(res.Name(), ",") { // HACK
|
||||
return fmt.Errorf("already grouped") // element to be grouped is already grouped!
|
||||
k1 := strings.HasPrefix(obj.Kind(), "nooptestkind:")
|
||||
k2 := strings.HasPrefix(res.Kind(), "nooptestkind:")
|
||||
if !k1 && !k2 { // XXX: compat mode, to skip during "kind" tests
|
||||
if strings.Contains(res.Name(), ",") { // HACK
|
||||
return fmt.Errorf("already grouped") // element to be grouped is already grouped!
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: make a better grouping algorithm for test expression
|
||||
// XXX: this prevents us from re-using the same kind twice in a test...
|
||||
// group different kinds if they're hierarchical (helpful hack for testing)
|
||||
if obj.Kind() != res.Kind() {
|
||||
s1 := strings.Split(obj.Kind(), ":")
|
||||
s2 := strings.Split(res.Kind(), ":")
|
||||
if len(s1) > len(s2) { // let longer get grouped INTO shorter
|
||||
return fmt.Errorf("chunk inversion")
|
||||
}
|
||||
}
|
||||
|
||||
// group if they start with the same letter! (helpful hack for testing)
|
||||
if obj.Name()[0] != res.Name()[0] {
|
||||
return fmt.Errorf("different starting letter")
|
||||
}
|
||||
//fmt.Printf("group of: %+v into: %+v\n", res.Kind(), obj.Kind())
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewNoopResTest(name string) *NoopResTest {
|
||||
n, err := engine.NewNamedResource("nooptest", name)
|
||||
func NewKindNoopResTest(kind, name string) *NoopResTest {
|
||||
n, err := engine.NewNamedResource(kind, name)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error: %+v", err))
|
||||
}
|
||||
@@ -138,6 +161,10 @@ func NewNoopResTest(name string) *NoopResTest {
|
||||
return x
|
||||
}
|
||||
|
||||
func NewNoopResTest(name string) *NoopResTest {
|
||||
return NewKindNoopResTest("nooptest", name)
|
||||
}
|
||||
|
||||
func NewNoopResTestSema(name string, semas []string) *NoopResTest {
|
||||
n := NewNoopResTest(name)
|
||||
n.MetaParams().Sema = semas
|
||||
@@ -174,21 +201,29 @@ func (obj *testGrouper) VertexCmp(v1, v2 pgraph.Vertex) error {
|
||||
return fmt.Errorf("v2 is not a GroupableRes")
|
||||
}
|
||||
|
||||
if r1.Kind() != r2.Kind() { // we must group similar kinds
|
||||
// TODO: maybe future resources won't need this limitation?
|
||||
return fmt.Errorf("the two resources aren't the same kind")
|
||||
}
|
||||
//if r1.Kind() != r2.Kind() { // we must group similar kinds
|
||||
// // TODO: maybe future resources won't need this limitation?
|
||||
// return fmt.Errorf("the two resources aren't the same kind")
|
||||
//}
|
||||
// someone doesn't want to group!
|
||||
if r1.AutoGroupMeta().Disabled || r2.AutoGroupMeta().Disabled {
|
||||
return fmt.Errorf("one of the autogroup flags is false")
|
||||
}
|
||||
|
||||
if r1.IsGrouped() { // already grouped!
|
||||
return fmt.Errorf("already grouped")
|
||||
}
|
||||
if len(r2.GetGroup()) > 0 { // already has children grouped!
|
||||
return fmt.Errorf("already has groups")
|
||||
// We don't want to bail on these two conditions if the kinds are the
|
||||
// same. This prevents us from having a linear chain of pkg->pkg->pkg,
|
||||
// instead of flattening all of them into one arbitrary choice. But if
|
||||
// we are doing hierarchical grouping, then we want to allow this type
|
||||
// of grouping, or we won't end up building any hierarchies!
|
||||
if r1.Kind() == r2.Kind() {
|
||||
if r1.IsGrouped() { // already grouped!
|
||||
return fmt.Errorf("already grouped")
|
||||
}
|
||||
if len(r2.GetGroup()) > 0 { // already has children grouped!
|
||||
return fmt.Errorf("already has groups")
|
||||
}
|
||||
}
|
||||
|
||||
if err := r1.GroupCmp(r2); err != nil { // resource groupcmp failed!
|
||||
return errwrap.Wrapf(err, "the GroupCmp failed")
|
||||
}
|
||||
@@ -197,6 +232,8 @@ func (obj *testGrouper) VertexCmp(v1, v2 pgraph.Vertex) error {
|
||||
}
|
||||
|
||||
func (obj *testGrouper) VertexMerge(v1, v2 pgraph.Vertex) (v pgraph.Vertex, err error) {
|
||||
//fmt.Printf("merge of: %s into: %s\n", v2, v1)
|
||||
// NOTE: this doesn't look at kind!
|
||||
r1 := v1.(engine.GroupableRes)
|
||||
r2 := v2.(engine.GroupableRes)
|
||||
if err := r1.GroupRes(r2); err != nil { // group them first
|
||||
@@ -273,8 +310,12 @@ Loop:
|
||||
for v1 := range g1.Adjacency() { // for each vertex in g1
|
||||
r1 := v1.(engine.GroupableRes)
|
||||
l1 := strings.Split(r1.Name(), ",") // make list of everyone's names...
|
||||
for _, x1 := range r1.GetGroup() {
|
||||
l1 = append(l1, x1.Name()) // add my contents
|
||||
// XXX: this should be recursive for hierarchical grouping...
|
||||
// XXX: instead, hack it for now:
|
||||
if !strings.HasPrefix(r1.Kind(), "nooptestkind:") {
|
||||
for _, x1 := range r1.GetGroup() {
|
||||
l1 = append(l1, x1.Name()) // add my contents
|
||||
}
|
||||
}
|
||||
l1 = util.StrRemoveDuplicatesInList(l1) // remove duplicates
|
||||
sort.Strings(l1)
|
||||
@@ -283,8 +324,12 @@ Loop:
|
||||
for v2 := range g2.Adjacency() { // does it match in g2 ?
|
||||
r2 := v2.(engine.GroupableRes)
|
||||
l2 := strings.Split(r2.Name(), ",")
|
||||
for _, x2 := range r2.GetGroup() {
|
||||
l2 = append(l2, x2.Name())
|
||||
// XXX: this should be recursive for hierarchical grouping...
|
||||
// XXX: instead, hack it for now:
|
||||
if !strings.HasPrefix(r2.Kind(), "nooptestkind:") {
|
||||
for _, x2 := range r2.GetGroup() {
|
||||
l2 = append(l2, x2.Name())
|
||||
}
|
||||
}
|
||||
l2 = util.StrRemoveDuplicatesInList(l2) // remove duplicates
|
||||
sort.Strings(l2)
|
||||
@@ -301,7 +346,7 @@ Loop:
|
||||
|
||||
// check edges
|
||||
for v1 := range g1.Adjacency() { // for each vertex in g1
|
||||
v2 := m[v1] // lookup in map to get correspondance
|
||||
v2 := m[v1] // lookup in map to get correspondence
|
||||
// g1.Adjacency()[v1] corresponds to g2.Adjacency()[v2]
|
||||
if e1, e2 := len(g1.Adjacency()[v1]), len(g2.Adjacency()[v2]); e1 != e2 {
|
||||
r1 := v1.(engine.Res)
|
||||
@@ -771,9 +816,9 @@ func TestPgraphGrouping16(t *testing.T) {
|
||||
a := NewNoopResTest("a1,a2")
|
||||
b1 := NewNoopResTest("b1")
|
||||
c1 := NewNoopResTest("c1")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
e1 := NE("e1") // +e3 a bit?
|
||||
e2 := NE("e2") // ok!
|
||||
e3 := NE("e3") // +e1 a bit?
|
||||
g3.AddEdge(a, b1, e1)
|
||||
g3.AddEdge(b1, c1, e2)
|
||||
g3.AddEdge(a, c1, e3)
|
||||
@@ -859,9 +904,9 @@ func TestPgraphGrouping18(t *testing.T) {
|
||||
a := NewNoopResTest("a1,a2")
|
||||
b := NewNoopResTest("b1,b2")
|
||||
c1 := NewNoopResTest("c1")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2,e4")
|
||||
e3 := NE("e3")
|
||||
e1 := NE("e1") // +e3 a bit?
|
||||
e2 := NE("e2,e4") // ok!
|
||||
e3 := NE("e3") // +e1 a bit?
|
||||
g3.AddEdge(a, b, e1)
|
||||
g3.AddEdge(b, c1, e2)
|
||||
g3.AddEdge(a, c1, e3)
|
||||
@@ -978,3 +1023,110 @@ func TestPgraphSemaphoreGrouping3(t *testing.T) {
|
||||
}
|
||||
runGraphCmp(t, g1, g2)
|
||||
}
|
||||
|
||||
func TestPgraphGroupingKinds0(t *testing.T) {
|
||||
g1, _ := pgraph.NewGraph("g1") // original graph
|
||||
{
|
||||
a1 := NewKindNoopResTest("nooptestkind:foo", "a1")
|
||||
a2 := NewKindNoopResTest("nooptestkind:foo:hello", "a2")
|
||||
g1.AddVertex(a1, a2)
|
||||
}
|
||||
g2, _ := pgraph.NewGraph("g2") // expected result ?
|
||||
{
|
||||
a := NewNoopResTest("a1,a2")
|
||||
g2.AddVertex(a)
|
||||
}
|
||||
runGraphCmp(t, g1, g2)
|
||||
}
|
||||
|
||||
func TestPgraphGroupingKinds1(t *testing.T) {
|
||||
g1, _ := pgraph.NewGraph("g1") // original graph
|
||||
{
|
||||
a1 := NewKindNoopResTest("nooptestkind:foo", "a1")
|
||||
a2 := NewKindNoopResTest("nooptestkind:foo:world", "a2")
|
||||
a3 := NewKindNoopResTest("nooptestkind:foo:world:big", "a3")
|
||||
g1.AddVertex(a1, a2, a3)
|
||||
}
|
||||
g2, _ := pgraph.NewGraph("g2") // expected result ?
|
||||
{
|
||||
a := NewNoopResTest("a1,a2,a3")
|
||||
g2.AddVertex(a)
|
||||
}
|
||||
runGraphCmp(t, g1, g2)
|
||||
}
|
||||
|
||||
func TestPgraphGroupingKinds2(t *testing.T) {
|
||||
g1, _ := pgraph.NewGraph("g1") // original graph
|
||||
{
|
||||
a1 := NewKindNoopResTest("nooptestkind:foo", "a1")
|
||||
a2 := NewKindNoopResTest("nooptestkind:foo:world", "a2")
|
||||
a3 := NewKindNoopResTest("nooptestkind:foo:world:big", "a3")
|
||||
a4 := NewKindNoopResTest("nooptestkind:foo:world:bad", "a4")
|
||||
g1.AddVertex(a1, a2, a3, a4)
|
||||
}
|
||||
g2, _ := pgraph.NewGraph("g2") // expected result ?
|
||||
{
|
||||
a := NewNoopResTest("a1,a2,a3,a4")
|
||||
g2.AddVertex(a)
|
||||
}
|
||||
runGraphCmp(t, g1, g2)
|
||||
}
|
||||
|
||||
func TestPgraphGroupingKinds3(t *testing.T) {
|
||||
g1, _ := pgraph.NewGraph("g1") // original graph
|
||||
{
|
||||
a1 := NewKindNoopResTest("nooptestkind:foo", "a1")
|
||||
a2 := NewKindNoopResTest("nooptestkind:foo:world", "a2")
|
||||
a3 := NewKindNoopResTest("nooptestkind:foo:world:big", "a3")
|
||||
a4 := NewKindNoopResTest("nooptestkind:foo:world:bad", "a4")
|
||||
a5 := NewKindNoopResTest("nooptestkind:foo:world:bazzz", "a5")
|
||||
g1.AddVertex(a1, a2, a3, a4, a5)
|
||||
}
|
||||
g2, _ := pgraph.NewGraph("g2") // expected result ?
|
||||
{
|
||||
a := NewNoopResTest("a1,a2,a3,a4,a5")
|
||||
g2.AddVertex(a)
|
||||
}
|
||||
runGraphCmp(t, g1, g2)
|
||||
}
|
||||
|
||||
// This test is valid, but our test system doesn't support duplicate kinds atm.
|
||||
//func TestPgraphGroupingKinds4(t *testing.T) {
|
||||
// g1, _ := pgraph.NewGraph("g1") // original graph
|
||||
// {
|
||||
// a1 := NewKindNoopResTest("nooptestkind:foo", "a1")
|
||||
// a2 := NewKindNoopResTest("nooptestkind:foo:world", "a2")
|
||||
// a3 := NewKindNoopResTest("nooptestkind:foo:world:big", "a3")
|
||||
// a4 := NewKindNoopResTest("nooptestkind:foo:world:big", "a4")
|
||||
// g1.AddVertex(a1, a2, a3, a4)
|
||||
// }
|
||||
// g2, _ := pgraph.NewGraph("g2") // expected result ?
|
||||
// {
|
||||
// a := NewNoopResTest("a1,a2,a3,a4")
|
||||
// g2.AddVertex(a)
|
||||
// }
|
||||
// runGraphCmp(t, g1, g2)
|
||||
//}
|
||||
|
||||
func TestPgraphGroupingKinds5(t *testing.T) {
|
||||
g1, _ := pgraph.NewGraph("g1") // original graph
|
||||
{
|
||||
a1 := NewKindNoopResTest("nooptestkind:foo", "a1")
|
||||
a2 := NewKindNoopResTest("nooptestkind:foo:world", "a2")
|
||||
a3 := NewKindNoopResTest("nooptestkind:foo:world:big", "a3")
|
||||
a4 := NewKindNoopResTest("nooptestkind:foo:world:bad", "a4")
|
||||
a5 := NewKindNoopResTest("nooptestkind:foo:world:bazzz", "a5")
|
||||
b1 := NewKindNoopResTest("nooptestkind:foo", "b1")
|
||||
// NOTE: the very long one shouldn't group, but our test doesn't
|
||||
// support detecting this pattern at the moment...
|
||||
b2 := NewKindNoopResTest("nooptestkind:this:is:very:long", "b2")
|
||||
g1.AddVertex(a1, a2, a3, a4, a5, b1, b2)
|
||||
}
|
||||
g2, _ := pgraph.NewGraph("g2") // expected result ?
|
||||
{
|
||||
a := NewNoopResTest("a1,a2,a3,a4,a5")
|
||||
b := NewNoopResTest("b1,b2")
|
||||
g2.AddVertex(a, b)
|
||||
}
|
||||
runGraphCmp(t, g1, g2)
|
||||
}
|
||||
|
||||
|
||||
@@ -58,17 +58,18 @@ func (ag *baseGrouper) Init(g *pgraph.Graph) error {
|
||||
ag.graph = g // pointer
|
||||
|
||||
// We sort deterministically, first by kind, and then by name. In
|
||||
// particular, longer kind chunks sort first. So http:ui:text should
|
||||
// appear before http:server and http:ui. This is a hack so that if we
|
||||
// are doing hierarchical automatic grouping, it gives the http:ui:text
|
||||
// a chance to get grouped into http:ui, before http:ui gets grouped
|
||||
// into http:server, because once that happens, http:ui:text will never
|
||||
// get grouped, and this won't work properly. This works, because when
|
||||
// we start comparing iteratively the list of resources, it does this
|
||||
// with a O(n^2) loop that compares the X and Y zero indexes first, and
|
||||
// and then continues along. If the "longer" resources appear first,
|
||||
// then they'll group together first. We should probably put this into
|
||||
// a new Grouper struct, but for now we might as well leave it here.
|
||||
// particular, longer kind chunks sort first. So http:server:ui:input
|
||||
// should appear before http:server and http:server:ui. This is a
|
||||
// strategy so that if we are doing hierarchical automatic grouping, it
|
||||
// gives the http:server:ui:input a chance to get grouped into
|
||||
// http:server:ui, before http:server:ui gets grouped into http:server,
|
||||
// because once that happens, http:server:ui:input will never get
|
||||
// grouped, and this won't work properly. This works, because when we
|
||||
// start comparing iteratively the list of resources, it does this with
|
||||
// a O(n^2) loop that compares the X and Y zero indexes first, and then
|
||||
// continues along. If the "longer" resources appear first, then they'll
|
||||
// group together first. We should probably put this into a new Grouper
|
||||
// struct, but for now we might as well leave it here.
|
||||
//vertices := ag.graph.VerticesSorted() // formerly
|
||||
vertices := RHVSort(ag.graph.Vertices())
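
RHVSort orders vertices so that kinds with more ":"-separated chunks come first, which matches the reversed length comparison shown later in RHVSlice.Less. A runnable sketch of that ordering on bare kind strings (the real sort also breaks ties deterministically on kind and name):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// sortByKindDepth sorts kind strings so that longer chunk chains come first,
// eg: "http:server:ui:input" before "http:server:ui" before "http:server".
func sortByKindDepth(kinds []string) {
	sort.Slice(kinds, func(i, j int) bool {
		li := len(strings.Split(kinds[i], ":"))
		lj := len(strings.Split(kinds[j], ":"))
		if li != lj {
			return li > lj // reverse: deeper kinds sort first
		}
		return kinds[i] < kinds[j]
	})
}

func main() {
	kinds := []string{"http:server", "http:server:ui:input", "http:server:ui"}
	sortByKindDepth(kinds)
	fmt.Println(kinds) // [http:server:ui:input http:server:ui http:server]
}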
|
||||
|
||||
@@ -134,7 +135,7 @@ func (ag *baseGrouper) VertexNext() (v1, v2 pgraph.Vertex, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// VertexCmp can be used in addition to an overridding implementation.
|
||||
// VertexCmp can be used in addition to an overriding implementation.
|
||||
func (ag *baseGrouper) VertexCmp(v1, v2 pgraph.Vertex) error {
|
||||
if v1 == nil || v2 == nil {
|
||||
return fmt.Errorf("the vertex is nil")
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -181,7 +181,7 @@ func (obj RHVSlice) Less(i, j int) bool {
|
||||
li := len(si)
|
||||
lj := len(sj)
|
||||
|
||||
if li != lj { // eg: http:ui vs. http:ui:text
|
||||
if li != lj { // eg: http:server:ui vs. http:server:ui:text
|
||||
return li > lj // reverse
|
||||
}
|
||||
|
||||
|
||||
engine/graph/autogroup/util_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
//go:build !root
|
||||
|
||||
package autogroup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
_ "github.com/purpleidea/mgmt/engine/resources" // import so the resources register
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
)
|
||||
|
||||
// ListPgraphVertexCmp compares two lists of pgraph.Vertex pointers.
|
||||
func ListPgraphVertexCmp(a, b []pgraph.Vertex) bool {
|
||||
//fmt.Printf("CMP: %v with %v\n", a, b) // debugging
|
||||
if a == nil && b == nil {
|
||||
return true
|
||||
}
|
||||
if a == nil || b == nil {
|
||||
return false
|
||||
}
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// empty graph
|
||||
func TestRHVSort1(t *testing.T) {
|
||||
|
||||
r1, err := engine.NewNamedResource("http:server", "foo")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error: %+v", err))
|
||||
}
|
||||
r2, err := engine.NewNamedResource("http:server:ui", "bar")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error: %+v", err))
|
||||
}
|
||||
|
||||
vertices := []pgraph.Vertex{r1, r2}
|
||||
expected := []pgraph.Vertex{r2, r1}
|
||||
|
||||
if out := RHVSort(vertices); !ListPgraphVertexCmp(expected, out) {
|
||||
t.Errorf("vertices: %+v", vertices)
|
||||
t.Errorf("expected: %+v", expected)
|
||||
t.Errorf("test out: %+v", out)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -37,6 +37,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/purpleidea/mgmt/converger"
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
@@ -59,9 +60,12 @@ type Engine struct {
|
||||
Version string
|
||||
Hostname string
|
||||
|
||||
// Break off separate logical pieces into chunks where possible.
|
||||
Converger *converger.Coordinator
|
||||
Local *local.API
|
||||
World engine.World
|
||||
Exporter *Exporter
|
||||
|
||||
Local *local.API
|
||||
World engine.World
|
||||
|
||||
// Prefix is a unique directory prefix which can be used. It should be
|
||||
// created if needed.
|
||||
@@ -72,6 +76,7 @@ type Engine struct {
|
||||
graph *pgraph.Graph
|
||||
nextGraph *pgraph.Graph
|
||||
state map[pgraph.Vertex]*State
|
||||
tlock *sync.RWMutex // lock around state map
|
||||
waits map[pgraph.Vertex]*sync.WaitGroup // wg for the Worker func
|
||||
wlock *sync.Mutex // lock around waits map
|
||||
|
||||
@@ -84,7 +89,10 @@ type Engine struct {
|
||||
wg *sync.WaitGroup // wg for the whole engine (only used for close)
|
||||
|
||||
paused bool // are we paused?
|
||||
fastPause bool
|
||||
fastPause *atomic.Bool
|
||||
isClosing bool // are we shutting down?
|
||||
|
||||
errMutex *sync.Mutex // wraps the *state workerErr (one mutex for all)
|
||||
}
|
||||
|
||||
// Init initializes the internal structures and starts this the graph running.
|
||||
@@ -106,16 +114,18 @@ func (obj *Engine) Init() error {
|
||||
if obj.Prefix == "" || obj.Prefix == "/" {
|
||||
return fmt.Errorf("the prefix of `%s` is invalid", obj.Prefix)
|
||||
}
|
||||
if err := os.MkdirAll(obj.Prefix, 0770); err != nil {
|
||||
// 0775 since we want children to be able to read this!
|
||||
if err := os.MkdirAll(obj.Prefix, 0775); err != nil {
|
||||
return errwrap.Wrapf(err, "can't create prefix")
|
||||
}
|
||||
|
||||
obj.state = make(map[pgraph.Vertex]*State)
|
||||
obj.tlock = &sync.RWMutex{}
|
||||
obj.waits = make(map[pgraph.Vertex]*sync.WaitGroup)
|
||||
obj.wlock = &sync.Mutex{}
|
||||
|
||||
obj.mlock = &sync.Mutex{}
|
||||
obj.metas = make(map[engine.ResPtrUID]*engine.MetaState)
|
||||
obj.metas = make(map[engine.ResPtrUID]*engine.MetaState) // don't include .Hidden res
|
||||
|
||||
obj.slock = &sync.Mutex{}
|
||||
obj.semas = make(map[string]*semaphore.Semaphore)
|
||||
@@ -123,6 +133,21 @@ func (obj *Engine) Init() error {
|
||||
obj.wg = &sync.WaitGroup{}
|
||||
|
||||
obj.paused = true // start off true, so we can Resume after first Commit
|
||||
obj.fastPause = &atomic.Bool{}
|
||||
|
||||
obj.errMutex = &sync.Mutex{}
|
||||
|
||||
obj.Exporter = &Exporter{
|
||||
World: obj.World,
|
||||
Debug: obj.Debug,
|
||||
Logf: func(format string, v ...interface{}) {
|
||||
// TODO: is this a sane prefix to use here?
|
||||
obj.Logf("export: "+format, v...)
|
||||
},
|
||||
}
|
||||
if err := obj.Exporter.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -187,6 +212,12 @@ func (obj *Engine) Commit() error {
|
||||
if !ok { // should not happen, previously validated
|
||||
return fmt.Errorf("not a Res")
|
||||
}
|
||||
// Skip this if Hidden since we can have a hidden res that has
|
||||
// the same kind+name as a regular res, and this would conflict.
|
||||
if res.MetaParams().Hidden {
|
||||
continue
|
||||
}
|
||||
|
||||
activeMetas[engine.PtrUID(res)] = struct{}{} // add
|
||||
}
|
||||
|
||||
@@ -207,7 +238,11 @@ func (obj *Engine) Commit() error {
|
||||
return fmt.Errorf("the Res state already exists")
|
||||
}
|
||||
|
||||
activeMetas[engine.PtrUID(res)] = struct{}{} // add
|
||||
// Skip this if Hidden since we can have a hidden res that has
|
||||
// the same kind+name as a regular res, and this would conflict.
|
||||
if !res.MetaParams().Hidden {
|
||||
activeMetas[engine.PtrUID(res)] = struct{}{} // add
|
||||
}
|
||||
|
||||
if obj.Debug {
|
||||
obj.Logf("Validate(%s)", res)
|
||||
@@ -224,7 +259,7 @@ func (obj *Engine) Commit() error {
|
||||
statePrefix := fmt.Sprintf("%s/", path.Join(obj.statePrefix(), pathUID))
|
||||
|
||||
// don't create this unless it *will* be used
|
||||
//if err := os.MkdirAll(statePrefix, 0770); err != nil {
|
||||
//if err := os.MkdirAll(statePrefix, 0775); err != nil {
|
||||
// return errwrap.Wrapf(err, "can't create state prefix")
|
||||
//}
|
||||
|
||||
@@ -268,7 +303,7 @@ func (obj *Engine) Commit() error {
|
||||
obj.wlock.Unlock()
|
||||
}()
|
||||
|
||||
if obj.Debug || true {
|
||||
if obj.Debug {
|
||||
obj.Logf("%s: Working...", v)
|
||||
}
|
||||
// contains the Watch and CheckApply loops
|
||||
@@ -280,7 +315,9 @@ func (obj *Engine) Commit() error {
|
||||
obj.Logf("%s: Exited...", v)
|
||||
}
|
||||
}
|
||||
obj.errMutex.Lock()
|
||||
obj.state[v].workerErr = err // store the error
|
||||
obj.errMutex.Unlock()
|
||||
// If the Rewatch metaparam is true, then this will get
|
||||
// restarted if we do a graph cmp swap. This is why the
|
||||
// graph cmp function runs the removes before the adds.
|
||||
@@ -298,7 +335,12 @@ func (obj *Engine) Commit() error {
|
||||
if !ok { // should not happen, previously validated
|
||||
return fmt.Errorf("not a Res")
|
||||
}
|
||||
delete(activeMetas, engine.PtrUID(res))
|
||||
|
||||
// Skip this if Hidden since we can have a hidden res that has
|
||||
// the same kind+name as a regular res, and this would conflict.
|
||||
if !res.MetaParams().Hidden {
|
||||
delete(activeMetas, engine.PtrUID(res))
|
||||
}
|
||||
|
||||
// wait for exit before starting new graph!
|
||||
close(obj.state[vertex].removeDone) // causes doneCtx to cancel
|
||||
@@ -313,7 +355,9 @@ func (obj *Engine) Commit() error {
|
||||
|
||||
// delete to free up memory from old graphs
|
||||
fn := func() error {
|
||||
obj.tlock.Lock()
|
||||
delete(obj.state, vertex)
|
||||
obj.tlock.Unlock()
|
||||
delete(obj.waits, vertex)
|
||||
return nil
|
||||
}
|
||||
@@ -341,12 +385,15 @@ func (obj *Engine) Commit() error {
|
||||
s1, ok1 := obj.state[v1]
|
||||
s2, ok2 := obj.state[v2]
|
||||
x1, x2 := false, false
|
||||
// no need to have different mutexes for each state atm
|
||||
obj.errMutex.Lock()
|
||||
if ok1 {
|
||||
x1 = s1.workerErr != nil && swap1
|
||||
}
|
||||
if ok2 {
|
||||
x2 = s2.workerErr != nil && swap2
|
||||
}
|
||||
obj.errMutex.Unlock()
|
||||
|
||||
if x1 || x2 {
|
||||
// We swap, even if they're the same, so that we reload!
|
||||
@@ -466,7 +513,7 @@ func (obj *Engine) Resume() error {
|
||||
// poke. In general this is only called when you're trying to hurry up the exit.
|
||||
// XXX: Not implemented
|
||||
func (obj *Engine) SetFastPause() {
|
||||
obj.fastPause = true
|
||||
obj.fastPause.Store(true)
|
||||
}
|
||||
|
||||
// Pause the active, running graph.
|
||||
@@ -479,7 +526,7 @@ func (obj *Engine) Pause(fastPause bool) error {
|
||||
return fmt.Errorf("already paused")
|
||||
}
|
||||
|
||||
obj.fastPause = fastPause
|
||||
obj.fastPause.Store(fastPause)
|
||||
topoSort, _ := obj.graph.TopologicalSort()
|
||||
for _, vertex := range topoSort { // squeeze out the events...
|
||||
// The Event is sent to an unbuffered channel, so this event is
|
||||
@@ -492,7 +539,7 @@ func (obj *Engine) Pause(fastPause bool) error {
|
||||
obj.paused = true
|
||||
|
||||
// we are now completely paused...
|
||||
obj.fastPause = false // reset
|
||||
obj.fastPause.Store(false) // reset
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -500,6 +547,7 @@ func (obj *Engine) Pause(fastPause bool) error {
|
||||
// actually just a Load of an empty graph and a Commit. It waits for all the
|
||||
// resources to exit before returning.
|
||||
func (obj *Engine) Shutdown() error {
|
||||
obj.isClosing = true
|
||||
emptyGraph, reterr := pgraph.NewGraph("empty")
|
||||
|
||||
// this is a graph switch (graph sync) that switches to an empty graph!
|
||||
@@ -516,6 +564,15 @@ func (obj *Engine) Shutdown() error {
|
||||
return reterr
|
||||
}
|
||||
|
||||
// IsClosing tells the caller if a Shutdown() was run. This is helpful so that
|
||||
// the graph can behave slightly differently when receiving the final empty
|
||||
// graph. This is because it's empty because we passed one to unload everything,
|
||||
// not because the user actually removed all resources. We may want to preserve
|
||||
// the exported state for example, and not purge it.
|
||||
func (obj *Engine) IsClosing() bool {
|
||||
return obj.isClosing
|
||||
}
|
||||
|
||||
// Graph returns the running graph.
|
||||
func (obj *Engine) Graph() *pgraph.Graph {
|
||||
return obj.graph
|
||||
|
||||
355
engine/graph/exporter.go
Normal file
355
engine/graph/exporter.go
Normal file
@@ -0,0 +1,355 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
engineUtil "github.com/purpleidea/mgmt/engine/util"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
)
|
||||
|
||||
// Exporter is the main engine mechanism that sends the exported resource data
|
||||
// to the World database. The code is relatively succinct, but slightly subtle.
|
||||
type Exporter struct {
|
||||
// Watch specifies if we want to enable the additional watch feature. It
|
||||
// should probably be left off unless we're debugging something or using
|
||||
// weird environments where we expect someone to mess with our res data.
|
||||
Watch bool
|
||||
|
||||
World engine.World
|
||||
|
||||
Debug bool
|
||||
Logf func(format string, v ...interface{})
|
||||
|
||||
state map[engine.ResDelete]bool // key NOT a pointer for it to be unique
|
||||
prev map[engine.ResDelete]pgraph.Vertex
|
||||
mutex *sync.Mutex
|
||||
|
||||
// watch specific variables
|
||||
workerRunning bool
|
||||
workerWg *sync.WaitGroup
|
||||
workerCtx context.Context
|
||||
workerCancel func()
|
||||
}
|
||||
|
||||
// Init performs some initialization before first use. This is required.
|
||||
func (obj *Exporter) Init() error {
|
||||
obj.state = make(map[engine.ResDelete]bool)
|
||||
obj.prev = make(map[engine.ResDelete]pgraph.Vertex)
|
||||
obj.mutex = &sync.Mutex{}
|
||||
|
||||
obj.workerRunning = false
|
||||
obj.workerWg = &sync.WaitGroup{}
|
||||
obj.workerCtx, obj.workerCancel = context.WithCancel(context.Background())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Export performs the worldly export, and then stores the resource unique ID in
|
||||
// our in-memory data store. Exported resources use this tracking to know when
|
||||
// to run their cleanups. If this function encounters an error, it returns
|
||||
// (false, err). If it does nothing it returns (true, nil). If it does work it
|
||||
// return (false, nil). These return codes match how CheckApply returns. This
|
||||
// may run concurrently by multiple different resources, so as a result it must
|
||||
// stay thread safe.
|
||||
func (obj *Exporter) Export(ctx context.Context, res engine.Res) (bool, error) {
|
||||
// As a result of running this operation in roughly the same places that
|
||||
// the usual CheckApply step would run, we end up with a more nuanced
|
||||
// and mature "exported resources" model than what was ever possible
|
||||
// with other tools. We can now "wait" (via the resource graph
|
||||
// dependencies) to run an export until an earlier resource dependency
|
||||
// step has run. We can also programmatically "un-export" a resource by
|
||||
// publishing a subsequent resource graph which either removes that
|
||||
// Export flag or the entire resource. The one downside is that
|
||||
// exporting to the database happens in multiple transactions rather
|
||||
// than a batched bolus, but this is more appropriate because we're now
|
||||
// more accurately modelling real-time systems, and this bandwidth is
|
||||
// not a significant amount anyways. Lastly, we make sure to not run the
|
||||
// purge when we ^C, since it should be safe to shutdown without killing
|
||||
// all the data we left there.
|
||||
|
||||
if res.MetaParams().Noop {
|
||||
return true, nil // did nothing
|
||||
}
|
||||
|
||||
exports := res.MetaParams().Export
|
||||
if len(exports) == 0 {
|
||||
return true, nil // did nothing
|
||||
}
|
||||
|
||||
// It's OK to check the cache here instead of re-sending via the World
|
||||
// API and so on, because the only way the Res data would change in
|
||||
// World is if (1) someone messed with etcd, which we'd see with Watch,
|
||||
// or (2) if the Res data changed because we have a new resource graph.
|
||||
// If we have a new resource graph, then any changed elements will get
|
||||
// pruned from this state cache via the Prune method, which helps us.
|
||||
// If send/recv or any other weird resource method changes things, then
|
||||
// we also want to invalidate the state cache.
|
||||
state := true
|
||||
|
||||
// TODO: This recv code is untested!
|
||||
if r, ok := res.(engine.RecvableRes); ok {
|
||||
for _, v := range r.Recv() { // map[string]*Send
|
||||
// XXX: After we read the changed value, will it persist?
|
||||
state = state && !v.Changed
|
||||
}
|
||||
}
|
||||
|
||||
obj.mutex.Lock()
|
||||
for _, ptrUID := range obj.ptrUID(res) {
|
||||
b := obj.state[*ptrUID] // no need to check if exists
|
||||
state = state && b // if any are false, it's all false
|
||||
}
|
||||
obj.mutex.Unlock()
|
||||
if state {
|
||||
return true, nil // state OK!
|
||||
}
|
||||
|
||||
// XXX: Do we want to change any metaparams when we export?
|
||||
// XXX: Do we want to change any metaparams when we collect?
|
||||
b64, err := obj.resToB64(res)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
resourceExports := []*engine.ResExport{}
|
||||
duplicates := make(map[string]struct{})
|
||||
for _, export := range exports {
|
||||
//ptrUID := engine.ResDelete{
|
||||
// Kind: res.Kind(),
|
||||
// Name: res.Name(),
|
||||
// Host: export,
|
||||
//}
|
||||
if export == "*" {
|
||||
export = "" // XXX: use whatever means "all"
|
||||
}
|
||||
if _, exists := duplicates[export]; exists {
|
||||
continue
|
||||
}
|
||||
duplicates[export] = struct{}{}
|
||||
// skip this check since why race it or split the resource...
|
||||
//if stateOK := obj.state[ptrUID]; stateOK {
|
||||
// // rare that we'd have a split of some of these from a
|
||||
// // single resource updated and others already fine, but
|
||||
// // might as well do the check since it's cheap...
|
||||
// continue
|
||||
//}
|
||||
resExport := &engine.ResExport{
|
||||
Kind: res.Kind(),
|
||||
Name: res.Name(),
|
||||
Host: export,
|
||||
Data: b64, // encoded res data
|
||||
}
|
||||
resourceExports = append(resourceExports, resExport)
|
||||
}
|
||||
|
||||
// The fact that we Watch the write-only-by-us values at all, is a
|
||||
// luxury that allows us to handle mischievous actors that overwrote an
|
||||
// exported value. It really isn't necessary. It's the consumers that
|
||||
// really need to watch.
|
||||
if err := obj.worker(); err != nil {
|
||||
return false, err // big error
|
||||
}
|
||||
|
||||
// TODO: Do we want to log more information about where this exports to?
|
||||
obj.Logf("%s", res)
|
||||
//obj.Logf("%s\n", engineUtil.DebugStructFields(res)) // debug
|
||||
// XXX: Add a TTL if requested
|
||||
b, err := obj.World.ResExport(ctx, resourceExports) // do it!
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
obj.mutex.Lock()
|
||||
defer obj.mutex.Unlock()
|
||||
// NOTE: The Watch() method *must* invalidate this state if it changes.
|
||||
// This is only pertinent if we're using the luxury Watch add-ons.
|
||||
for _, ptrUID := range obj.ptrUID(res) {
|
||||
obj.state[*ptrUID] = true // state OK!
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
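// [Editor's note] An illustrative sketch (not part of this diff) of how a
// caller might consume the Export return convention documented above, where
// (true, nil) means nothing was done and (false, nil) means work happened,
// mirroring CheckApply. The exporter and res variables are assumed to exist
// in the caller.
//
//	checkOK, err := exporter.Export(ctx, res)
//	if err != nil {
//		return false, err // real error
//	}
//	return checkOK, nil // true: cached state was already OK, nothing was sent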
// Prune removes any exports which are no longer actively being presented in the
// resource graph. This cleans things up between graph swaps. This should NOT
// run if we're shutting down cleanly. Keep in mind that this must act on the
// new graph which is available by "Commit", not before we're ready to "Commit".
func (obj *Exporter) Prune(ctx context.Context, graph *pgraph.Graph) error {
// mutex should be optional since this should only run when graph paused
obj.mutex.Lock()
defer obj.mutex.Unlock()

// make searching faster by initially storing it all in a map
m := make(map[engine.ResDelete]pgraph.Vertex) // key is NOT a pointer
for _, v := range graph.Vertices() {
res, ok := v.(engine.Res)
if !ok { // should not happen
return fmt.Errorf("not a Res")
}
for _, ptrUID := range obj.ptrUID(res) { // skips non-export things
m[*ptrUID] = v
}
}

resourceDeletes := []*engine.ResDelete{}
for k := range obj.state {
v, exists := m[k] // exists means it's in the graph
prev := obj.prev[k]
obj.prev[k] = v // may be nil
if exists && v != prev { // pointer compare to old vertex
// Here we have a Res that previously existed under the
// same kind/name/host. We need to invalidate the state
// only if it's a different Res than the previous one!
// If we do this erroneously, it causes extra traffic.
obj.state[k] = false // do this only if the Res is NEW
continue // skip it, it's staying

} else if exists {
// If it exists and it's the same as it was, do nothing.
// This is important to prevent thrashing/flapping...
continue
}

// These don't exist anymore, we have to get rid of them...
delete(obj.state, k) // it's gone!
resourceDeletes = append(resourceDeletes, &k)
}

if len(resourceDeletes) == 0 {
return nil
}

obj.Logf("prune: %d exports", len(resourceDeletes))
for _, x := range resourceDeletes {
obj.Logf("prune: %s to %s", engine.Repr(x.Kind, x.Name), x.Host)
}
// XXX: this function could optimize the grouping since we split the
// list of host entries out from the kind/name since we can't have a
// unique map key with a struct that contains a slice.
if _, err := obj.World.ResDelete(ctx, resourceDeletes); err != nil {
return err
}

return nil
}

// resToB64 is a helper to refactor out this method.
func (obj *Exporter) resToB64(res engine.Res) (string, error) {
if r, ok := res.(engine.ExportableRes); ok {
return r.ToB64()
}

return engineUtil.ResToB64(res)
}

// ptrUID is a helper for this repetitive code.
func (obj *Exporter) ptrUID(res engine.Res) []*engine.ResDelete {
a := []*engine.ResDelete{}
for _, export := range res.MetaParams().Export {
if export == "*" {
export = "" // XXX: use whatever means "all"
}

ptrUID := &engine.ResDelete{
Kind: res.Kind(),
Name: res.Name(),
Host: export,
}
a = append(a, ptrUID)
}
return a
}

// worker is a helper to kick off the optional Watch workers.
func (obj *Exporter) worker() error {
if !obj.Watch {
return nil // feature is disabled
}

obj.mutex.Lock()
defer obj.mutex.Unlock()

if obj.workerRunning {
return nil // already running
}

kind := "" // watch everything
ch, err := obj.World.ResWatch(obj.workerCtx, kind) // (chan error, error)
if err != nil {
return err // big error
}
obj.workerRunning = true
obj.workerWg.Add(1)
go func() {
defer func() {
obj.mutex.Lock()
obj.workerRunning = false
obj.mutex.Unlock()
}()
defer obj.workerWg.Done()
Loop:
for {
var e error
var ok bool
select {
case e, ok = <-ch:
if !ok {
// chan closed
break Loop
}

case <-obj.workerCtx.Done():
break Loop
}
if e != nil {
// something errored... shutdown coming!
}
// event!
obj.mutex.Lock()
for k := range obj.state {
obj.state[k] = false // reset it all
}
obj.mutex.Unlock()
}
}()

return nil
}

// Shutdown cancels any running workers and waits for them to finish.
func (obj *Exporter) Shutdown() {
obj.workerCancel()
obj.workerWg.Wait()
}
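// [Editor's note] An illustrative sketch (not part of this diff) of the
// Exporter lifecycle suggested by the methods above: Init before use, Export
// during the CheckApply-like step, Prune after a graph swap (but not on a
// clean shutdown), and Shutdown to stop the optional Watch worker. The world,
// logf, res and newGraph values are assumed to exist in the caller.
//
//	exporter := &Exporter{World: world, Logf: logf}
//	if err := exporter.Init(); err != nil {
//		return err
//	}
//	defer exporter.Shutdown()
//	if _, err := exporter.Export(ctx, res); err != nil { // per resource
//		return err
//	}
//	if err := exporter.Prune(ctx, newGraph); err != nil { // on graph swap only
//		return err
//	}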
@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -128,6 +128,21 @@ func SendRecv(res engine.RecvableRes, fn RecvFn) (map[engine.RecvableRes]map[str
}

if st == nil {
// This can happen if there is a send->recv between two
// resources where the producer does not send a value.
// This can happen for a few reasons. (1) If the
// programmer made a mistake and has a non-erroring
// CheckApply without a return. Note that it should send
// a value for the (true, nil) CheckApply cases too.
// (2) If the resource that's sending started off in the
// "good" state right at first run, and never produced a
// value to send. This may be a programming error since
// the implementation must always either produce a value
// or be okay that there's an error. It could be a valid
// error if the resource was intended to not be run in a
// way where it wouldn't initially have a value to send,
// whether cached or otherwise, but this scenario should
// be rare.
e := fmt.Errorf("received nil value from: %s", v.Res)
err = errwrap.Append(err, e) // list of errors
continue

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -228,7 +228,7 @@ func (obj *State) Init() error {
if !ok {
continue
}
// pass in information on requestor...
// pass in information on requester...
if err := r1.GraphQueryAllowed(
engine.GraphQueryableOptionKind(res.Kind()),
engine.GraphQueryableOptionName(res.Name()),
@@ -243,7 +243,7 @@ func (obj *State) Init() error {
if !ok {
continue
}
// pass in information on requestor...
// pass in information on requester...
if err := r2.GraphQueryAllowed(
engine.GraphQueryableOptionKind(res.Kind()),
engine.GraphQueryableOptionName(res.Name()),
@@ -430,3 +430,13 @@ func (obj *State) poll(ctx context.Context, interval uint32) error {
obj.init.Event() // notify engine of an event (this can block)
}
}

// hidden is a replacement for Watch when the Hidden metaparameter is used.
func (obj *State) hidden(ctx context.Context) error {
obj.init.Running() // when started, notify engine that we're running

select {
case <-ctx.Done(): // signal for shutdown request
return nil
}
}

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -54,7 +54,8 @@ func (obj *State) varDir(extra string) (string, error) {

// an empty string at the end has no effect
p := fmt.Sprintf("%s/", path.Join(obj.Prefix, extra))
if err := os.MkdirAll(p, 0770); err != nil {
// 0775 since we want children to be able to read this!
if err := os.MkdirAll(p, 0775); err != nil {
return "", errwrap.Wrapf(err, "can't create prefix in: %s", p)
}

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -38,6 +38,7 @@ import (
"fmt"
"os"
"path"
"strconv"
"strings"
"sync"

@@ -54,7 +55,15 @@ type API struct {
Logf func(format string, v ...interface{})

// Each piece of the API can take a handle here.
*Value
*Value // TODO: Rename to ValueImpl?

// VarDirImpl is the implementation for the VarDir API's. The API's are
// the collection of public methods that exist on this struct.
*VarDirImpl

// PoolImpl is the implementation for the Pool API's. The API's are the
// collection of public methods that exist on this struct.
*PoolImpl
}

// Init initializes the API before first use. It returns itself so it can be
@@ -67,6 +76,20 @@ func (obj *API) Init() *API {
Logf: obj.Logf,
})

obj.VarDirImpl = &VarDirImpl{}
obj.VarDirImpl.Init(&VarDirInit{
Prefix: obj.Prefix,
Debug: obj.Debug,
Logf: obj.Logf,
})

obj.PoolImpl = &PoolImpl{}
obj.PoolImpl.Init(&PoolInit{
Prefix: obj.Prefix,
Debug: obj.Debug,
Logf: obj.Logf,
})

return obj
}

@@ -121,7 +144,7 @@ func (obj *Value) ValueGet(ctx context.Context, key string) (interface{}, error)

var val interface{}
//var err error
if _, skip := obj.skipread[key]; skip {
if _, skip := obj.skipread[key]; !skip {
val, err = valueRead(ctx, prefix, key) // must return val == nil if missing
if err != nil {
// We had an actual read issue. Report this and stop
@@ -154,6 +177,16 @@ func (obj *Value) ValueSet(ctx context.Context, key string, value interface{}) e
obj.mutex.Lock()
defer obj.mutex.Unlock()

// If we're already in the correct state, then return early and *don't*
// send any events at the very end...
v, exists := obj.values[key]
if !exists && value == nil {
return nil // already in the correct state
}
if exists && v == value { // XXX: reflect.DeepEqual(v, value) ?
return nil // already in the correct state
}

// Write to state dir on disk first. If ctx cancels, we assume it's not
// written or it doesn't matter because we're cancelling, meaning we're
// shutting down, so our local cache can be invalidated anyways.
@@ -332,3 +365,240 @@ func valueRemove(ctx context.Context, prefix, key string) error {
}
return nil // ignore not found errors
}

// VarDirInit are the init values that the VarDir API needs to work correctly.
type VarDirInit struct {
Prefix string
Debug bool
Logf func(format string, v ...interface{})
}

// VarDirImpl is the implementation for the VarDir API's. The API's are the
// collection of public methods that exist on this struct.
type VarDirImpl struct {
init *VarDirInit
mutex *sync.Mutex
prefix string
prefixExists bool // is it okay to use the prefix?
}

// Init runs some initialization code for the VarDir API.
func (obj *VarDirImpl) Init(init *VarDirInit) {
obj.init = init
obj.mutex = &sync.Mutex{}
obj.prefix = fmt.Sprintf("%s/", path.Join(obj.init.Prefix, "vardir"))
}

// VarDir returns a directory rooted at the internal prefix.
func (obj *VarDirImpl) VarDir(ctx context.Context, reldir string) (string, error) {
if strings.HasPrefix(reldir, "/") {
return "", fmt.Errorf("path must be relative")
}
if !strings.HasSuffix(reldir, "/") {
return "", fmt.Errorf("path must be a dir")
}
// NOTE: The above checks ensure we don't get either "" or "/" as input!

prefix, err := obj.getPrefix()
if err != nil {
return "", err
}

result := fmt.Sprintf("%s/", path.Join(prefix, reldir))

// TODO: Should we mkdir this?
obj.mutex.Lock()
defer obj.mutex.Unlock()
if err := os.MkdirAll(result, 0755); err != nil {
return "", err
}

return result, nil
}

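// [Editor's note] An illustrative sketch (not part of this diff) of calling
// the VarDir API above. The reldir argument must be relative and end in a
// slash, and the returned directory is created under the internal prefix. The
// localAPI value is assumed to be an initialized *API (or *VarDirImpl).
//
//	dir, err := localAPI.VarDir(ctx, "mykind/cache/")
//	if err != nil {
//		return err
//	}
//	// dir now names an existing directory under <prefix>/vardir/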
// getPrefix gets the prefix dir to use, or errors if it can't make one. It
// makes it on first use, and returns quickly from any future calls to it.
func (obj *VarDirImpl) getPrefix() (string, error) {
// NOTE: Moving this mutex to just below the first early return, would
// be a benign race, but as it turns out, it's possible that a compiler
// would see this behaviour as "undefined" and things might not work as
// intended. It could perhaps be replaced with a sync/atomic primitive
// if we wanted better performance here.
obj.mutex.Lock()
defer obj.mutex.Unlock()

if obj.prefixExists { // former race read
return obj.prefix, nil
}

// MkdirAll instead of Mkdir because we have no idea if the parent
// local/ directory was already made yet or not. (If at all.) If path is
// already a directory, MkdirAll does nothing and returns nil. (Good!)
// TODO: I hope MkdirAll is thread-safe on path creation in case another
// future local API tries to make the base (parent) directory too!
if err := os.MkdirAll(obj.prefix, 0755); err != nil {
return "", err
}
obj.prefixExists = true // former race write

return obj.prefix, nil
}

// PoolInit are the init values that the Pool API needs to work correctly.
type PoolInit struct {
Prefix string
Debug bool
Logf func(format string, v ...interface{})
}

// PoolConfig configures how the Pool operates.
// XXX: These are not implemented yet.
type PoolConfig struct {
// Expiry specifies that we expire old values that have not been read
// for this many seconds. Zero disables this and they never expire.
Expiry int64 // TODO: or time.Time ?

// Random lets you allocate a random integer instead of sequential ones.
Random bool

// Max specifies the maximum integer to allocate.
Max int
}

// PoolImpl is the implementation for the Pool API's. The API's are the
// collection of public methods that exist on this struct.
type PoolImpl struct {
init *PoolInit
mutex *sync.Mutex
prefix string
prefixExists bool // is it okay to use the prefix?
}

// Init runs some initialization code for the Pool API.
func (obj *PoolImpl) Init(init *PoolInit) {
obj.init = init
obj.mutex = &sync.Mutex{}
obj.prefix = fmt.Sprintf("%s/", path.Join(obj.init.Prefix, "pool"))
}

// Pool returns a unique integer from a pool of numbers. Within a given
// namespace, it returns the same integer for a given name. It is a simple
// mechanism to allocate numbers to different inputs when we don't have a
// hashing alternative. It does not allocate zero.
func (obj *PoolImpl) Pool(ctx context.Context, namespace, uid string, config *PoolConfig) (int, error) {
if namespace == "" {
return 0, fmt.Errorf("namespace is empty")
}
if strings.Contains(namespace, "/") {
return 0, fmt.Errorf("namespace contains slash")
}
if uid == "" {
return 0, fmt.Errorf("uid is empty")
}
if strings.Contains(uid, "/") {
return 0, fmt.Errorf("uid contains slash")
}

prefix, err := obj.getPrefix()
if err != nil {
return 0, err
}

dir := fmt.Sprintf("%s/", path.Join(prefix, namespace))
file := fmt.Sprintf("%s.uid", path.Join(dir, uid)) // file

// TODO: Run clean up funcs here to get rid of any stale/expired values.
// TODO: This will happen based on the future config options we build...

obj.mutex.Lock()
defer obj.mutex.Unlock()
if err := os.MkdirAll(dir, 0755); err != nil {
return 0, err
}

fn := func(p string) (int, error) {
b, err := os.ReadFile(p)
if err != nil && !os.IsNotExist(err) {
return 0, err // real error
}
if err != nil {
return 0, nil // absent!
}

// File exists!
d, err := strconv.Atoi(strings.TrimSpace(string(b)))
if err != nil {
// Someone put corrupt data in a uid file.
return 0, err // real error
}
return d, nil // value already allocated!
}

d, err := fn(file)
if err != nil {
return 0, err // real error
}
if d != 0 {
return d, nil // Value already allocated! We're done early.
}

// Not found, so find the max. (0 without error means not found!)

files, err := os.ReadDir(dir) // ([]os.DirEntry, error)
if err != nil {
return 0, err // real error
}

m := 0
for _, f := range files {
if f.IsDir() {
continue // unexpected!
}
d, err := fn(path.Join(dir, f.Name()))
if err != nil {
return 0, err // real error
}
if d == 0 {
// Must be someone deleting files without our mutex!
return 0, fmt.Errorf("unexpected missing file")
}

m = max(m, d)
}

m++ // increment
data := []byte(fmt.Sprintf("%d\n", m)) // it's polite to end with \n
if err := os.WriteFile(file, data, 0600); err != nil {
return 0, err
}

return m, nil
}

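// [Editor's note] An illustrative sketch (not part of this diff) of the Pool
// API above: the same namespace+uid pair always maps back to the same integer,
// and new uids get the next free integer starting from one. The localAPI value
// is assumed to be an initialized *API (or *PoolImpl).
//
//	n1, _ := localAPI.Pool(ctx, "vlan", "web1", nil)
//	n2, _ := localAPI.Pool(ctx, "vlan", "web2", nil)
//	n3, _ := localAPI.Pool(ctx, "vlan", "web1", nil)
//	// n1 == n3 (stable for "web1"), n2 is a different, never-zero integer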
// getPrefix gets the prefix dir to use, or errors if it can't make one. It
// makes it on first use, and returns quickly from any future calls to it.
func (obj *PoolImpl) getPrefix() (string, error) {
// NOTE: Moving this mutex to just below the first early return, would
// be a benign race, but as it turns out, it's possible that a compiler
// would see this behaviour as "undefined" and things might not work as
// intended. It could perhaps be replaced with a sync/atomic primitive
// if we wanted better performance here.
obj.mutex.Lock()
defer obj.mutex.Unlock()

if obj.prefixExists { // former race read
return obj.prefix, nil
}

// MkdirAll instead of Mkdir because we have no idea if the parent
// local/ directory was already made yet or not. (If at all.) If path is
// already a directory, MkdirAll does nothing and returns nil. (Good!)
// TODO: I hope MkdirAll is thread-safe on path creation in case another
// future local API tries to make the base (parent) directory too!
if err := os.MkdirAll(obj.prefix, 0755); err != nil {
return "", err
}
obj.prefixExists = true // former race write

return obj.prefix, nil
}

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -52,6 +52,9 @@ var DefaultMetaParams = &MetaParams{
//Sema: []string{},
Rewatch: false,
Realize: false, // true would be more awesome, but unexpected for users
Dollar: false,
Hidden: false,
Export: []string{},
}

// MetaRes is the interface a resource must implement to support meta params.
@@ -132,6 +135,40 @@ type MetaParams struct {
// the resource is blocked because of a failed pre-requisite resource.
// XXX: Not implemented!
Realize bool `yaml:"realize"`

// Dollar allows you to name a resource to start with the dollar
// character. We don't allow this by default since it's probably not
// needed, and is more likely to be a typo where the user forgot to
// interpolate a variable name. In the rare case when it's needed, you
// can disable that check with this meta param.
Dollar bool `yaml:"dollar"`

// Hidden means that this resource will not get executed on the resource
// graph on which it is defined. This can be used as a simple boolean
// switch, or, more commonly in combination with the Export meta param
// which specifies that the resource params are exported into the shared
// database. When this is true, it does not prevent export. In fact, it
// is commonly used in combination with Export. Using this option will
// still include it in the resource graph, but it will exist there in a
// special "mode" where it will not conflict with any other identically
// named resources. It can even be used as part of an edge or via a
// send/recv receiver. It can NOT be a sending vertex. These properties
// differentiate the use of this instead of simply wrapping a resource
// in an "if" statement. If it is hidden, then it does not need to pass
// the resource Validate method step.
Hidden bool `yaml:"hidden"`

// Export is a list of hostnames (and/or the special "*" entry) which if
// set, will mark this resource data as intended for export to those
// hosts. This does not prevent any users of the shared data storage
// from reading these values, so if you want to guarantee secrecy, use
// the encryption primitives. This only labels the data accordingly, so
// that other hosts can know what data is available for them to collect.
// The (kind, name, host) export triple must be unique from any given
// exporter. In other words, you may not export two different instances
// of a kind+name to the same host, the exports must not conflict. On
// resource collect, this parameter is not preserved.
Export []string `yaml:"export"`
}

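// [Editor's note] An illustrative sketch (not part of this diff) of setting
// the new meta params in Go, for a resource that should not run locally but
// should export its data to every host (the special "*" entry). The res value
// is assumed, and MetaParams() is assumed to return the *MetaParams above.
//
//	meta := res.MetaParams()
//	meta.Hidden = true          // do not execute on the local graph
//	meta.Export = []string{"*"} // label the data for export to all hosts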
// Cmp compares two AutoGroupMeta structs and determines if they're equivalent.
@@ -142,7 +179,7 @@ func (obj *MetaParams) Cmp(meta *MetaParams) error {
// XXX: add a one way cmp like we used to have ?
//if obj.Noop != meta.Noop {
// // obj is the existing res, res is the *new* resource
// // if we go from no-noop -> noop, we can re-use the obj
// // if we go from no-noop -> noop, we can reuse the obj
// // if we go from noop -> no-noop, we need to regenerate
// if obj.Noop { // asymmetrical
// return fmt.Errorf("values for Noop are different") // going from noop to no-noop!
@@ -178,6 +215,15 @@ func (obj *MetaParams) Cmp(meta *MetaParams) error {
if obj.Realize != meta.Realize {
return fmt.Errorf("values for Realize are different")
}
if obj.Dollar != meta.Dollar {
return fmt.Errorf("values for Dollar are different")
}
if obj.Hidden != meta.Hidden {
return fmt.Errorf("values for Hidden are different")
}
if err := util.SortedStrSliceCompare(obj.Export, meta.Export); err != nil {
return errwrap.Wrapf(err, "values for Export are different")
}

return nil
}
@@ -197,6 +243,13 @@ func (obj *MetaParams) Validate() error {
}
}

for _, s := range obj.Export {
if s == "" {
return fmt.Errorf("export is empty")
}
}
// TODO: Should we validate the export patterns?

return nil
}

@@ -207,6 +260,11 @@ func (obj *MetaParams) Copy() *MetaParams {
sema = make([]string, len(obj.Sema))
copy(sema, obj.Sema)
}
export := []string{}
if obj.Export != nil {
export = make([]string, len(obj.Export))
copy(export, obj.Export)
}
return &MetaParams{
Noop: obj.Noop,
Retry: obj.Retry,
@@ -218,6 +276,9 @@ func (obj *MetaParams) Copy() *MetaParams {
Sema: sema,
Rewatch: obj.Rewatch,
Realize: obj.Realize,
Dollar: obj.Dollar,
Hidden: obj.Hidden,
Export: export,
}
}

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -33,7 +33,12 @@ import (
"context"
"encoding/gob"
"fmt"
"path/filepath"
"reflect"
"runtime"
"strings"

docsUtil "github.com/purpleidea/mgmt/docs/util"
"github.com/purpleidea/mgmt/engine/local"
"github.com/purpleidea/mgmt/pgraph"
"github.com/purpleidea/mgmt/util/errwrap"
@@ -41,6 +46,12 @@ import (
"gopkg.in/yaml.v2"
)

const (
// ResourcesRelDir is the path where the resources are kept, relative to
// the main source code root.
ResourcesRelDir = "engine/resources/"
)

// TODO: should each resource be a sub-package?
var registeredResources = map[string]func() Res{}

@@ -56,6 +67,23 @@ func RegisterResource(kind string, fn func() Res) {
}
gob.Register(f)
registeredResources[kind] = fn

// Additional metadata for documentation generation!
_, filename, _, ok := runtime.Caller(1)
if !ok {
panic(fmt.Sprintf("could not locate resource filename for %s", kind))
}
sp := strings.Split(reflect.TypeOf(f).String(), ".")
if len(sp) != 2 {
panic(fmt.Sprintf("could not parse resource struct name for %s", kind))
}

if err := docsUtil.RegisterResource(kind, &docsUtil.Metadata{
Filename: filepath.Base(filename),
Typename: sp[1],
}); err != nil {
panic(fmt.Sprintf("could not register resource metadata for %s", kind))
}
}

// RegisteredResourcesNames returns the kind of the registered resources.
@@ -67,6 +95,12 @@ func RegisteredResourcesNames() []string {
return kinds
}

// IsKind returns true if this is a valid resource kind.
func IsKind(kind string) bool {
_, ok := registeredResources[kind]
return ok
}

// NewResource returns an empty resource object from a registered kind. It
// errors if the resource kind doesn't exist.
func NewResource(kind string) (Res, error) {
@@ -174,6 +208,27 @@ type Init struct {
Logf func(format string, v ...interface{})
}

// Copy makes a copy of this Init struct, with all of the same elements inside.
func (obj *Init) Copy() *Init {
return &Init{
Program: obj.Program,
Version: obj.Version,
Hostname: obj.Hostname,
Running: obj.Running,
Event: obj.Event,
Refresh: obj.Refresh,
Send: obj.Send,
Recv: obj.Recv,
//Graph: obj.Graph, // TODO: not implemented, use FilteredGraph
FilteredGraph: obj.FilteredGraph,
Local: obj.Local,
World: obj.World,
VarDir: obj.VarDir,
Debug: obj.Debug,
Logf: obj.Logf,
}
}

// KindedRes is an interface that is required for a resource to have a kind.
type KindedRes interface {
// Kind returns a string representing the kind of resource this is.
@@ -246,8 +301,8 @@ func Stringer(res Res) string {
// the resource only. This was formerly a string, but a struct is more precise.
// The result is suitable as a unique map key.
type ResPtrUID struct {
kind string
name string
Kind string
Name string
}

// PtrUID generates a ResPtrUID from a resource. The result is suitable as a
@@ -255,7 +310,7 @@ type ResPtrUID struct {
func PtrUID(res Res) ResPtrUID {
// the use of "repr" is kind of arbitrary as long as it's unique
//return ResPtrUID(Repr(res.Kind(), res.Name()))
return ResPtrUID{kind: res.Kind(), name: res.Name()}
return ResPtrUID{Kind: res.Kind(), Name: res.Name()}
}

// Validate validates a resource by checking multiple aspects. This is the main
@@ -272,6 +327,18 @@ func Validate(res Res) error {
return errwrap.Wrapf(err, "the Res has an invalid meta param")
}

// TODO: pull dollar prefix from a constant
// This catches typos where the user meant to use ${var} interpolation.
if !res.MetaParams().Dollar && strings.HasPrefix(res.Name(), "$") {
return fmt.Errorf("the Res name starts with a $")
}

// Don't need to validate normally if hidden.
// XXX: Check if it's also Exported too? len(res.MetaParams.Export) > 0
if res.MetaParams().Hidden {
return nil
}

return res.Validate()
}

@@ -336,12 +403,20 @@ type CompatibleRes interface {
Merge(CompatibleRes) (CompatibleRes, error)
}

// CollectableRes is an interface for resources that support collection. It is
// currently temporary until a proper API for all resources is invented.
type CollectableRes interface {
// ExportableRes allows the resource to have its own implementation of resource
// encoding, so that it can send data over the wire differently. It's unlikely
// that you will want to implement this interface for most scenarios. It may be
// useful to limit private data exposure, large data sizes, and to add more info
// to what would normally be shared.
type ExportableRes interface {
Res

CollectPattern(string) // XXX: temporary until Res collection is more advanced
// ToB64 lets the resource provide an alternative implementation of the
// usual ResToB64 method. This lets the resource omit, add, or modify
// the parameter data before it goes out over the wire.
ToB64() (string, error)

// TODO: Do we want to add a FromB64 method for decoding the Resource?
}

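// [Editor's note] An illustrative sketch (not part of this diff) of a
// hypothetical resource implementing ExportableRes to strip a private field
// before encoding. MyRes and its CopyWithoutSecret helper are invented for
// illustration; only engineUtil.ResToB64 comes from this codebase.
//
//	func (obj *MyRes) ToB64() (string, error) {
//		safe := obj.CopyWithoutSecret() // omit private data before export
//		return engineUtil.ResToB64(safe)
//	}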
// YAMLRes is a resource that supports creation by unmarshalling.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Mgmt
|
||||
# Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
# Copyright (C) James Shubin and the project contributors
|
||||
# Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
@@ -27,22 +27,17 @@
|
||||
# additional permission if he deems it necessary to achieve the goals of this
|
||||
# additional permission.
|
||||
|
||||
SHELL = /usr/bin/env bash
|
||||
.PHONY: all build clean
|
||||
SHELL = bash
|
||||
.PHONY: build clean
|
||||
default: build
|
||||
|
||||
all: build
|
||||
WASM_FILE = http_server_ui/main.wasm
|
||||
|
||||
build: kind_stringer.go
|
||||
build: $(WASM_FILE)
|
||||
|
||||
$(WASM_FILE): http_server_ui/main.go
|
||||
@echo "Generating: wasm..."
|
||||
cd http_server_ui/ && env GOOS=js GOARCH=wasm go build -o `basename $(WASM_FILE)`
|
||||
|
||||
clean:
|
||||
@rm -f kind_stringer.go || true
|
||||
|
||||
kind_stringer.go: type.go
|
||||
@echo "Generating: type kind strings..."
|
||||
@# stringer fails if run from GOROOT.
|
||||
@# see: https://github.com/golang/go/issues/31843
|
||||
@if which stringer | grep `go env GOROOT`; then \
|
||||
echo "stringer cannot run from GOROOT"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@go generate
|
||||
@rm -f $(WASM_FILE) || true
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -148,7 +148,6 @@ func (obj *AugeasRes) Watch(ctx context.Context) error {
|
||||
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("Watching: %s", obj.File) // attempting to watch...
|
||||
@@ -165,17 +164,12 @@ func (obj *AugeasRes) Watch(ctx context.Context) error {
|
||||
if obj.init.Debug { // don't access event.Body if event.Error isn't nil
|
||||
obj.init.Logf("Event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
send = true
|
||||
|
||||
case <-ctx.Done(): // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -206,7 +200,6 @@ func (obj *AugeasRes) checkApplySet(ctx context.Context, apply bool, ag *augeas.
|
||||
|
||||
// CheckApply method for Augeas resource.
|
||||
func (obj *AugeasRes) CheckApply(ctx context.Context, apply bool) (bool, error) {
|
||||
obj.init.Logf("CheckApply: %s", obj.File)
|
||||
// By default we do not set any option to augeas, we use the defaults.
|
||||
opts := augeas.None
|
||||
if obj.Lens != "" {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -159,7 +159,6 @@ var AwsRegions = []string{
|
||||
// http://docs.aws.amazon.com/cli/latest/userguide/cli-config-files.html
|
||||
type AwsEc2Res struct {
|
||||
traits.Base // add the base methods without re-implementation
|
||||
traits.Sendable
|
||||
|
||||
init *engine.Init
|
||||
|
||||
@@ -193,7 +192,7 @@ type AwsEc2Res struct {
|
||||
|
||||
// UserData is used to run bash and cloud-init commands on first launch.
|
||||
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
|
||||
// for documantation and examples.
|
||||
// for documentation and examples.
|
||||
UserData string `lang:"userdata" yaml:"userdata"`
|
||||
|
||||
client *ec2.EC2 // client session for AWS API calls
|
||||
@@ -448,8 +447,6 @@ func (obj *AwsEc2Res) Watch(ctx context.Context) error {
|
||||
// longpollWatch uses the ec2 api's built in methods to watch ec2 resource
|
||||
// state.
|
||||
func (obj *AwsEc2Res) longpollWatch(ctx context.Context) error {
|
||||
send := false
|
||||
|
||||
// We tell the engine that we're running right away. This is not correct,
|
||||
// but the api doesn't have a way to signal when the waiters are ready.
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
@@ -528,17 +525,13 @@ func (obj *AwsEc2Res) longpollWatch(ctx context.Context) error {
|
||||
continue
|
||||
default:
|
||||
obj.init.Logf("State: %v", msg.state)
|
||||
send = true
|
||||
}
|
||||
|
||||
case <-ctx.Done(): // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
if send {
|
||||
send = false
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -548,7 +541,6 @@ func (obj *AwsEc2Res) longpollWatch(ctx context.Context) error {
|
||||
// it can publish to. snsWatch creates an http server which listens for messages
|
||||
// published to the topic and processes them accordingly.
|
||||
func (obj *AwsEc2Res) snsWatch(ctx context.Context) error {
|
||||
send := false
|
||||
defer obj.wg.Wait()
|
||||
// create the sns listener
|
||||
// closing is handled by http.Server.Shutdown in the defer func below
|
||||
@@ -623,22 +615,18 @@ func (obj *AwsEc2Res) snsWatch(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
obj.init.Logf("State: %v", msg.event)
|
||||
send = true
|
||||
|
||||
case <-ctx.Done(): // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
if send {
|
||||
send = false
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckApply method for AwsEc2 resource.
|
||||
func (obj *AwsEc2Res) CheckApply(ctx context.Context, apply bool) (bool, error) {
|
||||
obj.init.Logf("CheckApply(%t)", apply)
|
||||
obj.init.Logf("CheckApply(%t)", apply) // XXX: replace with logf on change
|
||||
|
||||
// find the instance we need to check
|
||||
instance, err := describeInstanceByName(obj.client, obj.prependName())
|
||||
|
||||
466
engine/resources/bmc_power.go
Normal file
@@ -0,0 +1,466 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/traits"
|
||||
|
||||
bmclib "github.com/bmc-toolbox/bmclib/v2"
|
||||
"github.com/bmc-toolbox/bmclib/v2/providers/rpc"
|
||||
)
|
||||
|
||||
func init() {
|
||||
engine.RegisterResource("bmc:power", func() engine.Res { return &BmcPowerRes{} })
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultBmcPowerPort is the default port we try to connect on.
|
||||
DefaultBmcPowerPort = 443
|
||||
|
||||
// BmcDriverSecureSuffix is the magic char we append to a driver name to
|
||||
// specify we want the SSL/TLS variant.
|
||||
BmcDriverSecureSuffix = "s"
|
||||
|
||||
// BmcDriverRPC is the RPC driver.
|
||||
BmcDriverRPC = "rpc"
|
||||
|
||||
// BmcDriverGofish is the gofish driver.
|
||||
BmcDriverGofish = "gofish"
|
||||
)
|
||||
|
||||
// BmcPowerRes is a resource that manages power state of a BMC. This is usually
|
||||
// used for turning computers on and off. The name value can be a big URL string
|
||||
// in the form: `driver://user:pass@hostname:port`. For example, you may see:
|
||||
// gofishs://ADMIN:hunter2@127.0.0.1:8800 to use the "https" variant of the
|
||||
// gofish driver.
|
||||
//
|
||||
// NOTE: New drivers should either not end in "s" or at least not be identical
// to the name of another driver once an "s" is added to or removed from the
// end.
|
||||
type BmcPowerRes struct {
|
||||
traits.Base // add the base methods without re-implementation
|
||||
|
||||
init *engine.Init
|
||||
|
||||
// Hostname to connect to. If not specified, we parse this from the
|
||||
// Name.
|
||||
Hostname string `lang:"hostname" yaml:"hostname"`
|
||||
|
||||
// Port to connect to. If not specified, we parse this from the Name.
|
||||
Port int `lang:"port" yaml:"port"`
|
||||
|
||||
// Username to use to connect. If not specified, we parse this from the
|
||||
// Name.
|
||||
// TODO: If the Username field is not set, should we parse from the
|
||||
// Name? It's not really part of the BMC unique identifier so maybe we
|
||||
// shouldn't use that.
|
||||
Username string `lang:"username" yaml:"username"`
|
||||
|
||||
// Password to use to connect. We do NOT parse this from the Name unless
|
||||
// you set InsecurePassword to true.
|
||||
// XXX: Use mgmt magic credentials in the future.
|
||||
Password string `lang:"password" yaml:"password"`
|
||||
|
||||
// InsecurePassword can be set to true to allow a password in the Name.
|
||||
InsecurePassword bool `lang:"insecure_password" yaml:"insecure_password"`
|
||||
|
||||
// Driver to use, such as: "gofish" or "rpc". This is a different
|
||||
// concept than the "bmclib" driver vs provider distinction. Here we
|
||||
// just statically pick what we're using without any magic. If not
|
||||
// specified, we parse this from the Name scheme. If this ends with an
|
||||
// extra "s" then we use https instead of http.
|
||||
Driver string `lang:"driver" yaml:"driver"`
|
||||
|
||||
// State of machine power. Can be "on" or "off".
|
||||
State string `lang:"state" yaml:"state"`
|
||||
|
||||
driver string
|
||||
scheme string
|
||||
}
|
||||
|
||||
// validDriver determines if we are using a valid driver. This does not include
// the magic "s" bits. This function needs to be expanded as we support more
// drivers.
|
||||
func (obj *BmcPowerRes) validDriver(driver string) error {
|
||||
if driver == BmcDriverRPC {
|
||||
return nil
|
||||
}
|
||||
if driver == BmcDriverGofish {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unknown driver: %s", driver)
|
||||
}
|
||||
|
||||
// getHostname returns the hostname that we want to connect to. If the Hostname
|
||||
// field is set, we use that, otherwise we parse from the Name.
|
||||
func (obj *BmcPowerRes) getHostname() string {
|
||||
if obj.Hostname != "" {
|
||||
return obj.Hostname
|
||||
}
|
||||
|
||||
u, err := url.Parse(obj.Name())
|
||||
if err != nil || u == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// SplitHostPort splits a network address of the form "host:port",
|
||||
// "host%zone:port", "[host]:port" or "[host%zone]:port" into host or
|
||||
// host%zone and port.
|
||||
host, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
return u.Host // must be a naked hostname or ip w/o port
|
||||
}
|
||||
_ = port
|
||||
|
||||
return host
|
||||
}
|
||||
|
||||
// getPort returns the port that we want to connect to. If the Port field is
|
||||
// set, we use that, otherwise we parse from the Name.
|
||||
//
|
||||
// NOTE: We return a string since all the bmclib things usually expect a string,
|
||||
// but if that gets fixed we should return an int here instead.
|
||||
func (obj *BmcPowerRes) getPort() string {
|
||||
if obj.Port != 0 {
|
||||
return strconv.Itoa(obj.Port)
|
||||
}
|
||||
|
||||
u, err := url.Parse(obj.Name())
|
||||
if err != nil || u == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// SplitHostPort splits a network address of the form "host:port",
|
||||
// "host%zone:port", "[host]:port" or "[host%zone]:port" into host or
|
||||
// host%zone and port.
|
||||
host, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
return strconv.Itoa(DefaultBmcPowerPort) // default port
|
||||
}
|
||||
_ = host
|
||||
|
||||
return port
|
||||
}
|
||||
|
||||
// getUsername returns the username that we want to connect with. If the
|
||||
// Username field is set, we use that, otherwise we parse from the Name.
|
||||
// TODO: If the Username field is not set, should we parse from the Name? It's
|
||||
// not really part of the BMC unique identifier so maybe we shouldn't use that.
|
||||
func (obj *BmcPowerRes) getUsername() string {
|
||||
if obj.Username != "" {
|
||||
return obj.Username
|
||||
}
|
||||
|
||||
u, err := url.Parse(obj.Name())
|
||||
if err != nil || u == nil || u.User == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return u.User.Username()
|
||||
}
|
||||
|
||||
// getPassword returns the password that we want to connect with.
|
||||
// XXX: Use mgmt magic credentials in the future.
|
||||
func (obj *BmcPowerRes) getPassword() string {
|
||||
if obj.Password != "" || !obj.InsecurePassword {
|
||||
return obj.Password
|
||||
}
|
||||
// NOTE: We don't look at any password string from the name unless the
|
||||
// InsecurePassword field is true.
|
||||
|
||||
u, err := url.Parse(obj.Name())
|
||||
if err != nil || u == nil || u.User == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
password, ok := u.User.Password()
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
|
||||
return password
|
||||
}
|
||||
|
||||
// getRawDriver returns the raw magic driver string. If the Driver field is set,
|
||||
// we use that, otherwise we parse from the Name. This version may include the
|
||||
// magic "s" at the end.
|
||||
func (obj *BmcPowerRes) getRawDriver() string {
|
||||
if obj.Driver != "" {
|
||||
return obj.Driver
|
||||
}
|
||||
|
||||
u, err := url.Parse(obj.Name())
|
||||
if err != nil || u == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return u.Scheme
|
||||
}
|
||||
|
||||
// getDriverAndScheme figures out which driver and scheme we want to use.
|
||||
func (obj *BmcPowerRes) getDriverAndScheme() (string, string, error) {
|
||||
driver := obj.getRawDriver()
|
||||
err := obj.validDriver(driver)
|
||||
if err == nil {
|
||||
return driver, "http", nil
|
||||
}
|
||||
|
||||
driver = strings.TrimSuffix(driver, BmcDriverSecureSuffix)
|
||||
if err := obj.validDriver(driver); err == nil {
|
||||
return driver, "https", nil
|
||||
}
|
||||
|
||||
return "", "", err // return the first error
|
||||
}
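To make the naming convention concrete, here is a small standalone sketch (not part of the resource) that pulls apart the example name from the doc comment with the same standard library calls the getters above use. It strips the trailing "s" unconditionally for brevity; the real getDriverAndScheme only does so when the raw scheme is not already a valid driver.
package main

import (
    "fmt"
    "net"
    "net/url"
    "strings"
)

func main() {
    // Example name taken from the BmcPowerRes doc comment above.
    u, err := url.Parse("gofishs://ADMIN:hunter2@127.0.0.1:8800")
    if err != nil {
        panic(err)
    }
    host, port, _ := net.SplitHostPort(u.Host) // "127.0.0.1", "8800"
    pass, _ := u.User.Password()               // only used when insecure_password is true

    driver := u.Scheme // "gofishs"
    scheme := "http"
    if d := strings.TrimSuffix(driver, "s"); d != driver {
        driver, scheme = d, "https" // the trailing "s" selects the TLS variant
    }
    fmt.Println(driver, scheme, u.User.Username(), pass, host, port)
    // Output: gofish https ADMIN hunter2 127.0.0.1 8800
}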
|
||||
|
||||
// getDriver returns the actual driver that we want to connect with. If the
|
||||
// Driver field is set, we use that, otherwise we parse from the Name. This
|
||||
// version does NOT include the magic "s" at the end.
|
||||
func (obj *BmcPowerRes) getDriver() string {
|
||||
return obj.driver
|
||||
}
|
||||
|
||||
// getScheme figures out which scheme we want to use.
|
||||
func (obj *BmcPowerRes) getScheme() string {
|
||||
return obj.scheme
|
||||
}
|
||||
|
||||
// Default returns some sensible defaults for this resource.
|
||||
func (obj *BmcPowerRes) Default() engine.Res {
|
||||
return &BmcPowerRes{}
|
||||
}
|
||||
|
||||
// Validate if the params passed in are valid data.
|
||||
func (obj *BmcPowerRes) Validate() error {
|
||||
// XXX: Force polling until we have real events...
|
||||
if obj.MetaParams().Poll == 0 {
|
||||
return fmt.Errorf("events are not yet supported, use polling")
|
||||
}
|
||||
|
||||
if obj.getHostname() == "" {
|
||||
return fmt.Errorf("need a Hostname")
|
||||
}
|
||||
//if obj.getUsername() == "" {
|
||||
// return fmt.Errorf("need a Username")
|
||||
//}
|
||||
|
||||
if obj.getRawDriver() == "" {
|
||||
return fmt.Errorf("need a Driver")
|
||||
}
|
||||
if _, _, err := obj.getDriverAndScheme(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init runs some startup code for this resource.
|
||||
func (obj *BmcPowerRes) Init(init *engine.Init) error {
|
||||
obj.init = init // save for later
|
||||
|
||||
driver, scheme, err := obj.getDriverAndScheme()
|
||||
if err != nil {
|
||||
// programming error (we checked in Validate)
|
||||
return err
|
||||
}
|
||||
obj.driver = driver
|
||||
obj.scheme = scheme
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup is run by the engine to clean up after the resource is done.
|
||||
func (obj *BmcPowerRes) Cleanup() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// client builds the bmclib client. The API to build it is complicated.
|
||||
func (obj *BmcPowerRes) client() *bmclib.Client {
|
||||
// NOTE: The bmclib API is weird, you can't put the port in this string!
|
||||
u := fmt.Sprintf("%s://%s", obj.getScheme(), obj.getHostname())
|
||||
|
||||
uPort := u
|
||||
if p := obj.getPort(); p != "" {
|
||||
uPort = u + ":" + p
|
||||
}
|
||||
|
||||
opts := []bmclib.Option{}
|
||||
|
||||
if obj.getDriver() == BmcDriverRPC {
|
||||
opts = append(opts, bmclib.WithRPCOpt(rpc.Provider{
|
||||
// NOTE: The main API cannot take a port, but here we do!
|
||||
ConsumerURL: uPort,
|
||||
}))
|
||||
}
|
||||
|
||||
if p := obj.getPort(); p != "" {
|
||||
switch obj.getDriver() {
|
||||
case BmcDriverRPC:
|
||||
// TODO: ???
|
||||
|
||||
case BmcDriverGofish:
|
||||
// XXX: Why doesn't this accept an int?
|
||||
opts = append(opts, bmclib.WithRedfishPort(p))
|
||||
|
||||
//case BmcDriverOpenbmc:
|
||||
// // XXX: Why doesn't this accept an int?
|
||||
// opts = append(opts, openbmc.WithPort(p))
|
||||
|
||||
default:
|
||||
// TODO: error or pass through?
|
||||
obj.init.Logf("unhandled driver: %s", obj.getDriver())
|
||||
}
|
||||
}
|
||||
|
||||
client := bmclib.NewClient(u, obj.getUsername(), obj.getPassword(), opts...) // use getPassword so the InsecurePassword logic applies
|
||||
|
||||
if obj.getDriver() != "" && obj.getDriver() != BmcDriverRPC {
|
||||
client = client.For(obj.getDriver()) // limit to this provider
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *BmcPowerRes) Watch(ctx context.Context) error {
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
select {
|
||||
case <-ctx.Done(): // closed by the engine to signal shutdown
|
||||
}
|
||||
|
||||
//obj.init.Event() // notify engine of an event (this can block)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckApply method for BmcPower resource. It reads the current BMC power
// state and, if we're applying, sets it to the requested State when they
// differ.
|
||||
func (obj *BmcPowerRes) CheckApply(ctx context.Context, apply bool) (bool, error) {
|
||||
|
||||
client := obj.client()
|
||||
|
||||
if err := client.Open(ctx); err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer client.Close(ctx) // (err error)
|
||||
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("connected ok")
|
||||
}
|
||||
|
||||
state, err := client.GetPowerState(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
state = strings.ToLower(state) // normalize
|
||||
obj.init.Logf("get state: %s", state)
|
||||
|
||||
if !apply {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if obj.State == state {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// TODO: should this be "On" and "Off"? Does case matter?
|
||||
ok, err := client.SetPowerState(ctx, obj.State)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !ok {
|
||||
// TODO: When is this ever false?
|
||||
}
|
||||
obj.init.Logf("set state: %s", obj.State)
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Cmp compares two resources and returns an error if they are not equivalent.
|
||||
func (obj *BmcPowerRes) Cmp(r engine.Res) error {
|
||||
// we can only compare BmcPowerRes to others of the same resource kind
|
||||
res, ok := r.(*BmcPowerRes)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a %s", obj.Kind())
|
||||
}
|
||||
|
||||
if obj.Hostname != res.Hostname {
|
||||
return fmt.Errorf("the Hostname differs")
|
||||
}
|
||||
if obj.Port != res.Port {
|
||||
return fmt.Errorf("the Port differs")
|
||||
}
|
||||
if obj.Username != res.Username {
|
||||
return fmt.Errorf("the Username differs")
|
||||
}
|
||||
if obj.Password != res.Password {
|
||||
return fmt.Errorf("the Password differs")
|
||||
}
|
||||
if obj.InsecurePassword != res.InsecurePassword {
|
||||
return fmt.Errorf("the InsecurePassword differs")
|
||||
}
|
||||
|
||||
if obj.Driver != res.Driver {
|
||||
return fmt.Errorf("the Driver differs")
|
||||
}
|
||||
if obj.State != res.State {
|
||||
return fmt.Errorf("the State differs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalYAML is the custom unmarshal handler for this struct. It is
|
||||
// primarily useful for setting the defaults.
|
||||
func (obj *BmcPowerRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
type rawRes BmcPowerRes // indirection to avoid infinite recursion
|
||||
|
||||
def := obj.Default() // get the default
|
||||
res, ok := def.(*BmcPowerRes) // put in the right format
|
||||
if !ok {
|
||||
return fmt.Errorf("could not convert to BmcPowerRes")
|
||||
}
|
||||
raw := rawRes(*res) // convert; the defaults go here
|
||||
|
||||
if err := unmarshal(&raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*obj = BmcPowerRes(raw) // restore from indirection with type conversion!
|
||||
return nil
|
||||
}
|
||||
512
engine/resources/cloudflare_dns.go
Normal file
@@ -0,0 +1,512 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/traits"
|
||||
"github.com/purpleidea/mgmt/util/errwrap"
|
||||
|
||||
"github.com/cloudflare/cloudflare-go/v6"
|
||||
"github.com/cloudflare/cloudflare-go/v6/zones"
|
||||
)
|
||||
|
||||
func init() {
|
||||
engine.RegisterResource("cloudflare:dns", func() engine.Res { return &CloudflareDNSRes{} })
|
||||
}
|
||||
|
||||
// CloudflareDNSRes is a resource for managing a single DNS record in a
// Cloudflare zone via the Cloudflare API. The record is identified by its
// zone, record name, and type, and is created, updated, or deleted so that it
// matches the requested state.
// TODO: expand this description as the resource grows.
|
||||
type CloudflareDNSRes struct {
|
||||
traits.Base
|
||||
init *engine.Init
|
||||
|
||||
APIToken string `lang:"apitoken"`
|
||||
|
||||
Comment string `lang:"comment"`
|
||||
|
||||
Content string `lang:"content"`
|
||||
|
||||
// using a *int64 here to help with disambiguating nil values
|
||||
Priority *int64 `lang:"priority"`
|
||||
|
||||
// using a *bool here to help with disambiguating nil values
|
||||
Proxied *bool `lang:"proxied"`
|
||||
|
||||
Purged bool `lang:"purged"`
|
||||
|
||||
RecordName string `lang:"record_name"`
|
||||
|
||||
State string `lang:"state"`
|
||||
|
||||
TTL int64 `lang:"ttl"`
|
||||
|
||||
Type string `lang:"type"`
|
||||
|
||||
Zone string `lang:"zone"`
|
||||
|
||||
client *cloudflare.Client
|
||||
zoneID string
|
||||
}
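For orientation, here is an illustrative value of the struct above, wrapped in a hypothetical helper that would live inside this package. The function name, token, zone, and address are placeholders, not real data or part of the resource.
// exampleCloudflareDNSRes returns an illustrative, entirely hypothetical value
// of the struct above; every literal below is a placeholder.
func exampleCloudflareDNSRes() *CloudflareDNSRes {
    proxied := true
    return &CloudflareDNSRes{
        APIToken:   "cf-api-token-goes-here", // placeholder token
        Zone:       "example.com",
        RecordName: "www.example.com",
        Type:       "A",
        Content:    "203.0.113.10",
        TTL:        300,
        Proxied:    &proxied,
        State:      "exists",
    }
}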
|
||||
|
||||
func (obj *CloudflareDNSRes) Default() engine.Res {
|
||||
return &CloudflareDNSRes{
|
||||
State: "exists",
|
||||
TTL: 1, // this sets TTL to automatic
|
||||
}
|
||||
}
|
||||
|
||||
func (obj *CloudflareDNSRes) Validate() error {
|
||||
if obj.RecordName == "" {
|
||||
return fmt.Errorf("record name is required")
|
||||
}
|
||||
|
||||
if obj.APIToken == "" {
|
||||
return fmt.Errorf("API token is required")
|
||||
}
|
||||
|
||||
if obj.Type == "" {
|
||||
return fmt.Errorf("record type is required")
|
||||
}
|
||||
|
||||
if (obj.TTL < 60 || obj.TTL > 86400) && obj.TTL != 1 { // API requirement
|
||||
return fmt.Errorf("TTL must be between 60s and 86400s, or set to 1")
|
||||
}
|
||||
|
||||
if obj.Zone == "" {
|
||||
return fmt.Errorf("zone name is required")
|
||||
}
|
||||
|
||||
if obj.State != "exists" && obj.State != "absent" && obj.State != "" {
|
||||
return fmt.Errorf("state must be either 'exists', 'absent', or empty")
|
||||
}
|
||||
|
||||
if obj.State == "exists" && obj.Content == "" && !obj.Purged {
|
||||
return fmt.Errorf("content is required when state is 'exists'")
|
||||
}
|
||||
|
||||
if obj.MetaParams().Poll == 0 {
|
||||
return fmt.Errorf("cloudflare:dns requiers polling, set Meta:poll param (e.g., 60 seconds)")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obj *CloudflareDNSRes) Init(init *engine.Init) error {
|
||||
obj.init = init
|
||||
|
||||
obj.client = cloudflare.NewClient(
|
||||
option.WithAPIToken(obj.APIToken),
|
||||
)
|
||||
|
||||
//TODO: does it make more sense to check it here or in CheckApply()?
|
||||
//zoneListParams := zones.ZoneListParams{
|
||||
// name: cloudflare.F(obj.Zone),
|
||||
//}
|
||||
|
||||
//zoneList, err := obj.client.Zones.List(context.Background(), zoneListParams)
|
||||
//if err != nil {
|
||||
// return errwrap.Wrapf(err, "failed to list zones")
|
||||
//}
|
||||
|
||||
//if len(zoneList.Result) == 0 {
|
||||
// return fmt.Errorf("zone %s not found", obj.Zone)
|
||||
//}
|
||||
|
||||
//obj.zoneID = zoneList.Result[0].ID // the zone lookup above is commented out; CheckApply resolves the zone ID instead
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obj *CloudflareDNSRes) Cleanup() error {
|
||||
obj.APIToken = ""
|
||||
obj.client = nil
|
||||
obj.zoneID = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch isn't implemented for this resource, since the Cloudflare API does not
|
||||
// provide any event stream. Instead, always use polling.
|
||||
func (obj *CloudflareDNSRes) Watch(context.Context) error {
|
||||
return fmt.Errorf("invalid Watch call: requires poll metaparam")
|
||||
}
|
||||
|
||||
func (obj *CloudflareDNSRes) CheckApply(ctx context.Context, apply bool) (bool, error) {
|
||||
zone, err := obj.client.Zones.List(ctx, zones.ZoneListParams{
Name: cloudflare.F(obj.Zone), // filter zones by name
})
if err != nil {
return false, errwrap.Wrapf(err, "failed to list zones")
}
|
||||
|
||||
if len(zone.Result) == 0 {
|
||||
return false, fmt.Errorf("there's no zone registered with name %s", obj.Zone)
|
||||
}
|
||||
|
||||
if len(zone.Result) > 1 {
return false, fmt.Errorf("there's more than one zone with name %s", obj.Zone)
}
obj.zoneID = zone.Result[0].ID // cache the zone ID for the record API calls below
|
||||
|
||||
// We start by checking the need for purging
|
||||
if obj.Purged {
|
||||
checkOK, err := obj.purgeCheckApply(ctx, apply)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !checkOK {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// List existing records
|
||||
listParams := dns.RecordListParams{
|
||||
ZoneID: cloudflare.F(obj.zoneID),
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.RecordListParamsType(obj.Type)),
|
||||
}
|
||||
|
||||
recordList, err := obj.client.DNS.Records.List(ctx, listParams)
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "failed to list DNS records")
|
||||
}
|
||||
|
||||
recordExists := len(recordList.Result) > 0
|
||||
var record dns.Record
|
||||
if recordExists {
|
||||
record = recordList.Result[0]
|
||||
}
|
||||
|
||||
switch obj.State {
|
||||
case "exists", "":
|
||||
if !recordExists {
|
||||
if !apply {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if err := obj.createRecord(ctx); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return false, nil // we applied a change
|
||||
}
|
||||
|
||||
if obj.needsUpdate(record) {
|
||||
if !apply {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if err := obj.updateRecord(ctx, record.ID); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return false, nil // we applied a change
|
||||
}
|
||||
|
||||
case "absent":
|
||||
if recordExists {
|
||||
if !apply {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
deleteParams := dns.RecordDeleteParams{
|
||||
ZoneID: cloudflare.F(obj.zoneID),
|
||||
}
|
||||
|
||||
_, err := obj.client.DNS.Records.Delete(ctx, record.ID, deleteParams)
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "failed to delete DNS record")
|
||||
}
|
||||
return false, nil // we applied a change
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (obj *CloudflareDNSRes) Cmp(r engine.Res) error {
|
||||
if obj == nil && r == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if (obj == nil) != (r == nil) {
|
||||
return fmt.Errorf("one resource is empty")
|
||||
}
|
||||
|
||||
res, ok := r.(*CloudflareDNSRes)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a %s", obj.Kind())
|
||||
}
|
||||
|
||||
if obj.APIToken != res.APIToken {
|
||||
return fmt.Errorf("apitoken differs")
|
||||
}
|
||||
|
||||
// Proxied is a pointer, so compare the pointed-to values, not the addresses.
if (obj.Proxied == nil) != (res.Proxied == nil) {
return fmt.Errorf("proxied values differ")
}
if obj.Proxied != nil && *obj.Proxied != *res.Proxied {
return fmt.Errorf("proxied values differ")
}
|
||||
|
||||
if obj.RecordName != res.RecordName {
|
||||
return fmt.Errorf("record name differs")
|
||||
}
|
||||
|
||||
if obj.Purged != res.Purged {
|
||||
return fmt.Errorf("purge value differs")
|
||||
}
|
||||
|
||||
if obj.State != res.State {
|
||||
return fmt.Errorf("state differs")
|
||||
}
|
||||
|
||||
if obj.TTL != res.TTL {
|
||||
return fmt.Errorf("ttl differs")
|
||||
}
|
||||
|
||||
if obj.Type != res.Type {
|
||||
return fmt.Errorf("record type differs")
|
||||
}
|
||||
|
||||
if obj.Zone != res.Zone {
|
||||
return fmt.Errorf("zone differs")
|
||||
}
|
||||
|
||||
if obj.zoneID != res.zoneID {
|
||||
return fmt.Errorf("zoneid differs")
|
||||
}
|
||||
|
||||
if obj.Content != res.Content {
|
||||
return fmt.Errorf("content param differs")
|
||||
}
|
||||
|
||||
// Priority is a pointer, so compare the pointed-to values, not the addresses.
if (obj.Priority == nil) != (res.Priority == nil) {
return fmt.Errorf("the priority param differs")
}
if obj.Priority != nil && *obj.Priority != *res.Priority {
return fmt.Errorf("the priority param differs")
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obj *CloudflareDNSRes) buildRecordParam() dns.RecordNewParamsBodyUnion {
|
||||
ttl := dns.TTL(obj.TTL)
|
||||
|
||||
switch obj.Type {
|
||||
case "A":
|
||||
param := dns.ARecordParam{
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.ARecordTypeA),
|
||||
Content: cloudflare.F(obj.Content),
|
||||
TTL: cloudflare.F(ttl),
|
||||
}
|
||||
if obj.Proxied != nil {
|
||||
param.Proxied = cloudflare.F(*obj.Proxied)
|
||||
}
|
||||
if obj.Comment != "" {
|
||||
param.Comment = cloudflare.F(obj.Comment)
|
||||
}
|
||||
return param
|
||||
|
||||
case "AAAA":
|
||||
param := dns.AAAARecordParam{
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.AAAARecordTypeAAAA),
|
||||
Content: cloudflare.F(obj.Content),
|
||||
TTL: cloudflare.F(ttl),
|
||||
}
|
||||
if obj.Proxied != nil {
|
||||
param.Proxied = cloudflare.F(*obj.Proxied)
|
||||
}
|
||||
if obj.Comment != "" {
|
||||
param.Comment = cloudflare.F(obj.Comment)
|
||||
}
|
||||
return param
|
||||
|
||||
case "CNAME":
|
||||
param := dns.CNAMERecordParam{
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.CNAMERecordTypeCNAME),
|
||||
Content: cloudflare.F(obj.Content),
|
||||
TTL: cloudflare.F(ttl),
|
||||
}
|
||||
if obj.Proxied != nil {
|
||||
param.Proxied = cloudflare.F(*obj.Proxied)
|
||||
}
|
||||
if obj.Comment != "" {
|
||||
param.Comment = cloudflare.F(obj.Comment)
|
||||
}
|
||||
return param
|
||||
|
||||
case "MX":
|
||||
param := dns.MXRecordParam{
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.MXRecordTypeMX),
|
||||
Content: cloudflare.F(obj.Content),
|
||||
TTL: cloudflare.F(ttl),
|
||||
}
|
||||
if obj.Proxied != nil {
|
||||
param.Proxied = cloudflare.F(*obj.Proxied)
|
||||
}
|
||||
if obj.Priority != nil { // required for MX record
|
||||
param.Priority = cloudflare.F(*obj.Priority)
|
||||
}
|
||||
if obj.Comment != "" {
|
||||
param.Comment = cloudflare.F(obj.Comment)
|
||||
}
|
||||
return param
|
||||
|
||||
case "TXT":
|
||||
param := dns.TXTRecordParam{
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.TXTRecordTypeTXT),
|
||||
Content: cloudflare.F(obj.Content),
|
||||
TTL: cloudflare.F(ttl),
|
||||
}
|
||||
if obj.Proxied != nil {
|
||||
param.Proxied = cloudflare.F(*obj.Proxied)
|
||||
}
|
||||
if obj.Comment != "" {
|
||||
param.Comment = cloudflare.F(obj.Comment)
|
||||
}
|
||||
return param
|
||||
|
||||
case "NS":
|
||||
param := dns.NSRecordParam{
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.NSRecordTypeNS),
|
||||
Content: cloudflare.F(obj.Content),
|
||||
TTL: cloudflare.F(ttl),
|
||||
}
|
||||
if obj.Proxied != nil {
|
||||
param.Proxied = cloudflare.F(*obj.Proxied)
|
||||
}
|
||||
if obj.Comment != "" {
|
||||
param.Comment = cloudflare.F(obj.Comment)
|
||||
}
|
||||
return param
|
||||
|
||||
case "SRV":
|
||||
param := dns.SRVRecordParam{
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.SRVRecordTypeSRV),
|
||||
Content: cloudflare.F(obj.Content),
|
||||
TTL: cloudflare.F(ttl),
|
||||
}
|
||||
if obj.Proxied != nil {
|
||||
param.Proxied = cloudflare.F(*obj.Proxied)
|
||||
}
|
||||
if obj.Priority != nil {
|
||||
param.Priority = cloudflare.F(*obj.Priority)
|
||||
}
|
||||
if obj.Comment != "" {
|
||||
param.Comment = cloudflare.F(obj.Comment)
|
||||
}
|
||||
return param
|
||||
|
||||
case "PTR":
|
||||
param := dns.PTRRecordParam{
|
||||
Name: cloudflare.F(obj.RecordName),
|
||||
Type: cloudflare.F(dns.PTRRecordTypePTR),
|
||||
Content: cloudflare.F(obj.Content),
|
||||
TTL: cloudflare.F(ttl),
|
||||
}
|
||||
if obj.Proxied != nil {
|
||||
param.Proxied = cloudflare.F(*obj.Proxied)
|
||||
}
|
||||
if obj.Comment != "" {
|
||||
param.Comment = cloudflare.F(obj.Comment)
|
||||
}
|
||||
return param
|
||||
|
||||
default:
// TODO: support more record types? Validate should reject unsupported
// types before we ever get here, so returning nil is just a safeguard.
return nil
}
}
|
||||
|
||||
func (obj *CloudflareDNSRes) createRecord(ctx context.Context) error {
|
||||
recordParams := obj.buildRecordParam()
|
||||
|
||||
createParams := dns.RecordNewParams{
|
||||
ZoneID: cloudflare.F(obj.zoneID),
|
||||
Body: recordParams,
|
||||
}
|
||||
|
||||
_, err := obj.client.DNS.Records.New(ctx, createParams)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "failed to create dns record")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obj *CloudflareDNSRes) updateRecord(ctx context.Context, recordID string) error {
|
||||
recordParams := obj.buildRecordParam()
|
||||
|
||||
editParams := dns.RecordEditParams{
|
||||
ZoneID: cloudflare.F(obj.zoneID),
|
||||
Body: recordParams,
|
||||
}
|
||||
|
||||
_, err := obj.client.DNS.Records.Edit(ctx, recordID, editParams)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "failed to update dns record")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obj *CloudflareDNSRes) needsUpdate(record dns.Record) bool {
|
||||
if obj.Content != record.Content {
|
||||
return true
|
||||
}
|
||||
|
||||
if obj.TTL != int64(record.TTL) {
|
||||
return true
|
||||
}
|
||||
|
||||
if obj.Proxied != nil && record.Proxied != nil {
|
||||
if *obj.Proxied != *record.Proxied {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if obj.Priority != nil && record.Priority != nil {
|
||||
if *obj.Priority != *record.Priority {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if obj.Comment != record.Comment {
|
||||
return true
|
||||
}
|
||||
|
||||
// TODO add more checks?
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -65,6 +65,8 @@ type ConfigEtcdRes struct {
|
||||
// IdealClusterSize to zero.
|
||||
AllowSizeShutdown bool `lang:"allow_size_shutdown"`
|
||||
|
||||
world engine.EtcdWorld
|
||||
|
||||
// sizeFlag determines whether sizeCheckApply already ran or not.
|
||||
sizeFlag bool
|
||||
|
||||
@@ -93,6 +95,12 @@ func (obj *ConfigEtcdRes) Validate() error {
|
||||
func (obj *ConfigEtcdRes) Init(init *engine.Init) error {
|
||||
obj.init = init // save for later
|
||||
|
||||
world, ok := obj.init.World.(engine.EtcdWorld)
|
||||
if !ok {
|
||||
return fmt.Errorf("world backend does not support the EtcdWorld interface")
|
||||
}
|
||||
obj.world = world
|
||||
|
||||
obj.interruptChan = make(chan struct{})
|
||||
|
||||
return nil
|
||||
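The Init hunk above caches a type-asserted obj.world so that the later hunks can drop the repeated obj.init.World lookups. Below is a minimal sketch of that optional-capability pattern, with stand-in interface names; the real engine.World and engine.EtcdWorld interfaces carry more methods and different signatures.
package main

import "fmt"

// Stand-ins only: the method below is an assumption for illustration.
type World interface{}
type EtcdWorld interface{ IdealClusterSizeGet() (uint16, error) }

// pickEtcdWorld asks the generic backend for the etcd-specific capability and
// fails early if it is missing, mirroring the check added in Init above.
func pickEtcdWorld(w World) (EtcdWorld, error) {
    ew, ok := w.(EtcdWorld)
    if !ok {
        return nil, fmt.Errorf("world backend does not support the EtcdWorld interface")
    }
    return ew, nil
}

func main() {
    _, err := pickEtcdWorld(struct{}{}) // a backend without etcd support
    fmt.Println(err)
}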
@@ -109,7 +117,7 @@ func (obj *ConfigEtcdRes) Watch(ctx context.Context) error {
|
||||
defer wg.Wait()
|
||||
innerCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
ch, err := obj.init.World.IdealClusterSizeWatch(util.CtxWithWg(innerCtx, wg))
|
||||
ch, err := obj.world.IdealClusterSizeWatch(util.CtxWithWg(innerCtx, wg))
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "could not watch ideal cluster size")
|
||||
}
|
||||
@@ -158,7 +166,7 @@ func (obj *ConfigEtcdRes) sizeCheckApply(ctx context.Context, apply bool) (bool,
|
||||
}
|
||||
}()
|
||||
|
||||
val, err := obj.init.World.IdealClusterSizeGet(ctx)
|
||||
val, err := obj.world.IdealClusterSizeGet(ctx)
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "could not get ideal cluster size")
|
||||
}
|
||||
@@ -181,7 +189,7 @@ func (obj *ConfigEtcdRes) sizeCheckApply(ctx context.Context, apply bool) (bool,
|
||||
|
||||
// set!
|
||||
// This is run as a transaction so we detect if we needed to change it.
|
||||
changed, err := obj.init.World.IdealClusterSizeSet(ctx, obj.IdealClusterSize)
|
||||
changed, err := obj.world.IdealClusterSizeSet(ctx, obj.IdealClusterSize)
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "could not set ideal cluster size")
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -27,6 +27,8 @@
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
//go:build !noconsul
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -27,6 +27,8 @@
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
//go:build !root || !noconsul
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2024+ James Shubin and the project contributors
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -36,7 +36,6 @@ import (
|
||||
"os/user"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/traits"
|
||||
@@ -76,10 +75,6 @@ const (
|
||||
// in 'man systemd-timer', and whose format is a time span as defined in
|
||||
// 'man systemd-time'.
|
||||
OnUnitInactiveSec = "OnUnitInactiveSec"
|
||||
|
||||
// ctxTimeout is the delay, in seconds, before the calls to restart or stop
|
||||
// the systemd unit will error due to timeout.
|
||||
ctxTimeout = 30
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -104,6 +99,11 @@ type CronRes struct {
|
||||
// State must be 'exists' or 'absent'.
|
||||
State string `lang:"state" yaml:"state"`
|
||||
|
||||
// Startup specifies what should happen on startup. Values can be:
|
||||
// enabled, disabled, and undefined (empty string). We default to
|
||||
// enabled.
|
||||
Startup string `lang:"startup" yaml:"startup"`
|
||||
|
||||
// Session, if true, creates the timer as the current user, rather than
|
||||
// root. The service it points to must also be a user unit. It defaults
|
||||
// to false.
|
||||
@@ -142,7 +142,7 @@ type CronRes struct {
|
||||
WakeSystem bool `lang:"wakesystem" yaml:"wakesystem"`
|
||||
|
||||
// RemainAfterElapse, if true, means an elapsed timer will stay loaded,
|
||||
// and its state remains queriable. If false, an elapsed timer unit that
|
||||
// and its state remains queryable. If false, an elapsed timer unit that
|
||||
// cannot elapse anymore is unloaded. It defaults to true.
|
||||
RemainAfterElapse bool `lang:"remainafterelapse" yaml:"remainafterelapse"`
|
||||
|
||||
@@ -154,6 +154,7 @@ type CronRes struct {
|
||||
func (obj *CronRes) Default() engine.Res {
|
||||
return &CronRes{
|
||||
State: "exists",
|
||||
Startup: "enabled",
|
||||
RemainAfterElapse: true,
|
||||
}
|
||||
}
|
||||
@@ -188,6 +189,9 @@ func (obj *CronRes) Validate() error {
|
||||
if obj.State != "absent" && obj.State != "exists" {
|
||||
return fmt.Errorf("state must be 'absent' or 'exists'")
|
||||
}
|
||||
if obj.Startup != "enabled" && obj.Startup != "disabled" && obj.Startup != "" {
|
||||
return fmt.Errorf("startup must be either `enabled` or `disabled` or undefined")
|
||||
}
|
||||
|
||||
// validate trigger
|
||||
if obj.State == "absent" && obj.Trigger == "" {
|
||||
@@ -264,12 +268,12 @@ func (obj *CronRes) Watch(ctx context.Context) error {
|
||||
args := []string{}
|
||||
args = append(args, "type='signal'")
|
||||
args = append(args, "interface='org.freedesktop.systemd1.Manager'")
|
||||
args = append(args, "eavesdrop='true'")
|
||||
//args = append(args, "eavesdrop='true'") // XXX: not allowed anymore?
|
||||
args = append(args, fmt.Sprintf("arg2='%s.timer'", obj.Name()))
|
||||
|
||||
// match dbus messsages
|
||||
// match dbus messages
|
||||
if call := bus.BusObject().Call(engineUtil.DBusAddMatch, 0, strings.Join(args, ",")); call.Err != nil {
|
||||
return err
|
||||
return call.Err
|
||||
}
|
||||
defer bus.BusObject().Call(engineUtil.DBusRemoveMatch, 0, args) // ignore the error
|
||||
|
||||
@@ -292,7 +296,6 @@ func (obj *CronRes) Watch(ctx context.Context) error {
|
||||
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case event := <-dbusChan:
|
||||
@@ -300,7 +303,6 @@ func (obj *CronRes) Watch(ctx context.Context) error {
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("%+v", event)
|
||||
}
|
||||
send = true
|
||||
|
||||
case event, ok := <-obj.recWatcher.Events():
|
||||
// process unit file recwatch events
|
||||
@@ -313,16 +315,12 @@ func (obj *CronRes) Watch(ctx context.Context) error {
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("Event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
send = true
|
||||
|
||||
case <-ctx.Done(): // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -390,14 +388,10 @@ func (obj *CronRes) unitCheckApply(ctx context.Context, apply bool) (bool, error
|
||||
}
|
||||
|
||||
// systemctl daemon-reload
|
||||
if err := conn.Reload(); err != nil {
|
||||
if err := conn.ReloadContext(ctx); err != nil {
|
||||
return false, errwrap.Wrapf(err, "error reloading daemon")
|
||||
}
|
||||
|
||||
// context for stopping/restarting the unit
|
||||
ctx, cancel := context.WithTimeout(ctx, ctxTimeout*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// godbus connection for stopping/restarting the unit
|
||||
if obj.Session {
|
||||
godbusConn, err = util.SessionBusPrivateUsable()
|
||||
@@ -409,6 +403,18 @@ func (obj *CronRes) unitCheckApply(ctx context.Context, apply bool) (bool, error
|
||||
}
|
||||
defer godbusConn.Close()
|
||||
|
||||
// We probably always want to enable this...
|
||||
svc := fmt.Sprintf("%s.timer", obj.Name()) // systemd name
|
||||
files := []string{svc} // the svc represented in a list
|
||||
if obj.Startup == "enabled" {
|
||||
_, _, err = conn.EnableUnitFilesContext(ctx, files, false, true)
|
||||
} else if obj.Startup == "disabled" {
|
||||
_, err = conn.DisableUnitFilesContext(ctx, files, false)
|
||||
}
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "unable to change startup status")
|
||||
}
|
||||
|
||||
// stop or restart the unit
|
||||
if obj.State == "absent" {
|
||||
return false, engineUtil.StopUnit(ctx, godbusConn, fmt.Sprintf("%s.timer", obj.Name()))
|
||||
@@ -426,6 +432,9 @@ func (obj *CronRes) Cmp(r engine.Res) error {
|
||||
if obj.State != res.State {
|
||||
return fmt.Errorf("state differs: %s vs %s", obj.State, res.State)
|
||||
}
|
||||
if obj.Startup != res.Startup {
|
||||
return fmt.Errorf("the Startup differs")
|
||||
}
|
||||
if obj.Trigger != res.Trigger {
|
||||
return fmt.Errorf("trigger differs: %s vs %s", obj.Trigger, res.Trigger)
|
||||
}
|
||||
|
||||
522
engine/resources/deploy_tar.go
Normal file
@@ -0,0 +1,522 @@
|
||||
// Mgmt
|
||||
// Copyright (C) James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
// Additional permission under GNU GPL version 3 section 7
|
||||
//
|
||||
// If you modify this program, or any covered work, by linking or combining it
|
||||
// with embedded mcl code and modules (and that the embedded mcl code and
|
||||
// modules which link with this program, contain a copy of their source code in
|
||||
// the authoritative form) containing parts covered by the terms of any other
|
||||
// license, the licensors of this program grant you additional permission to
|
||||
// convey the resulting work. Furthermore, the licensors of this program grant
|
||||
// the original author, James Shubin, additional permission to update this
|
||||
// additional permission if he deems it necessary to achieve the goals of this
|
||||
// additional permission.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/traits"
|
||||
"github.com/purpleidea/mgmt/util/errwrap"
|
||||
"github.com/purpleidea/mgmt/util/recwatch"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
func init() {
|
||||
engine.RegisterResource("deploy:tar", func() engine.Res { return &DeployTar{} })
|
||||
}
|
||||
|
||||
// DeployTar is a resource that archives the files of a deploy filesystem with
// tar, combining them into a single file. The name of the resource is the path to
|
||||
// the resultant archive file. The input comes from the current deploy. This
|
||||
// uses hashes to determine if something was changed, so as a result, this may
|
||||
// not be suitable if you can create a sha256 hash collision.
|
||||
// TODO: support send/recv to send the output instead of writing to a file?
|
||||
// TODO: This resource is very similar to the tar resource. Update that one if
|
||||
// this changes, or consider porting this to use that as a composite resource.
|
||||
// TODO: consider using a `deploy.get_archive()` function to make a .tar, and a
|
||||
// file resource to store those contents on disk with whatever mode we want...
|
||||
type DeployTar struct {
|
||||
traits.Base // add the base methods without re-implementation
|
||||
|
||||
init *engine.Init
|
||||
|
||||
// Path, which defaults to the name if not specified, represents the
|
||||
// destination path for the compressed file being created. It must be an
|
||||
// absolute path, and as a result must start with a slash. Since it is a
|
||||
// file, it must not end with a slash.
|
||||
Path string `lang:"path" yaml:"path"`
|
||||
|
||||
// Format is the header format to use. If you change this, then the
|
||||
// file will get rearchived. The strange thing is that it seems the
|
||||
// header format is stored for each individual file. The available
|
||||
// values are: const.res.tar.format.unknown, const.res.tar.format.ustar,
|
||||
// const.res.tar.format.pax, and const.res.tar.format.gnu which have
|
||||
// values of 0, 2, 4, and 8 respectively.
|
||||
Format int `lang:"format" yaml:"format"`
|
||||
|
||||
// SendOnly specifies that we don't write the file to disk, and as a
// result, the output is only accessible by the send/recv mechanism.
|
||||
// TODO: Rename this?
|
||||
// TODO: Not implemented
|
||||
//SendOnly bool `lang:"sendonly" yaml:"sendonly"`
|
||||
|
||||
// varDirPathInput is the path we use to store the content hash.
|
||||
varDirPathInput string
|
||||
|
||||
// varDirPathOutput is the path we use to store the output file hash.
|
||||
varDirPathOutput string
|
||||
}
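The 0, 2, 4 and 8 mentioned in the Format field comment are simply the numeric values of Go's archive/tar format constants; a quick standalone check using only the standard library:
package main

import (
    "archive/tar"
    "fmt"
)

func main() {
    // These are the values the Format field comment above refers to.
    fmt.Println(int(tar.FormatUnknown), int(tar.FormatUSTAR), int(tar.FormatPAX), int(tar.FormatGNU))
    // Output: 0 2 4 8
}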
|
||||
|
||||
// getPath returns the actual path to use for this resource. It computes this
|
||||
// after analysis of the Path and Name.
|
||||
func (obj *DeployTar) getPath() string {
|
||||
p := obj.Path
|
||||
if obj.Path == "" { // use the name as the path default if missing
|
||||
p = obj.Name()
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Default returns some sensible defaults for this resource.
|
||||
func (obj *DeployTar) Default() engine.Res {
|
||||
return &DeployTar{
|
||||
Format: int(tar.FormatUnknown), // TODO: will this let it auto-choose?
|
||||
}
|
||||
}
|
||||
|
||||
// Validate if the params passed in are valid data.
|
||||
func (obj *DeployTar) Validate() error {
|
||||
if obj.getPath() == "" {
|
||||
return fmt.Errorf("path is empty")
|
||||
}
|
||||
if !strings.HasPrefix(obj.getPath(), "/") {
|
||||
return fmt.Errorf("path must be absolute")
|
||||
}
|
||||
if strings.HasSuffix(obj.getPath(), "/") {
|
||||
return fmt.Errorf("path must not end with a slash")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init runs some startup code for this resource.
|
||||
func (obj *DeployTar) Init(init *engine.Init) error {
|
||||
obj.init = init // save for later
|
||||
|
||||
dir, err := obj.init.VarDir("")
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "could not get VarDir in Init()")
|
||||
}
|
||||
// return unique files
|
||||
obj.varDirPathInput = path.Join(dir, "input.sha256")
|
||||
obj.varDirPathOutput = path.Join(dir, "output.sha256")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup is run by the engine to clean up after the resource is done.
|
||||
func (obj *DeployTar) Cleanup() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *DeployTar) Watch(ctx context.Context) error {
|
||||
recurse := false // single (output) file
|
||||
recWatcher, err := recwatch.NewRecWatcher(obj.getPath(), recurse)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer recWatcher.Close()
|
||||
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-recWatcher.Events():
|
||||
if !ok { // channel shutdown
|
||||
// TODO: Should this be an error? Previously it
|
||||
// was a `return nil`, and I'm not sure why...
|
||||
//return nil
|
||||
return fmt.Errorf("unexpected close")
|
||||
}
|
||||
if err := event.Error; err != nil {
|
||||
return errwrap.Wrapf(err, "unknown %s watcher error", obj)
|
||||
}
|
||||
if obj.init.Debug { // don't access event.Body if event.Error isn't nil
|
||||
obj.init.Logf("event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
|
||||
case <-ctx.Done(): // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckApply checks the resource state and applies the resource if the bool
|
||||
// input is true. It returns error info and if the state check passed or not.
|
||||
// This is where we actually do the archiving into a tar file work when needed.
|
||||
func (obj *DeployTar) CheckApply(ctx context.Context, apply bool) (bool, error) {
|
||||
uri := obj.init.World.URI() // request each time to ensure it's fresh!
|
||||
|
||||
filesystem, err := obj.init.World.Fs(uri) // open the remote file system
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "can't load code from file system `%s`", uri)
|
||||
}
|
||||
|
||||
h1, err := obj.hashFile(obj.getPath()) // output
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
h2, err := obj.readHashFile(obj.varDirPathOutput, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
i1 := ""
|
||||
i1 = obj.formatPrefix() + "\n" // add the prefix so it is considered
|
||||
|
||||
// TODO: use standard filesystem API's when we can make them work!
|
||||
//fsys := afero.NewIOFS(filesystem)
|
||||
|
||||
if err := afero.Walk(filesystem, "/", func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir() {
|
||||
if path == "/" { // special case for root
|
||||
i1 += path + "|" + "\n"
|
||||
return nil
|
||||
}
|
||||
// hash the dir itself too (eg: empty dirs!)
|
||||
i1 += path + "/" + "|" + "\n"
|
||||
return nil
|
||||
}
|
||||
|
||||
h, err := obj.hashFileAferoFs(filesystem, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i1 += path + "|" + h + "\n"
|
||||
return nil
|
||||
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
i2, err := obj.readHashFile(obj.varDirPathInput, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// We're cheating by computing this before we know if we errored!
|
||||
inputMatches := i1 == i2
|
||||
outputMatches := h1 == h2
|
||||
if err == nil && inputMatches && outputMatches {
|
||||
// If the two hashes match, we assume that the file is correct!
|
||||
// The file has to also exist of course...
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if !apply {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
fail := true // assume we have a failure
|
||||
|
||||
defer func() {
|
||||
if !fail {
|
||||
return
|
||||
}
|
||||
// Don't leave a partial file lying around...
|
||||
obj.init.Logf("removing partial tar file")
|
||||
err := os.Remove(obj.getPath())
|
||||
if err == nil || os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
obj.init.Logf("error removing corrupt tar file: %v", err)
|
||||
}()
|
||||
|
||||
// FIXME: Do we instead want to write to a tmp file and do a move once
|
||||
// we finish writing to be atomic here and avoid partial corrupt files?
|
||||
// FIXME: Add a param called Atomic to specify that behaviour. It's
|
||||
// instant so that might be preferred as it might generate fewer events,
|
||||
// but there's a chance it's copying from obj.init.VarDir() to a
|
||||
// different filesystem.
|
||||
outputFile, err := os.Create(obj.getPath()) // io.Writer
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
//defer outputFile.Sync() // not needed?
|
||||
defer outputFile.Close()
|
||||
|
||||
hash := sha256.New()
|
||||
|
||||
// Write to both to avoid needing to wait for fsync to calculate hash!
|
||||
multiWriter := io.MultiWriter(outputFile, hash)
|
||||
|
||||
tarWriter := tar.NewWriter(multiWriter) // (*tar.Writer, error)
|
||||
defer tarWriter.Close() // Might as well always close if we error early!
|
||||
|
||||
// TODO: formerly tarWriter.AddFS(fsys) // buggy!
|
||||
if err := obj.addAferoFs(tarWriter, filesystem); err != nil {
|
||||
return false, errwrap.Wrapf(err, "error writing fs")
|
||||
}
|
||||
|
||||
// NOTE: Must run this before hashing so that it includes the footer!
|
||||
if err := tarWriter.Close(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
sha256sum := hex.EncodeToString(hash.Sum(nil))
|
||||
|
||||
// TODO: add better logging counts if we can see tarWriter.AddFs too!
|
||||
//obj.init.Logf("wrote %d files into archive", ?)
|
||||
obj.init.Logf("wrote tar archive")
|
||||
|
||||
// After tar is successfully written, store the hashed input result.
|
||||
if !inputMatches {
|
||||
if err := os.WriteFile(obj.varDirPathInput, []byte(i1), 0600); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// Also store the new hashed output result.
|
||||
if !outputMatches || h2 == "" { // If missing, we always write it out!
|
||||
if err := os.WriteFile(obj.varDirPathOutput, []byte(sha256sum+"\n"), 0600); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
fail = false // defer can exit safely!
|
||||
|
||||
return false, nil
|
||||
}
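For reference, the i1 string hashed above is just a newline-separated manifest: the format prefix first, then one path|hash entry per walked entry, with directories carrying a trailing slash and an empty hash. A hedged illustration of what ends up in input.sha256 (file names are made up, hashes elided, and with Format left at its default tar.Format(0) typically renders as "<unknown>"):
format:0|<unknown>
/|
/main.mcl|9c56cc51...
/files/|
/files/motd|2cf24dba...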
|
||||
|
||||
// formatPrefix is a simple helper to add a format identifier for our hash.
|
||||
func (obj *DeployTar) formatPrefix() string {
|
||||
return fmt.Sprintf("format:%d|%s", obj.Format, tar.Format(obj.Format))
|
||||
}
|
||||
|
||||
// hashContent is a simple helper to run our hashing function.
|
||||
func (obj *DeployTar) hashContent(handle io.Reader) (string, error) {
|
||||
hash := sha256.New()
|
||||
if _, err := io.Copy(hash, handle); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hex.EncodeToString(hash.Sum(nil)), nil
|
||||
}
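hashContent is the single hashing primitive everything above relies on; a tiny standalone check of what it produces for a known input:
package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "strings"
)

func main() {
    // Same steps as hashContent: stream the reader into sha256, then hex-encode.
    hash := sha256.New()
    if _, err := io.Copy(hash, strings.NewReader("hello")); err != nil {
        panic(err)
    }
    fmt.Println(hex.EncodeToString(hash.Sum(nil)))
    // 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
}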
|
||||
|
||||
// hashFile is a helper that returns the hash of the specified file. If the file
|
||||
// doesn't exist, it returns the empty string. Otherwise it errors.
|
||||
func (obj *DeployTar) hashFile(file string) (string, error) {
|
||||
f, err := os.Open(file) // io.Reader
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
// This is likely a permissions error.
|
||||
return "", err
|
||||
|
||||
} else if err != nil {
|
||||
return "", nil // File doesn't exist!
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
|
||||
// File exists, lets hash it!
|
||||
|
||||
return obj.hashContent(f)
|
||||
}
|
||||
|
||||
// hashFileAferoFs is a helper that returns the hash of the specified file with
|
||||
// an Afero fs. If the file doesn't exist, it returns the empty string.
|
||||
// Otherwise it errors.
|
||||
func (obj *DeployTar) hashFileAferoFs(fsys afero.Fs, file string) (string, error) {
|
||||
f, err := fsys.Open(file) // io.Reader
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
// This is likely a permissions error.
|
||||
return "", err
|
||||
|
||||
} else if err != nil {
|
||||
return "", nil // File doesn't exist!
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
|
||||
// File exists, lets hash it!
|
||||
|
||||
return obj.hashContent(f)
|
||||
}
|
||||
|
||||
// readHashFile reads the hashed value that we stored for the output file.
|
||||
func (obj *DeployTar) readHashFile(file string, trim bool) (string, error) {
|
||||
// TODO: Use io.ReadFull to avoid reading in a file that's too big!
|
||||
if expected, err := os.ReadFile(file); err != nil && !os.IsNotExist(err) { // ([]byte, error)
|
||||
// This is likely a permissions error?
|
||||
return "", err
|
||||
|
||||
} else if err == nil {
|
||||
if trim {
|
||||
return strings.TrimSpace(string(expected)), nil
|
||||
}
|
||||
return string(expected), nil
|
||||
}
|
||||
|
||||
// File doesn't exist!
|
||||
return "", nil
|
||||
}

// addFS is an edited copy of archive/tar's *Writer.AddFs function. This version
// correctly adds the directories too! https://github.com/golang/go/issues/69459
func (obj *DeployTar) addFS(tw *tar.Writer, fsys fs.FS) error {
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if name == "." {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		// TODO: Handle symlinks when fs.ReadLinkFS is available. (#49580)
		if !info.Mode().IsRegular() && !info.Mode().IsDir() {
			return fmt.Errorf("deploy:tar: cannot add non-regular file")
		}
		h, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		h.Name = name
		h.Format = tar.Format(obj.Format)
		if d.IsDir() {
			h.Name += "/" // dir
		}

		if err := tw.WriteHeader(h); err != nil {
			return err
		}

		if d.IsDir() {
			return nil // no contents to copy in
		}

		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}
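Since the calling code above closes tarWriter and then reads hash.Sum, the archive bytes are presumably hashed as they are written, most likely via something like io.MultiWriter; that wiring is not visible in this hunk, so treat it as an assumption. A standalone sketch of the pattern follows. For brevity it uses the stock (*tar.Writer).AddFS from Go 1.22+, which, per the golang/go#69459 issue referenced above, does not emit directory headers; that omission is exactly what the custom addFS here works around.

package main

import (
	"archive/tar"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

func main() {
	// A throwaway directory with one file, so the walk is deterministic.
	dir, err := os.MkdirTemp("", "deploytar-example-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := os.WriteFile(filepath.Join(dir, "hello.txt"), []byte("hi\n"), 0600); err != nil {
		panic(err)
	}

	out, err := os.CreateTemp("", "example-*.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	hash := sha256.New()
	// Every byte that goes into the archive also feeds the hash.
	tw := tar.NewWriter(io.MultiWriter(out, hash))

	// Stock AddFS (Go 1.22+): writes file entries but no directory headers,
	// which is the behaviour the custom addFS above fixes.
	if err := tw.AddFS(os.DirFS(dir)); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Println("wrote", out.Name())
	fmt.Println("sha256:", hex.EncodeToString(hash.Sum(nil)))
}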

// addAferoFs is an edited copy of archive/tar's *Writer.AddFs function but for
// the deprecated Afero.Fs API. This version correctly adds the directories too!
// https://github.com/golang/go/issues/69459
func (obj *DeployTar) addAferoFs(tw *tar.Writer, fsys afero.Fs) error {
	return afero.Walk(fsys, "/", func(name string, info fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if name == "/" {
			return nil
		}
		// TODO: Handle symlinks when fs.ReadLinkFS is available. (#49580)
		if !info.Mode().IsRegular() && !info.Mode().IsDir() {
			return fmt.Errorf("deploy:tar: cannot add non-regular file")
		}
		h, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		h.Name = name
		h.Format = tar.Format(obj.Format)
		if info.IsDir() {
			h.Name += "/" // dir
		}

		if err := tw.WriteHeader(h); err != nil {
			return err
		}

		if info.IsDir() {
			return nil // no contents to copy in
		}

		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}

// Cmp compares two resources and returns an error if they are not equivalent.
func (obj *DeployTar) Cmp(r engine.Res) error {
	// we can only compare DeployTar to others of the same resource kind
	res, ok := r.(*DeployTar)
	if !ok {
		return fmt.Errorf("not a %s", obj.Kind())
	}

	if obj.Path != res.Path {
		return fmt.Errorf("the Path differs")
	}

	if obj.Format != res.Format {
		return fmt.Errorf("the Format differs")
	}

	return nil
}

// UnmarshalYAML is the custom unmarshal handler for this struct. It is
// primarily useful for setting the defaults.
func (obj *DeployTar) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type rawRes DeployTar // indirection to avoid infinite recursion

	def := obj.Default()        // get the default
	res, ok := def.(*DeployTar) // put in the right format
	if !ok {
		return fmt.Errorf("could not convert to DeployTar")
	}
	raw := rawRes(*res) // convert; the defaults go here

	if err := unmarshal(&raw); err != nil {
		return err
	}

	*obj = DeployTar(raw) // restore from indirection with type conversion!
	return nil
}
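The rawRes definition above is the usual gopkg.in/yaml.v2 trick for combining defaults with custom unmarshalling: a defined type copies the struct's fields but none of its methods, so calling unmarshal on it cannot re-enter UnmarshalYAML and recurse forever. A small self-contained illustration of the same pattern, using a hypothetical Widget type rather than the real resource:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Widget struct {
	Name  string `yaml:"name"`
	Count int    `yaml:"count"`
}

// UnmarshalYAML sets the defaults first, then overlays whatever the YAML has.
func (obj *Widget) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type rawWidget Widget // same fields, no methods: avoids infinite recursion

	raw := rawWidget{Count: 42} // the default goes here
	if err := unmarshal(&raw); err != nil {
		return err
	}
	*obj = Widget(raw)
	return nil
}

func main() {
	var w Widget
	if err := yaml.Unmarshal([]byte("name: foo\n"), &w); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", w) // {Name:foo Count:42}
}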

@@ -1,5 +1,5 @@
// Mgmt
// Copyright (C) 2013-2024+ James Shubin and the project contributors
// Copyright (C) James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -514,7 +514,6 @@ func (obj *DHCPServerRes) Watch(ctx context.Context) error {
	startupChan := make(chan struct{})
	close(startupChan) // send one initial signal

	var send = false // send event?
	for {
		if obj.init.Debug {
			obj.init.Logf("Looping...")
@@ -523,7 +522,6 @@ func (obj *DHCPServerRes) Watch(ctx context.Context) error {
		select {
		case <-startupChan:
			startupChan = nil
			send = true

		case <-closeSignal: // something shut us down early
			return closeError
@@ -532,11 +530,7 @@ func (obj *DHCPServerRes) Watch(ctx context.Context) error {
			return nil
		}

		// do all our event sending all together to avoid duplicate msgs
		if send {
			send = false
			obj.init.Event() // notify engine of an event (this can block)
		}
		obj.init.Event() // notify engine of an event (this can block)
	}
}

@@ -894,6 +888,10 @@ func (obj *DHCPServerRes) handler4() func(net.PacketConn, net.Addr, *dhcpv4.DHCP
		tmp.UpdateOption(dhcpv4.OptMessageType(dhcpv4.MessageTypeOffer))
	case dhcpv4.MessageTypeRequest:
		tmp.UpdateOption(dhcpv4.OptMessageType(dhcpv4.MessageTypeAck))
	case dhcpv4.MessageTypeDecline:
		// If mask is not set, some DHCP clients will DECLINE.
		obj.init.Logf("handler4: Unhandled decline message: %+v", req)
		return
	default:
		obj.init.Logf("handler4: Unhandled message type: %v", mt)
		return
@@ -979,6 +977,7 @@ func (obj *DHCPServerRes) handler4() func(net.PacketConn, net.Addr, *dhcpv4.DHCP

	if resp != nil {
		if obj.init.Debug {
			// NOTE: This is very useful for debugging!
			obj.init.Logf("sending a DHCPv4 packet: %s", resp.Summary())
		}
		var peer net.Addr
@@ -1251,7 +1250,7 @@ func (obj *DHCPHostRes) handler4(data *HostData) (func(*dhcpv4.DHCPv4, *dhcpv4.D
	// XXX: https://tools.ietf.org/html/rfc2132#section-3.3
	// If both the subnet mask and the router option are specified
	// in a DHCP reply, the subnet mask option MUST be first.
	// XXX: Should we do this? Does it matter? Does the lib do it?
	// If mask is not set, some DHCP clients will DECLINE.
	resp.Options.Update(dhcpv4.OptSubnetMask(obj.ipv4Mask)) // net.IPMask

	// nbp section
@@ -1714,7 +1713,7 @@ func (obj *DHCPRangeRes) Init(init *engine.Init) error {

	obj.init.Logf("from: %s", obj.from)
	obj.init.Logf(" to: %s", obj.to)
	obj.init.Logf("mask: %s", obj.mask) // TODO: print as cidr or dotted quad
	obj.init.Logf("mask: %s", netmaskAsQuadString(obj.mask))

	return nil
}
@@ -1858,7 +1857,7 @@ func (obj *DHCPRangeRes) handler4(data *HostData) (func(*dhcpv4.DHCPv4, *dhcpv4.

	// FIXME: Run this somewhere for now, eventually it should get scheduled
	// to run in the returned duration of time. This way, it would clean old
	// peristed entries when they're stale, not when a new request comes in.
	// persisted entries when they're stale, not when a new request comes in.
	if _, err := obj.leaseClean(); err != nil {
		return nil, errwrap.Wrapf(err, "clean error")
	}
@@ -1932,8 +1931,8 @@ func (obj *DHCPRangeRes) handler4(data *HostData) (func(*dhcpv4.DHCPv4, *dhcpv4.
	// XXX: https://tools.ietf.org/html/rfc2132#section-3.3
	// If both the subnet mask and the router option are specified
	// in a DHCP reply, the subnet mask option MUST be first.
	// XXX: Should we do this? Does it matter? Does the lib do it?
	//resp.Options.Update(dhcpv4.OptSubnetMask(obj.mask)) // net.IPMask
	// If mask is not set, some DHCP clients will DECLINE.
	resp.Options.Update(dhcpv4.OptSubnetMask(obj.mask)) // net.IPMask

	// nbp section
	if obj.opt66 != nil && req.IsOptionRequested(dhcpv4.OptionTFTPServerName) {
@@ -2049,3 +2048,9 @@ func checkValidNetmask(netmask net.IPMask) bool {
	y := x + 1
	return (y & x) == 0
}

// netmaskAsQuadString returns a dotted-quad string giving you something like:
// 255.255.255.0 instead of ffffff00 which is what's seen when you print it now.
func netmaskAsQuadString(netmask net.IPMask) string {
	return fmt.Sprintf("%d.%d.%d.%d", netmask[0], netmask[1], netmask[2], netmask[3])
}
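For reference, the new helper in the hunk above can be exercised on its own: net.CIDRMask(24, 32) yields a 4-byte IPv4 mask, which prints as ffffff00 via IPMask.String and as 255.255.255.0 via the new function. A standalone copy, purely for illustration (the function body is taken verbatim from the hunk above; the surrounding main is an assumption):

package main

import (
	"fmt"
	"net"
)

func netmaskAsQuadString(netmask net.IPMask) string {
	return fmt.Sprintf("%d.%d.%d.%d", netmask[0], netmask[1], netmask[2], netmask[3])
}

func main() {
	m := net.CIDRMask(24, 32) // a 4-byte IPv4 mask
	fmt.Println(m.String())             // ffffff00
	fmt.Println(netmaskAsQuadString(m)) // 255.255.255.0
}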
Some files were not shown because too many files have changed in this diff.