Compare commits
176 Commits
SHA1:
33d20ac6d8 660554cc45 a455324e8c cd5e2e1148 074da4da19 e4e39d820c e5dbb214a2 91af528ff8
18c4e39ea3 bda455ce78 a07aea1ad3 18e2dbf144 564a07e62e a358135e41 6d9be15035 b740e0b78a
9546949945 8ff048d055 95a1c6e7fb 0b1a4a0f30 22b48e296a c696ebf53c a0686b7d2b 8d94be8924
e97ac5033f 44771a0049 32aae8f57a 8207e23cd9 a469029698 203d866643 1488e5ec4d af66138a17
5f060d60a7 73ccbb69ea be60440b20 837efb78e6 4a62a290d8 018399cb1f 646a576358 d8e19cd79a
757cb0cf23 7d92ab335a 46c6d6f656 46260749c1 50664fe115 c480bd94db 79923a939b 327b22113a
12160ab539 2462ea0892 8be09eadd4 98bc96c911 b0fce6a80d 53b8a21d1e 1346492d72 e5bb8d7992
49594b8435 3bd37a7906 e070a85ae0 c189278e24 2a8606bd98 18ea05c837 86c3072515 fccf508dde
2da21f90f4 bec7f1726f 74dfb9d88d 02dddfc227 545016b38f 0ccceaf226 a601115650 ae6267c906
ac142694f5 69b0913315 421bacd7dc 573a76eedb b7948c7f40 2647d09b8f 57e919d7e5 f456aa1407
d0d62892c8 a981cfa053 55290dd1e3 9c4e255994 f9c7d5f7bc 49baea5f6a 6209cf3933 d170a523c3
be5040e7a8 ecbaa5bfc1 25e2af7c89 605688426d 0e069f1e75 e9adbf18d3 610202097a 8c2c552164
b9976cf693 3261c405bd 35d3328e3e e96041d76f c2034bc0c0 e8855f7621 bdb8368e89 f160db2032
de9a32a273 6ba7422c3b 5cbb0ceb80 5b29358b37 90147f3dfb 72873abe05 de1810ba68 7b7c765d78
806d4660cf 5ae5d364bb 1af67e72d4 ed268ad683 5bdd2ca02f eb59861d4d 427e46a2aa 68a8649292
2aff8709a5 62c3add888 3ac878db62 c247cd8fea b6772b7280 807a3df9d1 491d60e267 4811eafd67
8dedbb9620 dd8454161f 9421f2cddd d8c4f78ec1 54296da647 357102fdb5 7e15a9e181 12e0b2d6f7
11b40bf32f 8d2b53373f 9ecc49e592 4f34f7083b 2a6df875ec 51c83116a2 74435aac76 5dfdb5b5f9
ac892a3f3d 1a2e99f559 e97bba0524 0538f0c524 fc3e35868d f1e0cfea1c 56efef69ba 668ec8a248
60912bd01c 0b416e44f8 ecc4aa09d3 b921aabbed 6ad8ac0b6b 44e7e0e970 45820b4ce3 3a098377cb
35875485ee 19760be0bc b3ea33f88d 5b3425a689 a3d157bde6 2c8c9264a4 0009d9b20e dd8d17232f
6312b9225f 68cc09fef2 0651c9de65 38261ec809 067932aebf af47511d58 36b916f27f e519811893
.github/ISSUE_TEMPLATE.md | 9 (vendored, new file)
@@ -0,0 +1,9 @@
+## Versions:
+
+* mgmt version (eg: `mgmt --version`):
+
+* operating system/distribution (eg: `uname -a`):
+
+* golang version (eg: `go version`):
+
+## Description:
.gitignore | 1 (vendored)
@@ -2,7 +2,6 @@
 .omv/
 .ssh/
 .vagrant/
-mgmt-documentation.pdf
 old/
 tmp/
 *_stringer.go
.gitmodules | 3 (vendored)
@@ -13,3 +13,6 @@
 [submodule "vendor/github.com/purpleidea/go-systemd"]
 path = vendor/github.com/purpleidea/go-systemd
 url = https://github.com/purpleidea/go-systemd
+[submodule "vendor/honnef.co/go/augeas"]
+path = vendor/honnef.co/go/augeas
+url = https://github.com/dominikh/go-augeas/
.travis.yml | 17
@@ -1,18 +1,22 @@
 language: go
 go:
-- 1.6
-- 1.7
+- 1.6.x
+- 1.7.x
+- 1.8.x
 - tip
+go_import_path: github.com/purpleidea/mgmt
 sudo: true
 dist: trusty
-before_install: 'git fetch --unshallow'
+before_install:
+- sudo apt update
+- git fetch --unshallow
 install: 'make deps'
 script: 'make test'
 matrix:
 fast_finish: true
 allow_failures:
 - go: tip
-- go: 1.7
+- go: 1.8.x
 notifications:
 irc:
 channels:
@@ -25,4 +29,7 @@ notifications:
 use_notice: false
 skip_join: false
 email:
-- travis-ci@shubin.ca
+recipients:
+- travis-ci@shubin.ca
+on_failure: change
+on_success: change
AUTHORS | 2
@@ -4,5 +4,7 @@ If you appreciate the work of one of the contributors, thank them a beverage!
 For a more exhaustive list please run: git log --format='%aN' | sort -u
 This list is sorted alphabetically by first name.
 
+Felix Frank
 James Shubin
+Julien Pivotto
 Paul Morgan
@@ -1,5 +1,5 @@
 Mgmt
-Copyright (C) 2013-2016+ James Shubin and the project contributors
+Copyright (C) 2013-2017+ James Shubin and the project contributors
 Written by James Shubin <james@shubin.ca> and the project contributors
 
 This program is free software: you can redistribute it and/or modify
Makefile | 21
@@ -1,5 +1,5 @@
 # Mgmt
-# Copyright (C) 2013-2016+ James Shubin and the project contributors
+# Copyright (C) 2013-2017+ James Shubin and the project contributors
 # Written by James Shubin <james@shubin.ca> and the project contributors
 #
 # This program is free software: you can redistribute it and/or modify
@@ -37,6 +37,11 @@ RPM = rpmbuild/RPMS/$(PROGRAM)-$(VERSION)-$(RELEASE).$(ARCH).rpm
 USERNAME := $(shell cat ~/.config/copr 2>/dev/null | grep username | awk -F '=' '{print $$2}' | tr -d ' ')
 SERVER = 'dl.fedoraproject.org'
 REMOTE_PATH = 'pub/alt/$(USERNAME)/$(PROGRAM)'
+ifneq ($(GOTAGS),)
+BUILD_FLAGS = -tags '$(GOTAGS)'
+endif
+
+default: build
 
 #
 # art
@@ -105,9 +110,9 @@ $(PROGRAM): main.go
 @echo "Building: $(PROGRAM), version: $(SVERSION)..."
 ifneq ($(OLDGOLANG),)
 @# avoid equals sign in old golang versions eg in: -X foo=bar
-time go build -ldflags "-X main.program $(PROGRAM) -X main.version $(SVERSION)" -o $(PROGRAM);
+time go build -ldflags "-X main.program $(PROGRAM) -X main.version $(SVERSION)" -o $(PROGRAM) $(BUILD_FLAGS);
 else
-time go build -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)" -o $(PROGRAM);
+time go build -i -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)" -o $(PROGRAM) $(BUILD_FLAGS);
 endif
 
 $(PROGRAM).static: main.go
@@ -115,9 +120,9 @@ $(PROGRAM).static: main.go
 go generate
 ifneq ($(OLDGOLANG),)
 @# avoid equals sign in old golang versions eg in: -X foo=bar
-go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program $(PROGRAM) -X main.version $(SVERSION)' -o $(PROGRAM).static;
+go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program $(PROGRAM) -X main.version $(SVERSION)' -o $(PROGRAM).static $(BUILD_FLAGS);
 else
-go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program=$(PROGRAM) -X main.version=$(SVERSION)' -o $(PROGRAM).static;
+go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program=$(PROGRAM) -X main.version=$(SVERSION)' -o $(PROGRAM).static $(BUILD_FLAGS);
 endif
 
 clean:
@@ -138,8 +143,8 @@ format: gofmt yamlfmt
 
 docs: $(PROGRAM)-documentation.pdf
 
-$(PROGRAM)-documentation.pdf: DOCUMENTATION.md
-pandoc DOCUMENTATION.md -o '$(PROGRAM)-documentation.pdf'
+$(PROGRAM)-documentation.pdf: docs/documentation.md
+pandoc docs/documentation.md -o docs/'$(PROGRAM)-documentation.pdf'
 
 #
 # build aliases
@@ -183,7 +188,7 @@ $(SRPM): $(SPEC) $(SOURCE)
 #
 $(SPEC): rpmbuild/ spec.in
 @echo Running templater...
-#cat spec.in > $(SPEC)
+cat spec.in > $(SPEC)
 sed -e s/__PROGRAM__/$(PROGRAM)/g -e s/__VERSION__/$(VERSION)/g -e s/__RELEASE__/$(RELEASE)/g < spec.in > $(SPEC)
 # append a changelog to the .spec file
 git log --format="* %cd %aN <%aE>%n- (%h) %s%d%n" --date=local | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //' >> $(SPEC)
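The new `GOTAGS` hook above only defines `BUILD_FLAGS = -tags '$(GOTAGS)'`, which the build recipes then append to `go build`. As a rough illustration of what that enables, and not part of this changeset, a Go source file guarded by a hypothetical build tag is only compiled when that tag is passed through `GOTAGS`:

```go
// +build mytag

// Hypothetical example: this file is only compiled when the "mytag" build tag
// is passed, e.g. with `make build GOTAGS='mytag'`, which the Makefile above
// expands to `go build -tags 'mytag' ...`.
package main

import "fmt"

func main() {
	fmt.Println("built with -tags 'mytag'")
}
```

Built without the tag, no source files in the package match the constraint, which is how optional features (and their heavier dependencies) can be compiled out.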
README.md | 131
@@ -2,18 +2,20 @@
 
 [](art/)
 
 [](https://goreportcard.com/report/github.com/purpleidea/mgmt)
 [](http://travis-ci.org/purpleidea/mgmt)
-[](DOCUMENTATION.md)
-[](https://godoc.org/github.com/purpleidea/mgmt)
-[](https://webchat.freenode.net/?channels=#mgmtconfig)
-[](https://ci.centos.org/job/purpleidea-mgmt/)
-[](https://copr.fedoraproject.org/coprs/purpleidea/mgmt/)
-[](https://aur.archlinux.org/packages/mgmt/)
+[](https://godoc.org/github.com/purpleidea/mgmt)
+[](https://webchat.freenode.net/?channels=#mgmtconfig)
+[](https://ci.centos.org/job/purpleidea-mgmt/)
 
 ## Community:
-Come join us on IRC in [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) on Freenode!
-You may like the [#mgmtconfig](https://twitter.com/hashtag/mgmtconfig) hashtag if you're on [Twitter](https://twitter.com/#!/purpleidea).
+Come join us in the `mgmt` community!
+
+| Medium | Link |
+|---|---|---|
+| IRC | [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) on Freenode |
+| Twitter | [@mgmtconfig](https://twitter.com/mgmtconfig) & [#mgmtconfig](https://twitter.com/hashtag/mgmtconfig) |
+| Mailing list | [mgmtconfig-list@redhat.com](https://www.redhat.com/mailman/listinfo/mgmtconfig-list) |
 
 ## Status:
 Mgmt is a fairly new project.
@@ -21,35 +23,21 @@ We're working towards being minimally useful for production environments.
 We aren't feature complete for what we'd consider a 1.x release yet.
 With your help you'll be able to influence our design and get us there sooner!
 
-## Questions:
-Please join the [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) IRC community!
-If you have a well phrased question that might benefit others, consider asking it by sending a patch to the documentation [FAQ](https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md#usage-and-frequently-asked-questions) section. I'll merge your question, and a patch with the answer!
-
-## Quick start:
-* Make sure you have golang version 1.6 or greater installed.
-* If you do not have a GOPATH yet, create one and export it:
-```
-mkdir $HOME/gopath
-export GOPATH=$HOME/gopath
-```
-* You might also want to add the GOPATH to your `~/.bashrc` or `~/.profile`.
-* For more information you can read the [GOPATH documentation](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable).
-* Next download the mgmt code base, and switch to that directory:
-```
-go get -u github.com/purpleidea/mgmt
-cd $GOPATH/src/github.com/purpleidea/mgmt
-```
-* Get the remaining golang deps with `go get ./...`, or run `make deps` if you're comfortable with how we install them.
-* Run `make build` to get a freshly built `mgmt` binary.
-* Run `time ./mgmt run --yaml examples/graph0.yaml --converged-timeout=5 --tmp-prefix` to try out a very simple example!
-* To run continuously in the default mode of operation, omit the `--converged-timeout` option.
-* Have fun hacking on our future technology!
-
-## Examples:
-Please look in the [examples/](examples/) folder for more examples!
-
 ## Documentation:
-Please see: the manually created [DOCUMENTATION.md](DOCUMENTATION.md) (also available as [PDF](https://pdfdoc-purpleidea.rhcloud.com/pdf/https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md)) and the automatically generated [GoDoc documentation](https://godoc.org/github.com/purpleidea/mgmt).
+Please read, enjoy and help improve our documentation!
+
+| Documentation | Additional Notes |
+|---|---|
+| [general documentation](docs/documentation.md) | for everyone |
+| [quick start guide](docs/quick-start-guide.md) | for mgmt developers |
+| [resource guide](docs/resource-guide.md) | for mgmt developers |
+| [godoc API reference](https://godoc.org/github.com/purpleidea/mgmt) | for mgmt developers |
+| [prometheus guide](docs/prometheus.md) | for everyone |
+| [puppet guide](docs/puppet-guide.md) | for puppet sysadmins |
+
+## Questions:
+Please ask in the [community](#community)!
+If you have a well phrased question that might benefit others, consider asking it by sending a patch to the documentation [FAQ](https://github.com/purpleidea/mgmt/blob/master/docs/documentation.md#usage-and-frequently-asked-questions) section. I'll merge your question, and a patch with the answer!
 
 ## Roadmap:
 Please see: [TODO.md](TODO.md) for a list of upcoming work and TODO items.
@@ -61,52 +49,35 @@ Please set the `DEBUG` constant in [main.go](https://github.com/purpleidea/mgmt/
 Bonus points if you provide a [shell](https://github.com/purpleidea/mgmt/tree/master/test/shell) or [OMV](https://github.com/purpleidea/mgmt/tree/master/test/omv) reproducible test case.
 Feel free to read my article on [debugging golang programs](https://ttboj.wordpress.com/2016/02/15/debugging-golang-programs/).
 
-## Dependencies:
-* golang 1.6 or higher (required, available in most distros)
-* golang libraries (required, available with `go get`)
-```
-go get github.com/coreos/etcd/client
-go get gopkg.in/yaml.v2
-go get gopkg.in/fsnotify.v1
-go get github.com/urfave/cli
-go get github.com/coreos/go-systemd/dbus
-go get github.com/coreos/go-systemd/util
-go get github.com/coreos/pkg/capnslog
-go get github.com/rgbkrk/libvirt-go
-```
-* stringer (optional for building), available as a package on some platforms, otherwise via `go get`
-```
-go get golang.org/x/tools/cmd/stringer
-```
-* pandoc (optional, for building a pdf of the documentation)
-* graphviz (optional, for building a visual representation of the graph)
-
 ## Patches:
 We'd love to have your patches! Please send them by email, or as a pull request.
 
 ## On the web:
-* James Shubin; blog: [Next generation configuration mgmt](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
-* James Shubin; video: [Introductory recording from DevConf.cz 2016](https://www.youtube.com/watch?v=GVhpPF0j-iE&html5=1)
-* James Shubin; video: [Introductory recording from CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=fNeooSiIRnA&html5=1)
-* Julian Dunn; video: [On mgmt at CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=kfF9IATUask&t=1949&html5=1)
-* Walter Heck; slides: [On mgmt at CfgMgmtCamp.eu 2016](http://www.slideshare.net/olindata/configuration-management-time-for-a-4th-generation/3)
-* Marco Marongiu; blog: [On mgmt](http://syslog.me/2016/02/15/leap-or-die/)
-* Felix Frank; blog: [From Catalog To Mgmt (on puppet to mgmt "transpiling")](https://ffrank.github.io/features/2016/02/18/from-catalog-to-mgmt/)
-* James Shubin; blog: [Automatic edges in mgmt (...and the pkg resource)](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
-* James Shubin; blog: [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
-* John Arundel; tweet: [“Puppet’s days are numbered.”](https://twitter.com/bitfield/status/732157519142002688)
-* Felix Frank; blog: [Puppet, Meet Mgmt (on puppet to mgmt internals)](https://ffrank.github.io/features/2016/06/12/puppet,-meet-mgmt/)
-* Felix Frank; blog: [Puppet Powered Mgmt (puppet to mgmt tl;dr)](https://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/)
-* James Shubin; blog: [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
-* James Shubin; video: [Recording from CoreOSFest 2016](https://www.youtube.com/watch?v=KVmDCUA42wc&html5=1)
-* James Shubin; video: [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) ([Slides](https://annex.debconf.org//debconf-share/debconf16/slides/15-next-generation-config-mgmt.pdf))
-* Felix Frank; blog: [Edging It All In (puppet and mgmt edges)](https://ffrank.github.io/features/2016/07/12/edging-it-all-in/)
-* Felix Frank; blog: [Translating All The Things (puppet to mgmt translation warnings)](https://ffrank.github.io/features/2016/08/19/translating-all-the-things/)
-* James Shubin; video: [Recording from systemd.conf 2016](https://www.youtube.com/watch?v=jB992Zb3nH0&html5=1)
-* James Shubin; blog: [Remote execution in mgmt](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/)
-* James Shubin; video: [Recording from High Load Strategy 2016](https://vimeo.com/191493409)
-* James Shubin; video: [Recording from NLUUG 2016](https://www.youtube.com/watch?v=MmpwOQAb_SE&html5=1)
-* James Shubin; blog: [Send/Recv in mgmt](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/)
+| Author | Format | Subject |
+|---|---|---|
+| James Shubin | blog | [Next generation configuration mgmt](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/) |
+| James Shubin | video | [Introductory recording from DevConf.cz 2016](https://www.youtube.com/watch?v=GVhpPF0j-iE&html5=1) |
+| James Shubin | video | [Introductory recording from CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=fNeooSiIRnA&html5=1) |
+| Julian Dunn | video | [On mgmt at CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=kfF9IATUask&t=1949&html5=1) |
+| Walter Heck | slides | [On mgmt at CfgMgmtCamp.eu 2016](http://www.slideshare.net/olindata/configuration-management-time-for-a-4th-generation/3) |
+| Marco Marongiu | blog | [On mgmt](http://syslog.me/2016/02/15/leap-or-die/) |
+| Felix Frank | blog | [From Catalog To Mgmt (on puppet to mgmt "transpiling")](https://ffrank.github.io/features/2016/02/18/from-catalog-to-mgmt/) |
+| James Shubin | blog | [Automatic edges in mgmt (...and the pkg resource)](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/) |
+| James Shubin | blog | [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/) |
+| John Arundel | tweet | [“Puppet’s days are numbered.”](https://twitter.com/bitfield/status/732157519142002688) |
+| Felix Frank | blog | [Puppet, Meet Mgmt (on puppet to mgmt internals)](https://ffrank.github.io/features/2016/06/12/puppet,-meet-mgmt/) |
+| Felix Frank | blog | [Puppet Powered Mgmt (puppet to mgmt tl;dr)](https://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/) |
+| James Shubin | blog | [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/) |
+| James Shubin | video | [Recording from CoreOSFest 2016](https://www.youtube.com/watch?v=KVmDCUA42wc&html5=1) |
+| James Shubin | video | [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) ([Slides](https://annex.debconf.org//debconf-share/debconf16/slides/15-next-generation-config-mgmt.pdf)) |
+| Felix Frank | blog | [Edging It All In (puppet and mgmt edges)](https://ffrank.github.io/features/2016/07/12/edging-it-all-in/) |
+| Felix Frank | blog | [Translating All The Things (puppet to mgmt translation warnings)](https://ffrank.github.io/features/2016/08/19/translating-all-the-things/) |
+| James Shubin | video | [Recording from systemd.conf 2016](https://www.youtube.com/watch?v=jB992Zb3nH0&html5=1) |
+| James Shubin | blog | [Remote execution in mgmt](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/) |
+| James Shubin | video | [Recording from High Load Strategy 2016](https://vimeo.com/191493409) |
+| James Shubin | video | [Recording from NLUUG 2016](https://www.youtube.com/watch?v=MmpwOQAb_SE&html5=1) |
+| James Shubin | blog | [Send/Recv in mgmt](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/) |
+| James Shubin | blog | [Metaparameters in mgmt](https://ttboj.wordpress.com/2017/03/01/metaparameters-in-mgmt/) |
 
 ##
 
TODO.md | 16
@@ -1,16 +1,16 @@
 # TODO
 If you're looking for something to do, look here!
 Let us know if you're working on one of the items.
+If you'd like something to work on, ping @purpleidea and I'll create an issue
+tailored especially for you! Just let me know your approximate golang skill
+level and how many hours you'd like to spend on the patch.
 
 ## Package resource
 - [ ] getfiles support on debian [bug](https://github.com/hughsie/PackageKit/issues/118)
 - [ ] directory info on fedora [bug](https://github.com/hughsie/PackageKit/issues/117)
 - [ ] dnf blocker [bug](https://github.com/hughsie/PackageKit/issues/110)
-- [ ] install signal blocker [bug](https://github.com/hughsie/PackageKit/issues/109)
 
-## File resource [bug](https://github.com/purpleidea/mgmt/issues/13) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
-- [ ] chown/chmod support [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
-- [ ] user/group support [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
+## File resource [bug](https://github.com/purpleidea/mgmt/issues/64) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
 - [ ] recurse limit support [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
 - [ ] fanotify support [bug](https://github.com/go-fsnotify/fsnotify/issues/114)
 
@@ -21,7 +21,6 @@ Let us know if you're working on one of the items.
 - [ ] base resource improvements
 
 ## Timer resource
-- [ ] reset on recompile
 - [ ] increment algorithm (linear, exponential, etc...) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
 
 ## User/Group resource
@@ -29,7 +28,7 @@ Let us know if you're working on one of the items.
 - [ ] automatic edges to file resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
 
 ## Virt (libvirt) resource
-- [ ] base resource [bug](https://github.com/purpleidea/mgmt/issues/25)
+- [ ] base resource improvements [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
 
 ## Net (systemd-networkd) resource
 - [ ] base resource
@@ -44,7 +43,7 @@ Let us know if you're working on one of the items.
 - [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
 
 ## Http resource
-- [ ] base resource
+- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
 
 ## Etcd improvements
 - [ ] fix embedded etcd master race
@@ -52,6 +51,9 @@ Let us know if you're working on one of the items.
 ## Torrent/dht file transfer
 - [ ] base plumbing
 
+## GPG/Auth improvements
+- [ ] base plumbing
+
 ## Language improvements
 - [ ] language design
 - [ ] lexer/parser
Vagrantfile | 38 (vendored, new file)
@@ -0,0 +1,38 @@
+Vagrant.configure(2) do |config|
+config.ssh.forward_agent = true
+config.ssh.username = 'vagrant'
+config.vm.network "private_network", ip: "192.168.219.2"
+
+config.vm.synced_folder ".", "/vagrant", disabled: true
+
+config.vm.define "mgmt-dev" do |instance|
+instance.vm.box = "fedora/24-cloud-base"
+end
+
+config.vm.provider "virtualbox" do |v|
+v.memory = 1536
+v.cpus = 2
+end
+
+config.vm.provision "file", source: "vagrant/motd", destination: ".motd"
+config.vm.provision "shell", inline: "cp ~vagrant/.motd /etc/motd"
+
+config.vm.provision "file", source: "vagrant/mgmt.bashrc", destination: ".mgmt.bashrc"
+config.vm.provision "file", source: "~/.gitconfig", destination: ".gitconfig"
+
+# copied from make-deps.sh (with added git)
+config.vm.provision "shell", inline: "dnf install -y libvirt-devel golang golang-googlecode-tools-stringer hg git"
+
+# set up vagrant home
+script = <<-SCRIPT
+grep -q 'mgmt\.bashrc' ~/.bashrc || echo '. ~/.mgmt.bashrc' >>~/.bashrc
+. ~/.mgmt.bashrc
+go get -u github.com/purpleidea/mgmt
+cd ~/gopath/src/github.com/purpleidea/mgmt
+make deps
+SCRIPT
+config.vm.provision "shell" do |shell|
+shell.privileged = false
+shell.inline = script
+end
+end
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -29,24 +29,24 @@ import (
 // TODO: we could make a new function that masks out the state of certain
 // UID's, but at the moment the new Timer code has obsoleted the need...
 
-// Converger is the general interface for implementing a convergence watcher
+// Converger is the general interface for implementing a convergence watcher.
 type Converger interface { // TODO: need a better name
-Register() ConvergerUID
-IsConverged(ConvergerUID) bool // is the UID converged ?
-SetConverged(ConvergerUID, bool) error // set the converged state of the UID
-Unregister(ConvergerUID)
+Register() UID
+IsConverged(UID) bool // is the UID converged ?
+SetConverged(UID, bool) error // set the converged state of the UID
+Unregister(UID)
 Start()
 Pause()
 Loop(bool)
-ConvergedTimer(ConvergerUID) <-chan time.Time
+ConvergedTimer(UID) <-chan time.Time
 Status() map[uint64]bool
 Timeout() int // returns the timeout that this was created with
 SetStateFn(func(bool) error) // sets the stateFn
 }
 
-// ConvergerUID is the interface resources can use to notify with if converged
-// you'll need to use part of the Converger interface to Register initially too
-type ConvergerUID interface {
+// UID is the interface resources can use to notify with if converged. You'll
+// need to use part of the Converger interface to Register initially too.
+type UID interface {
 ID() uint64 // get Id
 Name() string // get a friendly name
 SetName(string)
@@ -61,7 +61,7 @@ type ConvergerUID interface {
 StopTimer() error
 }
 
-// converger is an implementation of the Converger interface
+// converger is an implementation of the Converger interface.
 type converger struct {
 timeout int // must be zero (instant) or greater seconds to run
 stateFn func(bool) error // run on converged state changes with state bool
@@ -73,17 +73,18 @@ type converger struct {
 status map[uint64]bool
 }
 
-// convergerUID is an implementation of the ConvergerUID interface
-type convergerUID struct {
+// cuid is an implementation of the UID interface.
+type cuid struct {
 converger Converger
 id uint64
 name string // user defined, friendly name
 mutex sync.Mutex
 timer chan struct{}
 running bool // is the above timer running?
+wg sync.WaitGroup
 }
 
-// NewConverger builds a new converger struct
+// NewConverger builds a new converger struct.
 func NewConverger(timeout int, stateFn func(bool) error) *converger {
 return &converger{
 timeout: timeout,
@@ -95,13 +96,13 @@ func NewConverger(timeout int, stateFn func(bool) error) *converger {
 }
 }
 
-// Register assigns a ConvergerUID to the caller
-func (obj *converger) Register() ConvergerUID {
+// Register assigns a UID to the caller.
+func (obj *converger) Register() UID {
 obj.mutex.Lock()
 defer obj.mutex.Unlock()
 obj.lastid++
 obj.status[obj.lastid] = false // initialize as not converged
-return &convergerUID{
+return &cuid{
 converger: obj,
 id: obj.lastid,
 name: fmt.Sprintf("%d", obj.lastid), // some default
@@ -110,28 +111,28 @@ func (obj *converger) Register() ConvergerUID {
 }
 }
 
-// IsConverged gets the converged status of a uid
-func (obj *converger) IsConverged(uid ConvergerUID) bool {
+// IsConverged gets the converged status of a uid.
+func (obj *converger) IsConverged(uid UID) bool {
 if !uid.IsValid() {
-panic(fmt.Sprintf("Id of ConvergerUID(%s) is nil!", uid.Name()))
+panic(fmt.Sprintf("the ID of UID(%s) is nil", uid.Name()))
 }
 obj.mutex.RLock()
 isConverged, found := obj.status[uid.ID()] // lookup
 obj.mutex.RUnlock()
 if !found {
-panic("Id of ConvergerUID is unregistered!")
+panic("the ID of UID is unregistered")
 }
 return isConverged
 }
 
-// SetConverged updates the converger with the converged state of the UID
-func (obj *converger) SetConverged(uid ConvergerUID, isConverged bool) error {
+// SetConverged updates the converger with the converged state of the UID.
+func (obj *converger) SetConverged(uid UID, isConverged bool) error {
 if !uid.IsValid() {
-return fmt.Errorf("Id of ConvergerUID(%s) is nil!", uid.Name())
+return fmt.Errorf("the ID of UID(%s) is nil", uid.Name())
 }
 obj.mutex.Lock()
 if _, found := obj.status[uid.ID()]; !found {
-panic("Id of ConvergerUID is unregistered!")
+panic("the ID of UID is unregistered")
 }
 obj.status[uid.ID()] = isConverged // set
 obj.mutex.Unlock() // unlock *before* poke or deadlock!
@@ -143,7 +144,7 @@ func (obj *converger) SetConverged(uid ConvergerUID, isConverged bool) error {
 return nil
 }
 
-// isConverged returns true if *every* registered uid has converged
+// isConverged returns true if *every* registered uid has converged.
 func (obj *converger) isConverged() bool {
 obj.mutex.RLock() // take a read lock
 defer obj.mutex.RUnlock()
@@ -155,10 +156,10 @@ func (obj *converger) isConverged() bool {
 return true
 }
 
-// Unregister dissociates the ConvergedUID from the converged checking
-func (obj *converger) Unregister(uid ConvergerUID) {
+// Unregister dissociates the ConvergedUID from the converged checking.
+func (obj *converger) Unregister(uid UID) {
 if !uid.IsValid() {
-panic(fmt.Sprintf("Id of ConvergerUID(%s) is nil!", uid.Name()))
+panic(fmt.Sprintf("the ID of UID(%s) is nil", uid.Name()))
 }
 obj.mutex.Lock()
 uid.StopTimer() // ignore any errors
@@ -167,30 +168,30 @@ func (obj *converger) Unregister(uid ConvergerUID) {
 uid.InvalidateID()
 }
 
-// Start causes a Converger object to start or resume running
+// Start causes a Converger object to start or resume running.
 func (obj *converger) Start() {
 obj.control <- true
 }
 
-// Pause causes a Converger object to stop running temporarily
+// Pause causes a Converger object to stop running temporarily.
 func (obj *converger) Pause() { // FIXME: add a sync ACK on pause before return
 obj.control <- false
 }
 
-// Loop is the main loop for a Converger object; it usually runs in a goroutine
-// TODO: we could eventually have each resource tell us as soon as it converges
-// and then keep track of the time delays here, to avoid callers needing select
+// Loop is the main loop for a Converger object. It usually runs in a goroutine.
+// TODO: we could eventually have each resource tell us as soon as it converges,
+// and then keep track of the time delays here, to avoid callers needing select.
 // NOTE: when we have very short timeouts, if we start before all the resources
-// have joined the map, then it might appears as if we converged before we did!
+// have joined the map, then it might appear as if we converged before we did!
 func (obj *converger) Loop(startPaused bool) {
 if obj.control == nil {
-panic("Converger not initialized correctly")
+panic("converger not initialized correctly")
 }
 if startPaused { // start paused without racing
 select {
 case e := <-obj.control:
 if !e {
-panic("Converger expected true!")
+panic("converger expected true")
 }
 }
 }
@@ -198,13 +199,13 @@ func (obj *converger) Loop(startPaused bool) {
 select {
 case e := <-obj.control: // expecting "false" which means pause!
 if e {
-panic("Converger expected false!")
+panic("converger expected false")
 }
 // now i'm paused...
 select {
 case e := <-obj.control:
 if !e {
-panic("Converger expected true!")
+panic("converger expected true")
 }
 // restart
 // kick once to refresh the check...
@@ -243,9 +244,9 @@ func (obj *converger) Loop(startPaused bool) {
 }
 }
 
-// ConvergedTimer adds a timeout to a select call and blocks until then
+// ConvergedTimer adds a timeout to a select call and blocks until then.
 // TODO: this means we could eventually have per resource converged timeouts
-func (obj *converger) ConvergedTimer(uid ConvergerUID) <-chan time.Time {
+func (obj *converger) ConvergedTimer(uid UID) <-chan time.Time {
 // be clever: if i'm already converged, this timeout should block which
 // avoids unnecessary new signals being sent! this avoids fast loops if
 // we have a low timeout, or in particular a timeout == 0
@@ -279,63 +280,65 @@ func (obj *converger) SetStateFn(stateFn func(bool) error) {
 obj.stateFn = stateFn
 }
 
-// Id returns the unique id of this UID object
-func (obj *convergerUID) ID() uint64 {
+// ID returns the unique id of this UID object.
+func (obj *cuid) ID() uint64 {
 return obj.id
 }
 
-// Name returns a user defined name for the specific convergerUID.
-func (obj *convergerUID) Name() string {
+// Name returns a user defined name for the specific cuid.
+func (obj *cuid) Name() string {
 return obj.name
 }
 
-// SetName sets a user defined name for the specific convergerUID.
-func (obj *convergerUID) SetName(name string) {
+// SetName sets a user defined name for the specific cuid.
+func (obj *cuid) SetName(name string) {
 obj.name = name
 }
 
-// IsValid tells us if the id is valid or has already been destroyed
-func (obj *convergerUID) IsValid() bool {
+// IsValid tells us if the id is valid or has already been destroyed.
+func (obj *cuid) IsValid() bool {
 return obj.id != 0 // an id of 0 is invalid
 }
 
-// InvalidateID marks the id as no longer valid
-func (obj *convergerUID) InvalidateID() {
+// InvalidateID marks the id as no longer valid.
+func (obj *cuid) InvalidateID() {
 obj.id = 0 // an id of 0 is invalid
 }
 
-// IsConverged is a helper function to the regular IsConverged method
-func (obj *convergerUID) IsConverged() bool {
+// IsConverged is a helper function to the regular IsConverged method.
+func (obj *cuid) IsConverged() bool {
 return obj.converger.IsConverged(obj)
 }
 
-// SetConverged is a helper function to the regular SetConverged notification
-func (obj *convergerUID) SetConverged(isConverged bool) error {
+// SetConverged is a helper function to the regular SetConverged notification.
+func (obj *cuid) SetConverged(isConverged bool) error {
 return obj.converger.SetConverged(obj, isConverged)
 }
 
-// Unregister is a helper function to unregister myself
-func (obj *convergerUID) Unregister() {
+// Unregister is a helper function to unregister myself.
+func (obj *cuid) Unregister() {
 obj.converger.Unregister(obj)
 }
 
-// ConvergedTimer is a helper around the regular ConvergedTimer method
-func (obj *convergerUID) ConvergedTimer() <-chan time.Time {
+// ConvergedTimer is a helper around the regular ConvergedTimer method.
+func (obj *cuid) ConvergedTimer() <-chan time.Time {
 return obj.converger.ConvergedTimer(obj)
 }
 
 // StartTimer runs an invisible timer that automatically converges on timeout.
-func (obj *convergerUID) StartTimer() (func() error, error) {
+func (obj *cuid) StartTimer() (func() error, error) {
 obj.mutex.Lock()
 if !obj.running {
 obj.timer = make(chan struct{})
 obj.running = true
 } else {
 obj.mutex.Unlock()
-return obj.StopTimer, fmt.Errorf("Timer already started!")
+return obj.StopTimer, fmt.Errorf("timer already started")
 }
 obj.mutex.Unlock()
+obj.wg.Add(1)
 go func() {
+defer obj.wg.Done()
 for {
 select {
 case _, ok := <-obj.timer: // reset signal channel
@@ -359,24 +362,25 @@ func (obj *convergerUID) StartTimer() (func() error, error) {
 }
 
 // ResetTimer resets the counter to zero if using a StartTimer internally.
-func (obj *convergerUID) ResetTimer() error {
+func (obj *cuid) ResetTimer() error {
 obj.mutex.Lock()
 defer obj.mutex.Unlock()
 if obj.running {
 obj.timer <- struct{}{} // send the reset message
 return nil
 }
-return fmt.Errorf("Timer hasn't been started!")
+return fmt.Errorf("timer hasn't been started")
 }
 
 // StopTimer stops the running timer permanently until a StartTimer is run.
-func (obj *convergerUID) StopTimer() error {
+func (obj *cuid) StopTimer() error {
 obj.mutex.Lock()
 defer obj.mutex.Unlock()
 if !obj.running {
-return fmt.Errorf("Timer isn't running!")
+return fmt.Errorf("timer isn't running")
 }
 close(obj.timer)
+obj.wg.Wait()
 obj.running = false
 return nil
 }
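For orientation, here is a minimal sketch of how a caller might use the renamed `Converger`/`UID` API shown in the hunks above. The import path is an assumption (the diff does not name the file's package location), so treat this as illustrative rather than as the project's actual wiring:

```go
package main

import (
	"fmt"
	"time"

	// Assumed import path; the diff above does not show where this package lives.
	"github.com/purpleidea/mgmt/converger"
)

func main() {
	// Converge once everything has been quiet for 5 seconds; the stateFn
	// callback runs whenever the overall converged state flips.
	conv := converger.NewConverger(5, func(converged bool) error {
		fmt.Printf("converged: %t\n", converged)
		return nil
	})
	go conv.Loop(false) // run the main loop in the background, unpaused

	uid := conv.Register() // one UID per participant (e.g. per resource)
	uid.SetName("example")
	defer uid.Unregister()

	uid.SetConverged(false) // still busy
	time.Sleep(1 * time.Second)

	// Typical pattern inside an event loop: if no work arrives before the
	// per-UID timer fires, report this participant as converged.
	select {
	case <-uid.ConvergedTimer():
		uid.SetConverged(true)
	case <-time.After(10 * time.Second):
		uid.SetConverged(false) // an event would normally land here
	}
}
```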
doc.go | 2
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
docs/.gitignore | 2 (vendored, new file)
@@ -0,0 +1,2 @@
+mgmt-documentation.pdf
+_build
docs/Makefile | 20 (new file)
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SPHINXPROJ = mgmt
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
docs/conf.py | 158 (new file)
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+#
+# mgmt documentation build configuration file, created by
+# sphinx-quickstart on Wed Feb 15 21:34:09 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+from recommonmark.parser import CommonMarkParser
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+
+source_parsers = {
+'.md': CommonMarkParser,
+}
+
+source_suffix = ['.rst', '.md']
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'mgmt'
+copyright = u'2013-2017+ James Shubin and the project contributors'
+author = u'James Shubin'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u''
+# The full version, including alpha/beta/rc tags.
+release = u''
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'venv']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+#html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+
+# -- Options for HTMLHelp output ------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'mgmtdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#
+# 'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#
+# 'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#
+# 'preamble': '',
+
+# Latex figure (float) alignment
+#
+# 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+(master_doc, 'mgmt.tex', u'mgmt Documentation',
+u'James Shubin', 'manual'),
+]
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+(master_doc, 'mgmt', u'mgmt Documentation',
+[author], 1)
+]
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+(master_doc, 'mgmt', u'mgmt Documentation',
+author, 'mgmt', 'A next generation config management prototype!',
+'Miscellaneous'),
+]
@@ -1,57 +1,16 @@
# mgmt

Available from:
[https://github.com/purpleidea/mgmt/](https://github.com/purpleidea/mgmt/)

This documentation is available in: [Markdown](https://github.com/purpleidea/mgmt/blob/master/docs/documentation.md) or [PDF](https://pdfdoc-purpleidea.rhcloud.com/pdf/https://github.com/purpleidea/mgmt/blob/master/docs/documentation.md) format.

## Overview

The `mgmt` tool is a next generation config management prototype. It's not yet
ready for production, but we hope to get there soon. Get involved today!

## Project Description

The mgmt tool is a distributed, event driven, config management tool, that
supports parallel execution, and librarification to be used as the management
@@ -63,11 +22,14 @@ For more information, you may like to read some blog posts from the author:
* [Automatic edges in mgmt](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
* [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
* [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
* [Remote execution in mgmt](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/)
* [Send/Recv in mgmt](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/)
* [Metaparameters in mgmt](https://ttboj.wordpress.com/2017/03/01/metaparameters-in-mgmt/)

There is also an [introductory video](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) available.
Older videos and other material [is available](https://github.com/purpleidea/mgmt/#on-the-web).

## Setup

During this prototype phase, the tool can be run out of the source directory.
You'll probably want to use ```./run.sh run --yaml examples/graph1.yaml``` to
@@ -75,12 +37,12 @@ get started. Beware that this _can_ cause data loss. Understand what you're
doing first, or perform these actions in a virtual environment such as the one
provided by [Oh-My-Vagrant](https://github.com/purpleidea/oh-my-vagrant).

## Features

This section details the numerous features of mgmt and some caveats you might
need to be aware of.

### Autoedges

Automatic edges, or AutoEdges, is the mechanism in mgmt by which it will
automatically create dependencies for you between resources. For example,
@@ -89,7 +51,7 @@ automatically ensure that any file resource you declare that matches a
file installed by your package resource will only be processed after the
package is installed.

#### Controlling autoedges

Though autoedges is likely to be very helpful and save you from having to declare
all dependencies explicitly, there are cases where this behaviour is
@@ -106,12 +68,12 @@ installation of the `mysql-server` package.
You can disable autoedges for a resource by setting the `autoedge` key on
the meta attributes of that resource to `false`.

#### Blog post

You can read the introductory blog post about this topic here:
[https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)

### Autogrouping

Automatic grouping or AutoGroup is the mechanism in mgmt by which it will
automatically group multiple resource vertices into a single one. This is
@@ -125,12 +87,12 @@ used for other use cases too.
You can disable autogrouping for a resource by setting the `autogroup` key on
the meta attributes of that resource to `false`.

#### Blog post

You can read the introductory blog post about this topic here:
[https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)

### Automatic clustering

Automatic clustering is a feature by which mgmt automatically builds, scales,
and manages the embedded etcd cluster which is compiled into mgmt itself. It is
@@ -141,12 +103,12 @@ If you prefer to avoid this feature. you can always opt to use an existing etcd
cluster that is managed separately from mgmt by pointing your mgmt agents at it
with the `--seeds` variable.
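
As a sketch of what this looks like in practice, the command below points a local mgmt agent at an external etcd endpoint. The endpoint URL is a placeholder invented for this example and is not taken from this document:

```
# Sketch only: use an externally managed etcd cluster instead of the
# embedded servers, by passing its client endpoint via --seeds.
./mgmt run --yaml examples/graph1.yaml --seeds http://192.0.2.10:2379
```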

#### Blog post

You can read the introductory blog post about this topic here:
[https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)

### Remote ("agent-less") mode

Remote mode is a special mode that lets you kick off mgmt runs on one or more
remote machines which are only accessible via SSH. In this mode the initiating
@@ -168,12 +130,12 @@ entire set of running mgmt agents will need to all simultaneously converge for
the group to exit. This is particularly useful for bootstrapping new clusters
which need to exchange information that is only available at run time.
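
As an illustration, here is a hedged sketch combining the `--remote` and `--tmp-prefix` flags documented in the command line reference below; the two graph file names are invented for this example:

```
# Sketch only: kick off runs on two SSH-only hosts in parallel. Each remote
# graph file is expected to specify the host it should run on.
./mgmt run --remote remote1.yaml --remote remote2.yaml --tmp-prefix
```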

#### Blog post

You can read the introductory blog post about this topic here:
[https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/)

### Puppet support

You can supply a Puppet manifest instead of creating the (YAML) graph manually.
Puppet must be installed and in `mgmt`'s search path. You also need the
@@ -195,12 +157,12 @@ Invoke `mgmt` with the `--puppet` switch, which supports 3 variants:

For more details and caveats see [Puppet.md](Puppet.md).
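
As a minimal sketch of the manifest-file variant (the manifest path is an arbitrary example, and the other `--puppet` variants are not shown here):

```
# Sketch only: build the resource graph from a Puppet manifest instead of a
# YAML graph file. Requires puppet and the ffrank-mgmtgraph module locally.
./mgmt run --puppet /path/to/my-manifest.pp
```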

#### Blog post

An introductory post on the Puppet support is on
[Felix's blog](http://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/).

## Resources

This section lists all the built-in resources and their properties. The
resource primitives in `mgmt` are typically more powerful than resources in
@@ -216,9 +178,11 @@ meta parameters aren't very useful when combined with certain resources, but
in general, it should be fairly obvious, such as when combining the `noop` meta
parameter with the [Noop](#Noop) resource.

* [Augeas](#Augeas): Manipulate files using augeas.
* [Exec](#Exec): Execute shell commands on the system.
* [File](#File): Manage files and directories.
* [Hostname](#Hostname): Manages the hostname on the system.
* [KV](#KV): Set a key value pair in our shared world database.
* [Msg](#Msg): Send log messages.
* [Noop](#Noop): A simple resource that does nothing.
* [Nspawn](#Nspawn): Manage systemd-machined nspawn containers.
@@ -228,45 +192,60 @@ parameter with the [Noop](#Noop) resource.
* [Timer](#Timer): Manage system systemd services.
* [Virt](#Virt): Manage virtual machines with libvirt.

### Augeas

The augeas resource uses [augeas](http://augeas.net/) commands to manipulate
files.

### Exec

The exec resource can execute commands on your system.

### File

The file resource manages files and directories. In `mgmt`, directories are
identified by a trailing slash in their path name. Files have no such slash.

It has the following properties:

- `path`: file path (directories have a trailing slash here)
- `content`: raw file content
- `state`: either `exists` (the default value) or `absent`
- `mode`: octal unix file permissions
- `owner`: username or uid for the file owner
- `group`: group name or gid for the file group

#### Path

The path property specifies the file or directory that we are managing.

#### Content

The content property is a string that specifies the desired file contents.

#### Source

The source property points to a source file or directory path that we wish to
copy over and use as the desired contents for our resource.

#### State

The state property describes the action we'd like to apply for the resource. The
possible values are: `exists` and `absent`.

#### Recurse

The recurse property limits whether file resource operations should recurse into
and monitor directory contents with a depth greater than one.

#### Force

The force property is required if we want the file resource to be able to change
a file into a directory or vice-versa. If such a change is needed, but the force
property is not set to `true`, then this file resource will error.

### Hostname

The hostname resource manages static, transient/dynamic and pretty hostnames
on the system and watches them for changes.
@@ -290,55 +269,79 @@ The pretty hostname is a free-form UTF8 host name for presentation to the user.
Hostname is the fallback value for all 3 fields above. If only `hostname` is
specified, it will set all 3 fields to this value.

### KV

The KV resource sets a key and value pair in the global world database. This is
quite useful for setting a flag after a number of resources have run. It will
ignore database updates to the value that are greater in compare order than the
requested key if the `SkipLessThan` parameter is set to true. If we receive a
refresh, then the stored value will be reset to the requested value even if the
stored value is greater.

#### Key
The string key used to store the key.

#### Value
The string value to set. This can also be set via Send/Recv.

#### SkipLessThan
If this parameter is set to `true`, then it will ignore updating the value as
long as the database versions are greater than the requested value. The compare
operation used is based on the `SkipCmpStyle` parameter.

#### SkipCmpStyle
By default this converts the string values to integers and compares them as you
would expect.

### Msg

The msg resource sends messages to the main log, or an external service such
as systemd's journal.

### Noop

The noop resource does absolutely nothing. It does have some utility in testing
`mgmt` and also as a placeholder in the resource graph.

### Nspawn

The nspawn resource is used to manage systemd-machined style containers.

### Password

The password resource can generate a random string to be used as a password. It
will re-generate the password if it receives a refresh notification.

### Pkg

The pkg resource is used to manage system packages. This resource works on many
different distributions because it uses the underlying packagekit facility which
supports different backends for different environments. This ensures that we
have great Debian (deb/dpkg) and Fedora (rpm/dnf) support simultaneously.

### Svc

The service resource is still very WIP. Please help us by improving it!

### Timer

This resource needs better documentation. Please help us by improving it!

### Virt

The virt resource can manage virtual machines via libvirt.

## Usage and frequently asked questions
(Send your questions as a patch to this FAQ! I'll review it, merge it, and
respond by commit with the answer.)

### Why did you start this project?

I wanted a next generation config management solution that didn't have all of
the design flaws or limitations that the current generation of tools do, and no
tool existed!

### Why did you use etcd? What about consul?

Etcd and consul are both written in golang, which made them the top two
contenders for my prototype. Ultimately a choice had to be made, and etcd was
@@ -346,7 +349,7 @@ chosen, but it was also somewhat arbitrary. If there is available interest,
good reasoning, *and* patches, then we would consider either switching or
supporting both, but this is not a high priority at this time.

### Can I use an existing etcd cluster instead of the automatic embedded servers?

Yes, it's possible to use an existing etcd cluster instead of the automatic,
elastic embedded etcd servers. To do so, simply point to the cluster with the
@@ -357,7 +360,7 @@ The downside to this approach is that you won't benefit from the automatic
elastic nature of the embedded etcd servers, and that you're responsible if you
accidentally break your etcd cluster, or if you use an unsupported version.

### What does the error message about an inconsistent dataDir mean?

If you get an error message similar to:

@@ -375,7 +378,7 @@ starting up, and as a result, a default endpoint never gets added. The solution
is to either reconcile the mistake, and if there is no important data saved, you
can remove the etcd dataDir. This is typically `/var/lib/mgmt/etcd/member/`.

### Why do resources have both a `Compare` method and an `IFF` (on the UID) method?

The `Compare()` methods are for determining if two resources are effectively the
same, which is used to make graph change deltas efficient. This is when we want
@@ -391,14 +394,14 @@ equality. In the future it might be helpful or sane to merge the two similar
comparison functions although for now they are separate because they
actually answer different questions.

### Did you know that there is a band named `MGMT`?

I didn't realize this when naming the project, and it is accidental. After much
anguishing, I chose the name because it was short and I thought it was
appropriately descriptive. If you need a less ambiguous search term or phrase,
you can try using `mgmtconfig` or `mgmt config`.

### You didn't answer my question, or I have a question!

It's best to ask on [IRC](https://webchat.freenode.net/?channels=#mgmtconfig)
to see if someone can help you. Once we get a big enough community going, we'll
@@ -408,40 +411,41 @@ and I'll do my best to help. If you have a good question, please add it as a
patch to this documentation. I'll merge your question, and add a patch with the
answer!

## Reference
Please note that there are a number of undocumented options. For more
information on these options, please view the source at:
[https://github.com/purpleidea/mgmt/](https://github.com/purpleidea/mgmt/).
If you feel that a well used option needs documenting here, please patch it!

### Overview of reference
* [Meta parameters](#meta-parameters): List of available resource meta parameters.
* [Graph definition file](#graph-definition-file): Main graph definition file.
* [Command line](#command-line): Command line parameters.
* [Compilation options](#compilation-options): Compilation options.

### Meta parameters
These meta parameters are special parameters (or properties) which can apply to
any resource. The usefulness of doing so will depend on the particular meta
parameter and resource combination.

#### AutoEdge
Boolean. Should we generate auto edges for this resource?

#### AutoGroup
Boolean. Should we attempt to automatically group this resource with others?

#### Noop
Boolean. Should the Apply portion of the CheckApply method of the resource
make any changes? Noop is a concatenation of no-operation.

#### Retry
Integer. The number of times to retry running the resource on error. Use -1 for
infinite. This currently applies for both the Watch operation (which can fail)
and for the CheckApply operation. While they could have separate values, I've
decided to use the same ones for both until there's a proper reason to want to
do something differently for the Watch errors.

#### Delay
Integer. Number of milliseconds to wait between retries. The same value is
shared between the Watch and CheckApply retries. This currently applies for both
the Watch operation (which can fail) and for the CheckApply operation. While
@@ -449,63 +453,113 @@ they could have separate values, I've decided to use the same ones for both
until there's a proper reason to want to do something differently for the Watch
errors.

#### Poll
Integer. Number of seconds to wait between `CheckApply` checks. If this is
greater than zero, then the standard event based `Watch` mechanism for this
resource is replaced with a simple polling mechanism. In general, this is not
recommended, unless you have a very good reason for doing so.

Please keep in mind that if you have a resource which changes every `I` seconds,
and you poll it every `J` seconds, and you've asked for a converged timeout of
`K` seconds, and `I <= J <= K`, then your graph will likely never converge.

When polling, the system detects that a resource is not converged if its
`CheckApply` method returns false. This allows a resource which changes every
`I` seconds, and which is polled every `J` seconds, and with a converged timeout
of `K` seconds to still converge when `J <= K`, as long as `I > J || I > K`,
which is another way of saying that if the resource finally settles down to give
the graph enough time, it can probably converge.

#### Limit
Float. Maximum rate of `CheckApply` runs started per second. Useful to limit
an especially _eventful_ process from causing excessive checks to run. This
defaults to `+Infinity` which adds no limiting. If you change this value, you
will also need to change the `Burst` value to a non-zero value. Please see the
[rate](https://godoc.org/golang.org/x/time/rate) package for more information.

#### Burst
Integer. Burst is the maximum number of runs which can happen without invoking
the rate limiter as designated by the `Limit` value. If the `Limit` is not set
to `+Infinity`, this must be a non-zero value. Please see the
[rate](https://godoc.org/golang.org/x/time/rate) package for more information.

#### Sema
List of string ids. Sema is a P/V style counting semaphore which can be used to
limit parallelism during the CheckApply phase of resource execution. Each
resource can have `N` different semaphores which share a graph global namespace.
Each semaphore has a maximum count associated with it. The default value of the
size is 1 (one) if size is unspecified. Each string id is the unique id of the
semaphore. If the id contains a trailing colon (:) followed by a positive
integer, then that value is the max size for that semaphore. Valid semaphore
id's include: `some_id`, `hello:42`, `not:smart:4` and `:13`. It is expected
that the last bare example be only used by the engine to add a global semaphore.

### Graph definition file
graph.yaml is the compiled graph definition file. The format is currently
undocumented, but by looking through the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples)
you can probably figure out most of it, as it's fairly intuitive.

### Command line
The main interface to the `mgmt` tool is the command line. For the most recent
documentation, please run `mgmt --help`.

#### `--yaml <graph.yaml>`
Point to a graph file to run.

#### `--converged-timeout <seconds>`
Exit if the machine has converged for approximately this many seconds.
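
For example, a minimal sketch of a one-shot run that exits once nothing has changed for 30 consecutive seconds, using the example graph referenced earlier:

```
# Sketch only: apply the graph, then exit after ~30s of convergence.
./mgmt run --yaml examples/graph1.yaml --converged-timeout 30
```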

#### `--max-runtime <seconds>`
Exit when the agent has run for approximately this many seconds. This is not
generally recommended, but may be useful for users who know what they're doing.

#### `--noop`
Globally force all resources into no-op mode. This also disables the export to
etcd functionality, but does not disable resource collection; however, all
resources that are collected will have their individual noop settings set.
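
A short sketch of a global dry run using this flag, again with the example graph referenced earlier:

```
# Sketch only: walk the graph and report, but apply no changes.
./mgmt run --yaml examples/graph1.yaml --noop
```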

#### `--sema <size>`
Globally add a counting semaphore of this size to each resource in the graph.
The semaphore will get given an id of `:size`. In other words if you specify a
size of 42, you can expect a semaphore named `:42`. It is expected that
consumers of the semaphore metaparameter always include a prefix to avoid a
collision with this globally defined semaphore. The size value must be greater
than zero at this time. The traditional non-parallel execution found in config
management tools such as `Puppet` can be obtained with `--sema 1`.
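
A sketch of the Puppet-style serialized run mentioned above:

```
# Sketch only: add a global semaphore of size one to every resource,
# forcing traditional non-parallel execution.
./mgmt run --yaml examples/graph1.yaml --sema 1
```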

#### `--remote <graph.yaml>`
Point to a graph file to run on the remote host specified within. This parameter
can be used multiple times if you'd like to remotely run on multiple hosts in
parallel.

#### `--allow-interactive`
Allow interactive prompting for SSH passwords if there is no authentication
method that works.

#### `--ssh-priv-id-rsa`
Specify the path for finding SSH keys. This defaults to `~/.ssh/id_rsa`. To
never use this method of authentication, set this to the empty string.

#### `--cconns`
The maximum number of concurrent remote ssh connections to run. This defaults
to `0`, which means unlimited.

#### `--no-caching`
Don't allow remote caching of the remote execution binary. This will require
the binary to be copied over for every remote execution, but it limits the
likelihood that there is leftover information from the configuration process.

#### `--prefix <path>`
Specify a path to a custom working directory prefix. This directory will get
created if it does not exist. This usually defaults to `/var/lib/mgmt/`. This
can't be combined with the `--tmp-prefix` option. It can be combined with the
`--allow-tmp-prefix` option.

#### `--tmp-prefix`
If this option is specified, a temporary prefix will be used instead of the
default prefix. This can't be combined with the `--prefix` option.

#### `--allow-tmp-prefix`
If this option is specified, we will attempt to fall back to a temporary prefix
if the primary prefix couldn't be created. This is useful for avoiding failures
in environments where the primary prefix may or may not be available, but you'd
@@ -513,7 +567,35 @@ like to try. The canonical example is when running `mgmt` with `--remote` there
might be a cached copy of the binary in the primary prefix, but in case there's
no binary available continue working in a temporary directory to avoid failure.

### Compilation options

You can control some compilation variables by using environment variables.

#### Disable libvirt support

If you wish to compile mgmt without libvirt, you can use the following command:

```
GOTAGS=novirt make build
```

#### Disable augeas support

If you wish to compile mgmt without augeas support, you can use the following command:

```
GOTAGS=noaugeas make build
```

#### Combining compile-time flags

You can combine multiple tags by using a space-separated list:

```
GOTAGS="noaugeas novirt" make build
```

## Examples
For example configurations, please consult the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples) directory in the git
source repository. It is available from:

@@ -541,7 +623,7 @@ EOF
sudo systemctl daemon-reload
```

## Development

This is a project that I started in my free time in 2013. Development is driven
by all of our collective patches! Dive right in, and start hacking!
@@ -551,9 +633,9 @@ You can follow along [on my technical blog](https://ttboj.wordpress.com/).
To report any bugs, please file a ticket at: [https://github.com/purpleidea/mgmt/issues](https://github.com/purpleidea/mgmt/issues).

## Authors

Copyright (C) 2013-2017+ James Shubin and the project contributors

Please see the
[AUTHORS](https://github.com/purpleidea/mgmt/tree/master/AUTHORS) file
docs/index.rst (new file, 17 lines)
@@ -0,0 +1,17 @@
.. mgmt documentation master file, created by
   sphinx-quickstart on Wed Feb 15 21:34:09 2017.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to mgmt's documentation!
================================

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   documentation
   quick-start-guide
   resource-guide
   prometheus
   puppet-guide
docs/prometheus.md (new file, 66 lines)
@@ -0,0 +1,66 @@
# Prometheus support

Mgmt comes with built-in prometheus support. It is disabled by default, and
can be enabled with the `--prometheus` command line switch.

By default, the prometheus instance will listen on [`127.0.0.1:9233`][pd]. You
can change this setting by using the `--prometheus-listen` cli option.

To have mgmt bind the prometheus interface on 0.0.0.0:45001, use:
`./mgmt r --prometheus --prometheus-listen :45001`
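
Assuming the default listen address above, you can check the exporter by scraping it manually. The `/metrics` path is the usual prometheus convention and is an assumption here, not something stated in this document:

```
# Sketch only: fetch the metrics exposed by a locally running
# './mgmt r --prometheus' instance.
curl http://127.0.0.1:9233/metrics
```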

## Metrics

Mgmt exposes three kinds of metrics: _go_ metrics, _etcd_ metrics and _mgmt_
metrics.

### go metrics

We use the [prometheus go_collector][pgc] to expose go metrics. Those metrics
are mainly useful for debugging and perf testing.

### etcd metrics

mgmt exposes etcd metrics. Read more in the [upstream documentation][etcdm].

### mgmt metrics

Here is a list of the metrics we provide:

- `mgmt_resources_total`: The number of resources that mgmt is managing
- `mgmt_checkapply_total`: The number of CheckApply's that mgmt has run
- `mgmt_failures_total`: The number of resources that have failed
- `mgmt_failures_current`: The number of resources that have failed
- `mgmt_graph_start_time_seconds`: Start time of the current graph since unix epoch in seconds

For each metric, you will get some extra labels:

- `kind`: The kind of mgmt resource

For `mgmt_checkapply_total`, those extra labels are set:

- `eventful`: "true" or "false", if the CheckApply triggered some changes
- `errorful`: "true" or "false", if the CheckApply reported an error
- `apply`: "true" or "false", if the CheckApply ran in apply or noop mode

## Alerting

You can use prometheus to alert you upon changes or failures. We do not provide
such templates yet, but we plan to provide some examples in this repository.
Patches welcome!

## Grafana

We do not have grafana dashboards yet. Patches welcome!

## External resources

- [prometheus website](https://prometheus.io/)
- [prometheus documentation](https://prometheus.io/docs/introduction/overview/)
- [prometheus best practices regarding metrics naming](https://prometheus.io/docs/practices/naming/)
- [grafana website](http://grafana.org/)

[pgc]: https://github.com/prometheus/client_golang/blob/master/prometheus/go_collector.go
[etcdm]: https://coreos.com/etcd/docs/latest/metrics.html
[pd]: https://github.com/prometheus/prometheus/wiki/Default-port-allocation
@@ -1,22 +1,13 @@
# Puppet guide

`mgmt` can use Puppet as its source for the configuration graph.
This document goes into detail on how this works, and lists
some pitfalls and limitations.

For basic instructions on how to use the Puppet support, see
the [main documentation](documentation.md#puppet-support).

## Prerequisites

You need Puppet installed in your system. It is not important how you
get it. On the most common Linux distributions, you can use packages
@@ -29,14 +20,16 @@ Any release of Puppet's 3.x and 4.x series should be suitable for use with
`mgmt`. Most importantly, make sure to install the `ffrank-mgmtgraph` Puppet
module (referred to below as "the translator module").

```
puppet module install ffrank-mgmtgraph
```

Please note that the module is not required on your Puppet master (if you
use a master/agent setup). It's needed on the machine that runs `mgmt`.
You can install the module on the master anyway, so that it gets distributed
to your agents through Puppet's `pluginsync` mechanism.

### Testing the Puppet side

The following command should run successfully and print a YAML hash on your
terminal:
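
That command, repeated here from the hunk context that follows, is:

```
# Should print a YAML hash describing the translated graph.
puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": ensure => present }'
```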
@@ -48,9 +41,9 @@ puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": ensure => present }'
You can use this CLI to test any manifests before handing them straight
to `mgmt`.

## Writing a suitable manifest

### Unsupported attributes

`mgmt` inherited its resource module from Puppet, so by and large, it's quite
possible to express `mgmt` graphs in terms of Puppet manifests. However,
@@ -62,8 +55,10 @@ For example, at the time of writing this, the `file` type in `mgmt` had no
notion of permissions (the file `mode`) yet. This led to the following
warning (among others that will be discussed below):

```
$ puppet mgmtgraph print --code 'file { "/tmp/foo": mode => "0600" }'
Warning: cannot translate: File[/tmp/foo] { mode => "600" } (attribute is ignored)
```

This is a heads-up for the user, because the resulting `mgmt` graph will
in fact not pass this information to the `/tmp/foo` file resource, and
@@ -71,7 +66,7 @@ in fact not pass this information to the `/tmp/foo` file resource, and
manifests that are written expressly for `mgmt` is not sensible and should
be avoided.

### Unsupported resources

Puppet has a fairly large number of
[built-in types](https://docs.puppet.com/puppet/latest/reference/type.html),
@@ -91,28 +86,32 @@ this overhead can amount to several orders of magnitude.
Avoid Puppet types that `mgmt` does not implement (yet).

### Avoiding common warnings

Many resource parameters in Puppet take default values. For the most part,
the translator module just ignores them. However, there are cases in which
Puppet will default to convenient behavior that `mgmt` cannot quite replicate.
For example, translating a plain `file` resource will lead to a warning message:

```
$ puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": }'
Warning: File[/tmp/mgmt-test] uses the 'puppet' file bucket, which mgmt cannot do. There will be no backup copies!
```

The reason is that by default, Puppet assumes the following parameter value
(among others):

```puppet
file { "/tmp/mgmt-test":
	backup => 'puppet',
}
```

To avoid this, specify the parameter explicitly:

```
$ puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": backup => false }'
```

This is tedious in a more complex manifest. A good simplification is the
following [resource default](https://docs.puppet.com/puppet/latest/reference/lang_defaults.html)
@@ -125,7 +124,7 @@ File { backup => false }
|
|||||||
If you encounter similar warnings from other types and/or parameters,
|
If you encounter similar warnings from other types and/or parameters,
|
||||||
use the same approach to silence them if possible.
|
use the same approach to silence them if possible.
|
||||||
|
|
||||||
##Configuring Puppet
|
## Configuring Puppet
|
||||||
|
|
||||||
Since `mgmt` uses an actual Puppet CLI behind the scenes, you might
|
Since `mgmt` uses an actual Puppet CLI behind the scenes, you might
|
||||||
need to tweak some of Puppet's runtime options in order to make it
|
need to tweak some of Puppet's runtime options in order to make it
|
||||||
@@ -143,16 +142,20 @@ control all of them, through its `--puppet-conf` option. It allows
|
|||||||
you to specify which `puppet.conf` file should be used during
|
you to specify which `puppet.conf` file should be used during
|
||||||
translation.
|
translation.
|
||||||
|
|
||||||
mgmt run --puppet /opt/my-manifest.pp --puppet-conf /etc/mgmt/puppet.conf
|
```
|
||||||
|
mgmt run --puppet /opt/my-manifest.pp --puppet-conf /etc/mgmt/puppet.conf
|
||||||
|
```
|
||||||
|
|
||||||
Within this file, you can just specify any needed options in the
|
Within this file, you can just specify any needed options in the
|
||||||
`[main]` section:
|
`[main]` section:
|
||||||
|
|
||||||
[main]
|
```
|
||||||
server=mgmt-master.example.net
|
[main]
|
||||||
vardir=/var/lib/mgmt/puppet
|
server=mgmt-master.example.net
|
||||||
|
vardir=/var/lib/mgmt/puppet
|
||||||
|
```
|
||||||
|
|
||||||
##Caveats
|
## Caveats
|
||||||
|
|
||||||
Please see the [README](https://github.com/ffrank/puppet-mgmtgraph/blob/master/README.md)
|
Please see the [README](https://github.com/ffrank/puppet-mgmtgraph/blob/master/README.md)
|
||||||
of the translator module for the current state of supported and unsupported
|
of the translator module for the current state of supported and unsupported
|
||||||

docs/quick-start-guide.md (new file)
@@ -0,0 +1,93 @@
# Quick start guide

## Introduction
This guide is intended for developers. Once `mgmt` is minimally viable, we'll
publish a quick start guide for users too. In the meantime, please contribute!
If you're brand new to `mgmt`, it's probably a good idea to start by reading the
[introductory article](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
or to watch an [introductory video](https://github.com/purpleidea/mgmt/#on-the-web).
Once you're familiar with the general idea, please start hacking...

## Vagrant
If you would like to avoid doing the following steps manually, we have prepared
a [Vagrant](https://www.vagrantup.com/) environment for your convenience. From
the project directory, run a `vagrant up`, and then a `vagrant status`. From
there, you can `vagrant ssh` into the `mgmt` machine. The MOTD will explain the
rest.

## Dependencies
Software projects have a few different kinds of dependencies. There are _build_
dependencies, _runtime_ dependencies, and additionally, a few extra dependencies
required for running the _test_ suite.

### Build
* `golang` 1.6 or higher (required, available in most distros)
* golang libraries (required, available with `go get ./...`); a partial list includes:
```
github.com/coreos/etcd/client
gopkg.in/yaml.v2
gopkg.in/fsnotify.v1
github.com/urfave/cli
github.com/coreos/go-systemd/dbus
github.com/coreos/go-systemd/util
github.com/libvirt/libvirt-go
```
* `stringer` (optional), available as a package on some platforms, otherwise via `go get`
```
golang.org/x/tools/cmd/stringer
```
* `pandoc` (optional), for building a pdf of the documentation

### Runtime
A relatively modern GNU/Linux system should be able to run `mgmt` without any
problems. Since `mgmt` runs as a single statically compiled binary, all of the
library dependencies are included. It is expected that certain advanced
resources require host-specific facilities to work. These requirements are
listed below:

| Resource | Dependency | Version |
|----------|-------------------|---------|
| file | inotify | ? |
| hostname | systemd-hostnamed | ? |
| nspawn | systemd-nspawn | ? |
| pkg | packagekitd | ? |
| svc | systemd | ? |
| virt | libvirtd | ? |

For building a visual representation of the graph, `graphviz` is required.

### Testing
* golint `github.com/golang/lint/golint`

## Quick start
* Make sure you have golang version 1.6 or greater installed.
* If you do not have a GOPATH yet, create one and export it:
```
mkdir $HOME/gopath
export GOPATH=$HOME/gopath
```
* You might also want to add the GOPATH to your `~/.bashrc` or `~/.profile`.
* For more information you can read the [GOPATH documentation](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable).
* Next download the mgmt code base, and switch to that directory:
```
mkdir -p $GOPATH/src/github.com/purpleidea/
cd $GOPATH/src/github.com/purpleidea/
git clone --recursive https://github.com/purpleidea/mgmt/
cd $GOPATH/src/github.com/purpleidea/mgmt
```
* Run `make deps` to install system and golang dependencies. Take a look at `misc/make-deps.sh` for details.
* Run `make build` to get a freshly built `mgmt` binary.
* Run `time ./mgmt run --yaml examples/graph0.yaml --converged-timeout=5 --tmp-prefix` to try out a very simple example!
* To run continuously in the default mode of operation, omit the `--converged-timeout` option.
* Have fun hacking on our future technology!

## Examples
Please look in the [examples/](../examples/) folder for some examples!

## Installation
Installation of `mgmt` from distribution packages currently needs improvement.
At the moment we have:
* [COPR](https://copr.fedoraproject.org/coprs/purpleidea/mgmt/)
* [Arch](https://aur.archlinux.org/packages/mgmt/)

Please contribute more! We'd especially like to see a Debian package!

docs/resource-guide.md (new file)
@@ -0,0 +1,575 @@
# Resource guide

## Overview

The `mgmt` tool has built-in resource primitives which make up the building
blocks of any configuration. Each instance of a resource is mapped to a single
vertex in the resource [graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph).
This guide is meant to instruct developers on how to write a brand new resource.
Since `mgmt` and the core resources are written in golang, some prior golang
knowledge is assumed.

## Theory

Resources in `mgmt` are similar to resources in other systems in that they are
[idempotent](https://en.wikipedia.org/wiki/Idempotence). Our resources are
uniquely different in that they can detect when their state has changed, and as
a result can run to revert or repair this change instantly. For some background
on this design, please read the
[original article](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
on the subject.

## Resource API

To implement a resource in `mgmt` it must satisfy the
[`Res`](https://github.com/purpleidea/mgmt/blob/master/resources/resources.go)
interface. What follows are each of the method signatures and a description of
each.

### Default
```golang
Default() Res
```

This returns a populated resource struct as a `Res`. It shouldn't populate any
values which already have the correct default as the golang zero value. In
general it is preferable if the zero values make for the correct defaults.

#### Example
```golang
// Default returns some sensible defaults for this resource.
func (obj *FooRes) Default() Res {
    return &FooRes{
        Answer: 42, // sometimes, defaults shouldn't be the zero value
    }
}
```

### Validate
```golang
Validate() error
```

This method is used to validate if the populated resource struct is a valid
representation of the resource kind. If it does not conform to the resource
specifications, it should generate an error. If you notice that this method is
quite large, it might be an indication that you should reconsider the parameter
list and interface to this resource. This method is called _before_ `Init`.

#### Example
```golang
// Validate reports any problems with the struct definition.
func (obj *FooRes) Validate() error {
    if obj.Answer != 42 { // validate whatever you want
        return fmt.Errorf("expected an answer of 42")
    }
    return obj.BaseRes.Validate() // remember to call the base method!
}
```

### Init
```golang
Init() error
```

This is called to initialize the resource. If something goes wrong, it should
return an error. It should set the resource `kind`, do any resource specific
work, and finish by calling the `Init` method of the base resource.

#### Example
```golang
// Init initializes the Foo resource.
func (obj *FooRes) Init() error {
    obj.BaseRes.kind = "foo" // must lower case resource kind
    // run the resource specific initialization, and error if anything fails
    if some_error {
        return err // something went wrong!
    }
    return obj.BaseRes.Init() // call the base resource init
}
```

This method is always called after `Validate` has run successfully, with the
exception that we can't prevent a malicious or buggy `libmgmt` user from
skipping it. In other words, you should expect `Validate` to have run first, but
you shouldn't allow `Init` to dangerously `rm -rf /$the_world` if your code only
checks `$the_world` in `Validate`. Remember to always program safely!

### Close
```golang
Close() error
```

This is called to cleanup after the resource. It is usually not necessary, but
can be useful if you'd like to properly close a persistent connection that you
opened in the `Init` method and were using throughout the resource.

#### Example
```golang
// Close runs some cleanup code for this resource.
func (obj *FooRes) Close() error {
    err := obj.conn.Close() // close some internal connection

    // call base close, b/c we're overriding
    if e := obj.BaseRes.Close(); err == nil {
        err = e
    } else if e != nil {
        err = multierr.Append(err, e) // list of errors
    }
    return err
}
```

You should probably check the return errors of your internal methods, and pass
on an error if something went wrong. Remember to always call the base `Close`
method! If you plan to return early if you hit an internal error, then at least
call it with a defer!

### CheckApply
```golang
CheckApply(apply bool) (checkOK bool, err error)
```

`CheckApply` is where the real _work_ is done. Under normal circumstances, this
function should check if the state of this resource is correct, and if so, it
should return: `(true, nil)`. If the `apply` variable is set to `true`, then
this means that we should then proceed to run the changes required to bring the
resource into the correct state. If the `apply` variable is set to `false`, then
the resource is operating in _noop_ mode and _no operations_ should be executed!

After having executed the necessary operations to bring the resource back into
the desired state, or after having detected that the state was incorrect, but
that changes can't be made because `apply` is `false`, you should then return
`(false, nil)`.

You must cause the resource to converge during a single execution of this
function. If you cannot, then you must return an error! The exception to this
rule is that if an external force changes the state of the resource while it is
being remedied, it is possible to return from this function even though the
resource isn't now converged. This is not a bug, as the resource's `Watch`
facility will detect the change, ultimately resulting in a subsequent call to
`CheckApply`.

#### Example
```golang
// CheckApply does the idempotent work of checking and applying resource state.
func (obj *FooRes) CheckApply(apply bool) (bool, error) {
    // check the state
    if state_is_okay { return true, nil } // done early! :)
    // state was bad
    if !apply { return false, nil } // don't apply; !stateok, nil
    // do the apply!
    if any_error { return false, err } // anytime there's an err!
    return false, nil // after success applying
}
```

The `CheckApply` function is called by the `mgmt` engine when it believes a call
is necessary. Under certain conditions when a `Watch` call does not invalidate
the state of the resource, and no refresh call was sent, its execution might be
skipped. This is an engine optimization, and not a bug. It is mentioned here in
the documentation in case you are confused as to why a debug message you've
added to the code isn't always printed.

#### Refresh notifications
Some resources may choose to support receiving refresh notifications. In general
these should be avoided if possible, but nevertheless, they do make sense in
certain situations. Resources that support these need to verify if one was sent
during the CheckApply phase of execution. This is accomplished by calling the
`Refresh() bool` method of the resource, and inspecting the return value. This
is only necessary if you plan to perform a refresh action. Refresh actions
should still respect the `apply` variable, and no system changes should be made
if it is `false`. Refresh notifications are generated by any resource when an
action is applied by that resource and are transmitted through graph edges which
have enabled their propagation. Resources that currently perform some refresh
action include `svc`, `timer`, and `password`.
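
The following is a minimal sketch of how a resource that supports this could
react to a refresh notification inside `CheckApply`. Only the `Refresh()` call
and the `apply` check come from the description above; the `restartService`
helper is purely hypothetical.

```golang
// a sketch: reacting to a refresh notification inside CheckApply
if obj.Refresh() { // was a refresh requested via one of our incoming edges?
    if !apply {
        return false, nil // a refresh action is pending, but we're in noop mode
    }
    if err := restartService(); err != nil { // hypothetical refresh action
        return false, err // anytime there's an err!
    }
    return false, nil // we performed the refresh action
}
```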

#### Paired execution
For many resources it is not uncommon to see `CheckApply` run twice in rapid
succession. This is usually not a pathological occurrence, but rather a healthy
pattern which is a consequence of the event system. When the state of the
resource is incorrect, `CheckApply` will run to remedy the state. In response to
having just changed the state, it is usually the case that this repair will
trigger the `Watch` code! In response, a second `CheckApply` is triggered, which
will likely find the state to now be correct.

#### Summary
* Anytime an error occurs during `CheckApply`, you should return `(false, err)`.
* If the state is correct and no changes are needed, return `(true, nil)`.
* You should only make changes to the system if `apply` is set to `true`.
* After checking the state and possibly applying the fix, return `(false, nil)`.
* Returning `(true, err)` is a programming error and will cause a `Fatal`.

### Watch
```golang
Watch(chan *Event) error
```

`Watch` is a main loop that runs and sends messages when it detects that the
state of the resource might have changed. To send a message you should write to
the input event channel using the `Event` helper method. The Watch function
should run continuously until a shutdown message is received. If at any time
something goes wrong, you should return an error, and the `mgmt` engine will
handle possibly restarting the main loop based on the `retry` meta parameters.

It is better to send an event notification which turns out to be spurious, than
to miss a possible event. Resources which can miss events are incorrect and need
to be re-engineered so that this isn't the case. If you have an idea for a
resource which would fit this criterion, but you can't find a solution, please
contact the `mgmt` maintainers so that this problem can be investigated and a
possible system level engineering fix can be found.

You may have trouble deciding how much resource state checking should happen in
the `Watch` loop versus deferring it all to the `CheckApply` method. You may
want to put some simple fast path checking in `Watch` to avoid generating
obviously spurious events, but in general it's best to keep the `Watch` method
as simple as possible. Contact the `mgmt` maintainers if you're not sure.

If the resource is activated in `polling` mode, the `Watch` method will not get
executed. As a result, the resource must still work even if the main loop is not
running.

#### Select
The lifetime of most resources' `Watch` method should be spent in an infinite
loop that is bounded by a `select` call. The `select` call is the point where
our method hands back control to the engine (and the kernel) so that we can
sleep until something of interest wakes us up. In this loop we must process
events from the engine via the `<-obj.Events()` call, and receive events for our
resource itself!

#### Events
If we receive an internal event from the `<-obj.Events()` method, we can read it
with the ReadEvent helper function. This function tells us if we should shutdown
our resource, and if we should generate an event. When we want to send an event,
we use the `Event` helper function. It is also important to mark the resource
state as `dirty` if we believe it might have changed. We do this with the
`StateOK(false)` function.

#### Startup
Once the `Watch` function has finished starting up successfully, it is important
to generate one event to notify the `mgmt` engine that we're now listening
successfully, so that it can run an initial `CheckApply` to ensure we're safely
tracking a healthy state and that we didn't miss anything when `Watch` was down
or from before `mgmt` was running. It does this by calling the `Running` method.

#### Converged
The engine might be asked to shutdown when the entire state of the system has
not seen any changes for some duration of time. The engine can determine this
automatically, but each resource can block this if it is absolutely necessary.
To do this, the `Watch` method should get the `ConvergedUID` handle that has
been prepared for it by the engine. This is done by calling the `ConvergerUID`
method on the resource object. The result can be used to set the converged
status with `SetConverged`, and to notify when the particular timeout has been
reached by waiting on `ConvergedTimer`.

Instead of interacting with the `ConvergedUID` with these two methods, we can
instead use the `StartTimer` and `ResetTimer` methods which accomplish the same
thing, but provide a `select`-free interface for different coding situations.

This particular facility is most likely not required for most resources. It may
prove to be useful if a resource wants to start off a long operation, but avoid
sending out erroneous `Event` messages to keep things alive until it finishes.
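
As a rough sketch only: a `Watch` main loop that takes part in convergence
detection might be shaped like the snippet below. It uses just the method names
mentioned above (`ConvergerUID`, `SetConverged`, `ConvergedTimer`); treat the
exact signatures and error handling as assumptions and compare with the
existing resources before copying it.

```golang
// a sketch of converged handling inside a Watch main loop
cuid := obj.ConvergerUID() // the handle the engine prepared for us
for {
    select {
    case event := <-obj.Events():
        cuid.SetConverged(false) // activity: we are not converged
        // ... process the event as usual (error handling elided) ...
        _ = event

    case <-cuid.ConvergedTimer():
        cuid.SetConverged(true) // the timeout elapsed without any activity
        continue
    }
}
```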

#### Example
```golang
// Watch is the listener and main loop for this resource.
func (obj *FooRes) Watch() error {
    // setup the Foo resource
    var err error
    if obj.foo, err = OpenFoo(); err != nil {
        return err // we couldn't startup
    }
    defer obj.whatever.CloseFoo() // shutdown our foo resource when we're done

    // notify engine that we're running
    if err := obj.Running(); err != nil {
        return err // bubble up a NACK...
    }

    var send = false // send event?
    var exit *error
    for {
        select {
        case event := <-obj.Events():
            // we avoid sending events on unpause
            if exit, send = obj.ReadEvent(event); exit != nil {
                return *exit // exit
            }

        // the actual events!
        case event := <-obj.foo.Events:
            if is_an_event {
                send = true // used below
                obj.StateOK(false) // dirty
            }

        // event errors
        case err := <-obj.foo.Errors:
            return err // will cause a retry or permanent failure
        }

        // do all our event sending all together to avoid duplicate msgs
        if send {
            send = false
            obj.Event() // send the event!
        }
    }
}
```

#### Summary
* Remember to call the appropriate `converger` methods throughout the resource.
* Remember to call `Running` when the `Watch` is running successfully.
* Remember to process internal events and shutdown promptly if asked to.
* Ensure the design of your resource is well thought out.
* Have a look at the existing resources for a rough idea of how this all works.

### Compare
```golang
Compare(Res) bool
```

Each resource must have a `Compare` method. This takes as input another resource
and must return whether they are identical or not. This is used for identifying
if an existing resource can be used in place of a new one with a similar set of
parameters. In particular, when switching from one graph to a new (possibly
identical) graph, this avoids recomputing the state for resources which don't
change or that are sufficiently similar that they don't need to be swapped out.

In general if all the resource properties are identical, then they usually don't
need to be changed. On occasion, not all of them need to be compared, in
particular if they store some generated state, or if they aren't significant in
some way.

#### Example
```golang
// Compare two resources and return if they are equivalent.
func (obj *FooRes) Compare(res Res) bool {
    switch res.(type) {
    case *FooRes: // only compare to other resources of the Foo kind!
        res := res.(*FooRes)
        if !obj.BaseRes.Compare(res) { // call base Compare
            return false
        }
        if obj.Name != res.Name {
            return false
        }
        if obj.whatever != res.whatever {
            return false
        }
        if obj.Flag != res.Flag {
            return false
        }
    default:
        return false // different kind of resource
    }
    return true // they must match!
}
```

### UIDs
```golang
UIDs() []ResUID
```

The `UIDs` method returns a list of `ResUID` interfaces that represent the
particular resource uniquely. This is used with the AutoEdges API to determine
if another resource can match a dependency to this one.
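
Below is a minimal sketch of a `UIDs` implementation, modeled on the built-in
resources. The `FooUID` type and its `name` field are illustrative, and the
embedded `BaseUID` helper (with its `name`/`kind` fields) is an assumption taken
from how the core resources in this package appear to do it.

```golang
// FooUID is a unique identifier for a FooRes. It is only a sketch.
type FooUID struct {
    BaseUID        // assumption: the common base UID helper from this package
    name    string // whichever parameter(s) make this resource unique
}

// UIDs includes all the parameters which identify this resource uniquely.
func (obj *FooRes) UIDs() []ResUID {
    x := &FooUID{
        BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
        name:    obj.Name,
    }
    return []ResUID{x}
}
```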

### AutoEdges
```golang
AutoEdges() AutoEdge
```

This returns a struct that implements the `AutoEdge` interface. This struct
is used to match other resources that might be relevant dependencies for this
resource.
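
A resource that has no automatic edges to offer can simply return nil; a
minimal sketch:

```golang
// AutoEdges returns the AutoEdge interface. This resource has none to offer.
func (obj *FooRes) AutoEdges() AutoEdge {
    return nil // we don't generate any automatic edges
}
```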

### CollectPattern
```golang
CollectPattern() string
```

This is currently a stub and will be updated once the DSL is further along.
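
Since this is a stub, a sketch that matches the signature shown above and
returns an empty pattern is all that is needed for now:

```golang
// CollectPattern is currently a stub; there is no collection pattern yet.
func (obj *FooRes) CollectPattern() string {
    return "" // XXX: to be filled in once the DSL is further along
}
```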

### UnmarshalYAML
```golang
UnmarshalYAML(unmarshal func(interface{}) error) error // optional
```

This is optional, but recommended for any resource that will have a YAML
accessible struct, and an entry in the `GraphConfig` struct. It is not required
because to do so would mean that third-party or custom resources (such as those
someone writes to use with `libmgmt`) would have to implement this needlessly.

The signature intentionally matches what is required to satisfy the `go-yaml`
[Unmarshaler](https://godoc.org/gopkg.in/yaml.v2#Unmarshaler) interface.

#### Example
```golang
// UnmarshalYAML is the custom unmarshal handler for this struct.
// It is primarily useful for setting the defaults.
func (obj *FooRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
    type rawRes FooRes // indirection to avoid infinite recursion

    def := obj.Default()     // get the default
    res, ok := def.(*FooRes) // put in the right format
    if !ok {
        return fmt.Errorf("could not convert to FooRes")
    }
    raw := rawRes(*res) // convert; the defaults go here

    if err := unmarshal(&raw); err != nil {
        return err
    }

    *obj = FooRes(raw) // restore from indirection with type conversion!
    return nil
}
```

## Further considerations
There is some additional information that any resource writer will need to know.
Each issue is listed separately below!

### Resource struct
Each resource will implement methods as pointer receivers on a resource struct.
The resource struct must include an anonymous reference to the `BaseRes` struct.
The naming convention for resources is that they end with a `Res` suffix. If
you'd like your resource to be accessible by the `YAML` graph API (GAPI), then
you'll need to include the appropriate YAML fields as shown below.

#### Example
```golang
type FooRes struct {
    BaseRes `yaml:",inline"` // base properties

    Whatever string `yaml:"whatever"` // you pick!
    Bar      int    // no yaml, used as public output value for send/recv
    Baz      bool   `yaml:"baz"` // something else

    something string // some private field
}
```

### YAML
In addition to labelling your resource struct with YAML fields, you must also
add an entry to the internal `GraphConfig` struct. It is a fairly
straightforward one-line patch.

```golang
type GraphConfig struct {
    // [snip...]
    Resources struct {
        Noop []*resources.NoopRes `yaml:"noop"`
        File []*resources.FileRes `yaml:"file"`
        // [snip...]
        Foo []*resources.FooRes `yaml:"foo"` // tada :)
    }
}
```

It's also recommended that you add the [UnmarshalYAML](#unmarshalyaml) method to
your resources so that unspecified values are given sane defaults.

### Gob registration
All resources must be registered with the `golang` _gob_ module so that they can
be encoded and decoded. Make sure to include the following code snippet for this
to work.

```golang
import "encoding/gob"
func init() { // special golang method that runs once
    gob.Register(&FooRes{}) // substitute your resource here
}
```

## Automatic edges
Automatic edges in `mgmt` are well described in [this article](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/).
The best example of this technique can be seen in the `svc` resource.
Unfortunately no further documentation about this subject has been written. To
expand this section, please send a patch! Please contact us if you'd like to
work on a resource that uses this feature, or to add it to an existing one!

## Automatic grouping
Automatic grouping in `mgmt` is well described in [this article](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/).
The best example of this technique can be seen in the `pkg` resource.
Unfortunately no further documentation about this subject has been written. To
expand this section, please send a patch! Please contact us if you'd like to
work on a resource that uses this feature, or to add it to an existing one!

## Send/Recv
In `mgmt` there is a novel concept called _Send/Recv_. For some background,
please [read the introductory article](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/).
When using this feature, the engine will automatically send the user specified
value to the intended destination without requiring any resource specific code.
Any time that one of the destination values is changed, the engine automatically
marks the resource state as `dirty`. To detect if a particular value was
received, and if it changed (during this invocation of CheckApply) from the
previous value, you can query the Recv parameter. It will contain a `map` of all
the keys which can be received on, and the value has a `Changed` property which
will indicate whether the value was updated on this particular `CheckApply`
invocation. The type of the sending key must match that of the receiving one.
This can _only_ be done inside of the `CheckApply` function!

```golang
// inside CheckApply, probably near the top
if val, exists := obj.Recv["SomeKey"]; exists {
    log.Printf("SomeKey was sent to us from: %s[%s].%s", val.Res.Kind(), val.Res.GetName(), val.Key)
    if val.Changed {
        log.Printf("SomeKey was just updated!")
        // you may want to invalidate some local cache
    }
}
```

Astute readers will note that there isn't anything that prevents a user from
sending an identically typed value to some arbitrary (public) key that the
resource author hadn't considered! While this is true, resources should probably
work within this problem space anyways. The rule of thumb is that any public
parameter which is normally used in a resource can be used safely.

One subtle scenario is that if a resource creates a local cache or stores a
computation that depends on the value of a public parameter and will require
invalidation should that public parameter change, then you must detect that
scenario and invalidate the cache when it occurs. This *must* be processed
before there is a possibility of failure in CheckApply, because if we fail (and
possibly run again) the subsequent send->recv transfer might not have a new
value to copy, and therefore we won't see this notification of change.
Therefore, it is important to process these promptly, if they must not be lost,
such as for cache invalidation.
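
For example, a hedged sketch of such an early invalidation, reusing the
`Whatever` parameter from the struct example above (the `cachedResult` field is
hypothetical):

```golang
// near the top of CheckApply, before anything that could fail...
if val, exists := obj.Recv["Whatever"]; exists && val.Changed {
    // the Whatever parameter was just received with a new value, so any
    // cached state derived from its old value is stale; drop it right away
    obj.cachedResult = nil // hypothetical private cache field
}
```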

Remember, `Send/Recv` only changes your resource code if you cache state.

## Composite resources
Composite resources are resources which embed one or more existing resources.
This is useful to prevent code duplication in higher level resource scenarios.
The best example of this technique can be seen in the `nspawn` resource, which
partially embeds a `svc` resource, but without its `Watch`.
Unfortunately no further documentation about this subject has been written. To
expand this section, please send a patch! Please contact us if you'd like to
work on a resource that uses this feature, or to add it to an existing one!

## Frequently asked questions
(Send your questions as a patch to this FAQ! I'll review it, merge it, and
respond by commit with the answer.)

### Can I write resources in a different language?
Currently `golang` is the only supported language for built-in resources. We
might consider allowing external resources to be imported in the future. This
will likely require a language that can expose a C-like API, such as `python` or
`ruby`. Custom `golang` resources are already possible when using mgmt as a lib.
Higher level resource collections will be possible once the `mgmt` DSL is ready.

### What new resource primitives need writing?
There are still many ideas for new resources that haven't been written yet. If
you'd like to contribute one, please contact us and tell us about your idea!

### Where can I find more information about mgmt?
Additional blog posts, videos and other material [is available!](https://github.com/purpleidea/mgmt/#on-the-web).

## Suggestions
If you have any ideas for API changes or other improvements to resource writing,
please let us know! We're still pre 1.0 and pre 0.1 and happy to break API in
order to get it right!

etcd/etcd.go
@@ -1,5 +1,5 @@
// Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -37,11 +37,12 @@
//
// Smoke testing:
// mkdir /tmp/mgmt{A..E}
-// ./mgmt run --yaml examples/etcd1a.yaml --hostname h1 --tmp-prefix
+// ./mgmt run --yaml examples/etcd1a.yaml --hostname h1 --tmp-prefix --no-pgp
-// ./mgmt run --yaml examples/etcd1b.yaml --hostname h2 --tmp-prefix --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382
+// ./mgmt run --yaml examples/etcd1b.yaml --hostname h2 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382
-// ./mgmt run --yaml examples/etcd1c.yaml --hostname h3 --tmp-prefix --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2383 --server-urls http://127.0.0.1:2384
+// ./mgmt run --yaml examples/etcd1c.yaml --hostname h3 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2383 --server-urls http://127.0.0.1:2384
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 put /_mgmt/idealClusterSize 3
-// ./mgmt run --yaml examples/etcd1d.yaml --hostname h4 --tmp-prefix --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2385 --server-urls http://127.0.0.1:2386
+// ./mgmt run --yaml examples/etcd1d.yaml --hostname h4 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2385 --server-urls http://127.0.0.1:2386
+// ./mgmt run --yaml examples/etcd1e.yaml --hostname h5 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2387 --server-urls http://127.0.0.1:2388
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 member list
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2381 put /_mgmt/idealClusterSize 5
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2381 member list
@@ -92,7 +93,7 @@ const (
)

var (
-errApplyDeltaEventsInconsistent = errors.New("Etcd: ApplyDeltaEvents: Inconsistent key!")
+errApplyDeltaEventsInconsistent = errors.New("inconsistent key in ApplyDeltaEvents")
)

// AW is a struct for the AddWatcher queue
@@ -316,7 +317,7 @@ func (obj *EmbdEtcd) Connect(reconnect bool) error {
if emax > maxClientConnectRetries {
log.Printf("Etcd: The dataDir (%s) might be inconsistent or corrupt.", obj.dataDir)
log.Printf("Etcd: Please see: %s", "https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md#what-does-the-error-message-about-an-inconsistent-datadir-mean")
-obj.cError = fmt.Errorf("Can't find an available endpoint.")
+obj.cError = fmt.Errorf("can't find an available endpoint")
return obj.cError
}
err = &CtxDelayErr{time.Duration(emax) * time.Second, "No endpoints available yet!"} // retry with backoff...
@@ -367,7 +368,7 @@ func (obj *EmbdEtcd) Startup() error {
// if we have no endpoints, it means we are bootstrapping...
if !bootstrapping {
log.Println("Etcd: Startup: Getting initial values...")
-if nominated, err := EtcdNominated(obj); err == nil {
+if nominated, err := Nominated(obj); err == nil {
obj.nominated = nominated // store a local copy
} else {
log.Printf("Etcd: Startup: Nominate lookup error.")
@@ -376,7 +377,7 @@ func (obj *EmbdEtcd) Startup() error {
}

// get initial ideal cluster size
-if idealClusterSize, err := EtcdGetClusterSize(obj); err == nil {
+if idealClusterSize, err := GetClusterSize(obj); err == nil {
obj.idealClusterSize = idealClusterSize
log.Printf("Etcd: Startup: Ideal cluster size is: %d", idealClusterSize)
} else {
@@ -395,7 +396,7 @@ func (obj *EmbdEtcd) Startup() error {
if !obj.noServer && bootstrapping {
log.Printf("Etcd: Bootstrapping...")
// give an initial value to the obj.nominate map we keep in sync
-// this emulates EtcdNominate(obj, obj.hostname, obj.serverURLs)
+// this emulates Nominate(obj, obj.hostname, obj.serverURLs)
obj.nominated[obj.hostname] = obj.serverURLs // initial value
// NOTE: when we are stuck waiting for the server to start up,
// it is probably happening on this call right here...
@@ -406,11 +407,11 @@ func (obj *EmbdEtcd) Startup() error {
if !obj.noServer && len(obj.serverURLs) > 0 {
// we run this in a go routine because it blocks waiting for server
log.Printf("Etcd: Startup: Volunteering...")
-go EtcdVolunteer(obj, obj.serverURLs)
+go Volunteer(obj, obj.serverURLs)
}

if bootstrapping {
-if err := EtcdSetClusterSize(obj, obj.idealClusterSize); err != nil {
+if err := SetClusterSize(obj, obj.idealClusterSize); err != nil {
log.Printf("Etcd: Startup: Ideal cluster size storage error.")
obj.Destroy()
return fmt.Errorf("Etcd: Startup: Error: %v", err)
@@ -431,7 +432,7 @@ func (obj *EmbdEtcd) Destroy() error {

// this should also trigger an unnominate, which should cause a shutdown
log.Printf("Etcd: Destroy: Unvolunteering...")
-if err := EtcdVolunteer(obj, nil); err != nil { // unvolunteer so we can shutdown...
+if err := Volunteer(obj, nil); err != nil { // unvolunteer so we can shutdown...
log.Printf("Etcd: Destroy: Error: %v", err) // we have a problem
}

@@ -578,7 +579,7 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
log.Fatal("Etcd: CtxError: Error: Unexpected lack of error!")
}
if obj.exiting {
-obj.ctxErr = fmt.Errorf("Etcd: CtxError: Exit in progress!")
+obj.ctxErr = fmt.Errorf("exit in progress")
return ctx, obj.ctxErr
}

@@ -599,14 +600,14 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
if retriesErr, ok := err.(*CtxRetriesErr); ok { // custom retry error
log.Printf("Etcd: CtxError: Reason: %s", retriesErr.Error())
if retriesErr.Retries == 0 {
-obj.ctxErr = fmt.Errorf("Etcd: CtxError: CtxRetriesErr: No more retries!")
+obj.ctxErr = fmt.Errorf("no more retries due to CtxRetriesErr")
return ctx, obj.ctxErr
}
return ctx, nil
}

if permanentErr, ok := err.(*CtxPermanentErr); ok { // custom permanent error
-obj.ctxErr = fmt.Errorf("Etcd: CtxError: Reason: %s", permanentErr.Error())
+obj.ctxErr = fmt.Errorf("error due to CtxPermanentErr: %s", permanentErr.Error())
return ctx, obj.ctxErr // quit
}

@@ -626,7 +627,7 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
}

if err == grpc.ErrClientConnTimeout { // sometimes caused by "too many colons" misconfiguration
-return ctx, fmt.Errorf("Etcd: Error: Misconfiguration: %v", err) // permanent failure?
+return ctx, fmt.Errorf("misconfiguration: %v", err) // permanent failure?
}

// this can happen if my client connection shuts down, but without any
@@ -670,7 +671,7 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
log.Printf("Etcd: CtxError: Reconnecting...")
if err := obj.Connect(true); err != nil {
defer obj.rLock.Unlock()
-obj.ctxErr = fmt.Errorf("Etcd: Permanent connect error: %v", err)
+obj.ctxErr = fmt.Errorf("permanent connect error: %v", err)
return ctx, obj.ctxErr
}
if obj.flags.Debug {
@@ -694,7 +695,7 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
// if you hit this code path here, please report the unmatched error!
log.Printf("Etcd: CtxError: Unknown error(%T): %+v", err, err)
time.Sleep(1 * time.Second)
-obj.ctxErr = fmt.Errorf("Etcd: CtxError: Unknown error!")
+obj.ctxErr = fmt.Errorf("unknown CtxError")
return ctx, obj.ctxErr
}

@@ -1188,7 +1189,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
return nil // if we're not a server, we're not a leader, return
}

-membersMap, err := EtcdMembers(obj) // map[uint64]string
+membersMap, err := Members(obj) // map[uint64]string
if err != nil {
return fmt.Errorf("Etcd: Members: Error: %+v", err)
}
@@ -1211,7 +1212,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
}
}

-leader, err := EtcdLeader(obj) // XXX: race!
+leader, err := Leader(obj) // XXX: race!
if err != nil {
log.Printf("Etcd: Leader: Error: %+v", err)
return fmt.Errorf("Etcd: Leader: Error: %+v", err)
@@ -1224,7 +1225,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
// i am the leader!

// get the list of available volunteers
-volunteersMap, err := EtcdVolunteers(obj)
+volunteersMap, err := Volunteers(obj)
if err != nil {
log.Printf("Etcd: Volunteers: Error: %+v", err)
return fmt.Errorf("Etcd: Volunteers: Error: %+v", err)
@@ -1244,7 +1245,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
// if we're the only member left, just shutdown...
if len(members) == 1 && members[0] == obj.hostname && len(quitters) == 1 && quitters[0] == obj.hostname {
log.Printf("Etcd: Quitters: Shutting down self...")
-if err := EtcdNominate(obj, obj.hostname, nil); err != nil { // unnominate myself
+if err := Nominate(obj, obj.hostname, nil); err != nil { // unnominate myself
return &CtxDelayErr{1 * time.Second, fmt.Sprintf("error shutting down self: %v", err)}
}
return nil
@@ -1266,7 +1267,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
// NOTE: storing peerURLs when they're already in volunteers/ is
// redundant, but it seems to be necessary for a sane algorithm.
// nominate before we call the API so that members see it first!
-EtcdNominate(obj, chosen, peerURLs)
+Nominate(obj, chosen, peerURLs)
// XXX: add a ttl here, because once we nominate someone, we
// need to give them up to N seconds to start up after we run
// the MemberAdd API because if they don't, in some situations
@@ -1279,7 +1280,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
// member at a time!

log.Printf("Etcd: Member Add: %v", peerURLs)
-mresp, err := EtcdMemberAdd(obj, peerURLs)
+mresp, err := MemberAdd(obj, peerURLs)
if err != nil {
// on error this function will run again, which is good
// because we need to make sure to run the below parts!
@@ -1310,7 +1311,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
// programming error
log.Fatalf("Etcd: Member Remove: Error: %v(%v) not in members list!", quitter, mID)
}
-EtcdNominate(obj, quitter, nil) // unnominate
+Nominate(obj, quitter, nil) // unnominate
// once we issue the above unnominate, that peer will
// shutdown, and this might cause us to loose quorum,
// therefore, let that member remove itself, and then
@@ -1325,7 +1326,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
log.Printf("Etcd: Waiting %d seconds for %s to self remove...", selfRemoveTimeout, quitter)
time.Sleep(selfRemoveTimeout * time.Second)
// in case the removed member doesn't remove itself, do it!
-removed, err := EtcdMemberRemove(obj, mID)
+removed, err := MemberRemove(obj, mID)
if err != nil {
return fmt.Errorf("Etcd: Member Remove: Error: %+v", err)
}
@@ -1372,7 +1373,7 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {
// leader once the current leader calls the MemberAdd API and it
// steps down trying to form a two host cluster. Instead, we can
// look at the event response data to read the nominated values!
-//nominated, err = EtcdNominated(obj) // nope, won't always work
+//nominated, err = Nominated(obj) // nope, won't always work
// since we only see what has *changed* in the response data, we
// have to keep track of the original state and apply the deltas
// this must be idempotent in case it errors and is called again
@@ -1386,7 +1387,7 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {

} else {
// TODO: should we just use the above delta method for everything?
-//nominated, err := EtcdNominated(obj) // just get it
+//nominated, err := Nominated(obj) // just get it
//if err != nil {
// return fmt.Errorf("Etcd: Nominate: Error: %+v", err)
//}
@@ -1435,7 +1436,7 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {

// XXX: just put this wherever for now so we don't block
// nominate self so "member" list is correct for peers to see
-EtcdNominate(obj, obj.hostname, obj.serverURLs)
+Nominate(obj, obj.hostname, obj.serverURLs)
// XXX: if this fails, where will we retry this part ?
}

@@ -1443,7 +1444,7 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {
if curls := obj.clientURLs; len(curls) > 0 {
// XXX: don't advertise local addresses! 127.0.0.1:2381 doesn't really help remote hosts
// XXX: but sometimes this is what we want... hmmm how do we decide? filter on callback?
-EtcdAdvertiseEndpoints(obj, curls)
+AdvertiseEndpoints(obj, curls)
// XXX: if this fails, where will we retry this part ?

// force this to remove sentinel before we reconnect...
@@ -1454,14 +1455,14 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {

} else if obj.server != nil && !exists {
// un advertise client urls
-EtcdAdvertiseEndpoints(obj, nil)
+AdvertiseEndpoints(obj, nil)

// i have been un-nominated, remove self and shutdown server!
if len(obj.nominated) != 0 { // don't call if nobody left but me!
// this works around: https://github.com/coreos/etcd/issues/5482,
// and it probably makes sense to avoid calling if we're the last
log.Printf("Etcd: Member Remove: Removing self: %v", obj.memberID)
-removed, err := EtcdMemberRemove(obj, obj.memberID)
+removed, err := MemberRemove(obj, obj.memberID)
if err != nil {
return fmt.Errorf("Etcd: Member Remove: Error: %+v", err)
}
@@ -1523,7 +1524,7 @@ func (obj *EmbdEtcd) endpointCallback(re *RE) error {
endpoints, err := ApplyDeltaEvents(re, endpoints)
if err != nil || exists {
// TODO: we could also lookup endpoints from the maintenance api
-endpoints, err = EtcdEndpoints(obj)
+endpoints, err = Endpoints(obj)
if err != nil {
return err
}
@@ -1671,16 +1672,16 @@ func (obj *EmbdEtcd) StartServer(newCluster bool, peerURLsMap etcdtypes.URLsMap)
case <-obj.server.Server.ReadyNotify(): // we hang here if things are bad
log.Printf("Etcd: StartServer: Done starting server!") // it didn't hang!
case <-time.After(time.Duration(maxStartServerTimeout) * time.Second):
-e := fmt.Errorf("Etcd: StartServer: Timeout of %d seconds reached!", maxStartServerTimeout)
+e := fmt.Errorf("timeout of %d seconds reached", maxStartServerTimeout)
-log.Printf(e.Error())
+log.Printf("Etcd: StartServer: %s", e.Error())
|
||||||
obj.server.Server.Stop() // trigger a shutdown
|
obj.server.Server.Stop() // trigger a shutdown
|
||||||
obj.serverwg.Add(1) // add for the DestroyServer()
|
obj.serverwg.Add(1) // add for the DestroyServer()
|
||||||
obj.DestroyServer()
|
obj.DestroyServer()
|
||||||
return e
|
return e
|
||||||
// TODO: should we wait for this notification elsewhere?
|
// TODO: should we wait for this notification elsewhere?
|
||||||
case <-obj.server.Server.StopNotify(): // it's going down now...
|
case <-obj.server.Server.StopNotify(): // it's going down now...
|
||||||
e := fmt.Errorf("Etcd: StartServer: Received stop notification.")
|
e := fmt.Errorf("received stop notification")
|
||||||
log.Printf(e.Error())
|
log.Printf("Etcd: StartServer: %s", e.Error())
|
||||||
obj.server.Server.Stop() // trigger a shutdown
|
obj.server.Server.Stop() // trigger a shutdown
|
||||||
obj.serverwg.Add(1) // add for the DestroyServer()
|
obj.serverwg.Add(1) // add for the DestroyServer()
|
||||||
obj.DestroyServer()
|
obj.DestroyServer()
|
||||||
@@ -1716,11 +1717,11 @@ func (obj *EmbdEtcd) DestroyServer() error {
 // TODO: Could all these Etcd*(obj *EmbdEtcd, ...) functions which deal with the
 // interface between etcd paths and behaviour be grouped into a single struct ?

-// EtcdNominate nominates a particular client to be a server (peer)
+// Nominate nominates a particular client to be a server (peer)
-func EtcdNominate(obj *EmbdEtcd, hostname string, urls etcdtypes.URLs) error {
+func Nominate(obj *EmbdEtcd, hostname string, urls etcdtypes.URLs) error {
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdNominate(%v): %v", hostname, urls.String())
+log.Printf("Trace: Etcd: Nominate(%v): %v", hostname, urls.String())
-defer log.Printf("Trace: Etcd: EtcdNominate(%v): Finished!", hostname)
+defer log.Printf("Trace: Etcd: Nominate(%v): Finished!", hostname)
 }
 // nominate someone to be a server
 nominate := fmt.Sprintf("/%s/nominated/%s", NS, hostname)
@@ -1733,18 +1734,18 @@ func EtcdNominate(obj *EmbdEtcd, hostname string, urls etcdtypes.URLs) error {
 }

 if _, err := obj.Txn(nil, ops, nil); err != nil {
-return fmt.Errorf("Etcd: Nominate failed!") // exit in progress?
+return fmt.Errorf("nominate failed") // exit in progress?
 }
 return nil
 }

-// EtcdNominated returns a urls map of nominated etcd server volunteers
+// Nominated returns a urls map of nominated etcd server volunteers
 // NOTE: I know 'nominees' might be more correct, but is less consistent here
-func EtcdNominated(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
+func Nominated(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 path := fmt.Sprintf("/%s/nominated/", NS)
 keyMap, err := obj.Get(path, etcd.WithPrefix()) // map[string]string, bool
 if err != nil {
-return nil, fmt.Errorf("Etcd: Nominated isn't available: %v", err)
+return nil, fmt.Errorf("nominated isn't available: %v", err)
 }
 nominated := make(etcdtypes.URLsMap)
 for key, val := range keyMap { // loop through directory of nominated
@@ -1757,7 +1758,7 @@ func EtcdNominated(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 }
 urls, err := etcdtypes.NewURLs(strings.Split(val, ","))
 if err != nil {
-return nil, fmt.Errorf("Etcd: Nominated: Data format error!: %v", err)
+return nil, fmt.Errorf("nominated data format error: %v", err)
 }
 nominated[name] = urls // add to map
 if obj.flags.Debug {
@@ -1767,11 +1768,11 @@ func EtcdNominated(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 return nominated, nil
 }

-// EtcdVolunteer offers yourself up to be a server if needed
+// Volunteer offers yourself up to be a server if needed
-func EtcdVolunteer(obj *EmbdEtcd, urls etcdtypes.URLs) error {
+func Volunteer(obj *EmbdEtcd, urls etcdtypes.URLs) error {
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdVolunteer(%v): %v", obj.hostname, urls.String())
+log.Printf("Trace: Etcd: Volunteer(%v): %v", obj.hostname, urls.String())
-defer log.Printf("Trace: Etcd: EtcdVolunteer(%v): Finished!", obj.hostname)
+defer log.Printf("Trace: Etcd: Volunteer(%v): Finished!", obj.hostname)
 }
 // volunteer to be a server
 volunteer := fmt.Sprintf("/%s/volunteers/%s", NS, obj.hostname)
@@ -1785,21 +1786,21 @@ func EtcdVolunteer(obj *EmbdEtcd, urls etcdtypes.URLs) error {
 }

 if _, err := obj.Txn(nil, ops, nil); err != nil {
-return fmt.Errorf("Etcd: Volunteering failed!") // exit in progress?
+return fmt.Errorf("volunteering failed") // exit in progress?
 }
 return nil
 }

-// EtcdVolunteers returns a urls map of available etcd server volunteers
+// Volunteers returns a urls map of available etcd server volunteers
-func EtcdVolunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
+func Volunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdVolunteers()")
+log.Printf("Trace: Etcd: Volunteers()")
-defer log.Printf("Trace: Etcd: EtcdVolunteers(): Finished!")
+defer log.Printf("Trace: Etcd: Volunteers(): Finished!")
 }
 path := fmt.Sprintf("/%s/volunteers/", NS)
 keyMap, err := obj.Get(path, etcd.WithPrefix())
 if err != nil {
-return nil, fmt.Errorf("Etcd: Volunteers aren't available: %v", err)
+return nil, fmt.Errorf("volunteers aren't available: %v", err)
 }
 volunteers := make(etcdtypes.URLsMap)
 for key, val := range keyMap { // loop through directory of volunteers
@@ -1812,7 +1813,7 @@ func EtcdVolunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 }
 urls, err := etcdtypes.NewURLs(strings.Split(val, ","))
 if err != nil {
-return nil, fmt.Errorf("Etcd: Volunteers: Data format error!: %v", err)
+return nil, fmt.Errorf("volunteers data format error: %v", err)
 }
 volunteers[name] = urls // add to map
 if obj.flags.Debug {
@@ -1822,11 +1823,11 @@ func EtcdVolunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 return volunteers, nil
 }

-// EtcdAdvertiseEndpoints advertises the list of available client endpoints
+// AdvertiseEndpoints advertises the list of available client endpoints
-func EtcdAdvertiseEndpoints(obj *EmbdEtcd, urls etcdtypes.URLs) error {
+func AdvertiseEndpoints(obj *EmbdEtcd, urls etcdtypes.URLs) error {
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdAdvertiseEndpoints(%v): %v", obj.hostname, urls.String())
+log.Printf("Trace: Etcd: AdvertiseEndpoints(%v): %v", obj.hostname, urls.String())
-defer log.Printf("Trace: Etcd: EtcdAdvertiseEndpoints(%v): Finished!", obj.hostname)
+defer log.Printf("Trace: Etcd: AdvertiseEndpoints(%v): Finished!", obj.hostname)
 }
 // advertise endpoints
 endpoints := fmt.Sprintf("/%s/endpoints/%s", NS, obj.hostname)
@@ -1840,21 +1841,21 @@ func EtcdAdvertiseEndpoints(obj *EmbdEtcd, urls etcdtypes.URLs) error {
 }

 if _, err := obj.Txn(nil, ops, nil); err != nil {
-return fmt.Errorf("Etcd: Endpoint advertising failed!") // exit in progress?
+return fmt.Errorf("endpoint advertising failed") // exit in progress?
 }
 return nil
 }

-// EtcdEndpoints returns a urls map of available etcd server endpoints
+// Endpoints returns a urls map of available etcd server endpoints
-func EtcdEndpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
+func Endpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdEndpoints()")
+log.Printf("Trace: Etcd: Endpoints()")
-defer log.Printf("Trace: Etcd: EtcdEndpoints(): Finished!")
+defer log.Printf("Trace: Etcd: Endpoints(): Finished!")
 }
 path := fmt.Sprintf("/%s/endpoints/", NS)
 keyMap, err := obj.Get(path, etcd.WithPrefix())
 if err != nil {
-return nil, fmt.Errorf("Etcd: Endpoints aren't available: %v", err)
+return nil, fmt.Errorf("endpoints aren't available: %v", err)
 }
 endpoints := make(etcdtypes.URLsMap)
 for key, val := range keyMap { // loop through directory of endpoints
@@ -1867,7 +1868,7 @@ func EtcdEndpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 }
 urls, err := etcdtypes.NewURLs(strings.Split(val, ","))
 if err != nil {
-return nil, fmt.Errorf("Etcd: Endpoints: Data format error!: %v", err)
+return nil, fmt.Errorf("endpoints data format error: %v", err)
 }
 endpoints[name] = urls // add to map
 if obj.flags.Debug {
@@ -1877,30 +1878,30 @@ func EtcdEndpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
 return endpoints, nil
 }

-// EtcdSetHostnameConverged sets whether a specific hostname is converged.
+// SetHostnameConverged sets whether a specific hostname is converged.
-func EtcdSetHostnameConverged(obj *EmbdEtcd, hostname string, isConverged bool) error {
+func SetHostnameConverged(obj *EmbdEtcd, hostname string, isConverged bool) error {
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdSetHostnameConverged(%s): %v", hostname, isConverged)
+log.Printf("Trace: Etcd: SetHostnameConverged(%s): %v", hostname, isConverged)
-defer log.Printf("Trace: Etcd: EtcdSetHostnameConverged(%v): Finished!", hostname)
+defer log.Printf("Trace: Etcd: SetHostnameConverged(%v): Finished!", hostname)
 }
 converged := fmt.Sprintf("/%s/converged/%s", NS, hostname)
 op := []etcd.Op{etcd.OpPut(converged, fmt.Sprintf("%t", isConverged))}
 if _, err := obj.Txn(nil, op, nil); err != nil { // TODO: do we need a skipConv flag here too?
-return fmt.Errorf("Etcd: Set converged failed!") // exit in progress?
+return fmt.Errorf("set converged failed") // exit in progress?
 }
 return nil
 }

-// EtcdHostnameConverged returns a map of every hostname's converged state.
+// HostnameConverged returns a map of every hostname's converged state.
-func EtcdHostnameConverged(obj *EmbdEtcd) (map[string]bool, error) {
+func HostnameConverged(obj *EmbdEtcd) (map[string]bool, error) {
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdHostnameConverged()")
+log.Printf("Trace: Etcd: HostnameConverged()")
-defer log.Printf("Trace: Etcd: EtcdHostnameConverged(): Finished!")
+defer log.Printf("Trace: Etcd: HostnameConverged(): Finished!")
 }
 path := fmt.Sprintf("/%s/converged/", NS)
 keyMap, err := obj.ComplexGet(path, true, etcd.WithPrefix()) // don't un-converge
 if err != nil {
-return nil, fmt.Errorf("Etcd: Converged values aren't available: %v", err)
+return nil, fmt.Errorf("converged values aren't available: %v", err)
 }
 converged := make(map[string]bool)
 for key, val := range keyMap { // loop through directory...
@@ -1913,21 +1914,21 @@ func EtcdHostnameConverged(obj *EmbdEtcd) (map[string]bool, error) {
 }
 b, err := strconv.ParseBool(val)
 if err != nil {
-return nil, fmt.Errorf("Etcd: Converged: Data format error!: %v", err)
+return nil, fmt.Errorf("converged data format error: %v", err)
 }
 converged[name] = b // add to map
 }
 return converged, nil
 }

-// EtcdAddHostnameConvergedWatcher adds a watcher with a callback that runs on
+// AddHostnameConvergedWatcher adds a watcher with a callback that runs on
 // hostname state changes.
-func EtcdAddHostnameConvergedWatcher(obj *EmbdEtcd, callbackFn func(map[string]bool) error) (func(), error) {
+func AddHostnameConvergedWatcher(obj *EmbdEtcd, callbackFn func(map[string]bool) error) (func(), error) {
 path := fmt.Sprintf("/%s/converged/", NS)
 internalCbFn := func(re *RE) error {
 // TODO: get the value from the response, and apply delta...
 // for now, just run a get operation which is easier to code!
-m, err := EtcdHostnameConverged(obj)
+m, err := HostnameConverged(obj)
 if err != nil {
 return err
 }
@@ -1936,49 +1937,49 @@ func EtcdAddHostnameConvergedWatcher(obj *EmbdEtcd, callbackFn func(map[string]b
 return obj.AddWatcher(path, internalCbFn, true, true, etcd.WithPrefix()) // no block and no converger reset
 }

-// EtcdSetClusterSize sets the ideal target cluster size of etcd peers
+// SetClusterSize sets the ideal target cluster size of etcd peers
-func EtcdSetClusterSize(obj *EmbdEtcd, value uint16) error {
+func SetClusterSize(obj *EmbdEtcd, value uint16) error {
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdSetClusterSize(): %v", value)
+log.Printf("Trace: Etcd: SetClusterSize(): %v", value)
-defer log.Printf("Trace: Etcd: EtcdSetClusterSize(): Finished!")
+defer log.Printf("Trace: Etcd: SetClusterSize(): Finished!")
 }
 key := fmt.Sprintf("/%s/idealClusterSize", NS)

 if err := obj.Set(key, strconv.FormatUint(uint64(value), 10)); err != nil {
-return fmt.Errorf("Etcd: SetClusterSize failed!") // exit in progress?
+return fmt.Errorf("function SetClusterSize failed: %v", err) // exit in progress?
 }
 return nil
 }

-// EtcdGetClusterSize gets the ideal target cluster size of etcd peers
+// GetClusterSize gets the ideal target cluster size of etcd peers
-func EtcdGetClusterSize(obj *EmbdEtcd) (uint16, error) {
+func GetClusterSize(obj *EmbdEtcd) (uint16, error) {
 key := fmt.Sprintf("/%s/idealClusterSize", NS)
 keyMap, err := obj.Get(key)
 if err != nil {
-return 0, fmt.Errorf("Etcd: GetClusterSize failed: %v", err)
+return 0, fmt.Errorf("function GetClusterSize failed: %v", err)
 }

 val, exists := keyMap[key]
 if !exists || val == "" {
-return 0, fmt.Errorf("Etcd: GetClusterSize failed: %v", err)
+return 0, fmt.Errorf("function GetClusterSize failed: %v", err)
 }

 v, err := strconv.ParseUint(val, 10, 16)
 if err != nil {
-return 0, fmt.Errorf("Etcd: GetClusterSize failed: %v", err)
+return 0, fmt.Errorf("function GetClusterSize failed: %v", err)
 }
 return uint16(v), nil
 }

-// EtcdMemberAdd adds a member to the cluster.
+// MemberAdd adds a member to the cluster.
-func EtcdMemberAdd(obj *EmbdEtcd, peerURLs etcdtypes.URLs) (*etcd.MemberAddResponse, error) {
+func MemberAdd(obj *EmbdEtcd, peerURLs etcdtypes.URLs) (*etcd.MemberAddResponse, error) {
 //obj.Connect(false) // TODO: ?
 ctx := context.Background()
 var response *etcd.MemberAddResponse
 var err error
 for {
 if obj.exiting { // the exit signal has been sent!
-return nil, fmt.Errorf("Exiting...")
+return nil, fmt.Errorf("exiting etcd")
 }
 obj.rLock.RLock()
 response, err = obj.client.MemberAdd(ctx, peerURLs.StringSlice())
@@ -1993,15 +1994,15 @@ func EtcdMemberAdd(obj *EmbdEtcd, peerURLs etcdtypes.URLs) (*etcd.MemberAddRespo
 return response, nil
 }

-// EtcdMemberRemove removes a member by mID and returns if it worked, and also
+// MemberRemove removes a member by mID and returns if it worked, and also
 // if there was an error. This is because it might have run without error, but
 // the member wasn't found, for example.
-func EtcdMemberRemove(obj *EmbdEtcd, mID uint64) (bool, error) {
+func MemberRemove(obj *EmbdEtcd, mID uint64) (bool, error) {
 //obj.Connect(false) // TODO: ?
 ctx := context.Background()
 for {
 if obj.exiting { // the exit signal has been sent!
-return false, fmt.Errorf("Exiting...")
+return false, fmt.Errorf("exiting etcd")
 }
 obj.rLock.RLock()
 _, err := obj.client.MemberRemove(ctx, mID)
@@ -2019,21 +2020,21 @@ func EtcdMemberRemove(obj *EmbdEtcd, mID uint64) (bool, error) {
 return true, nil
 }

-// EtcdMembers returns information on cluster membership.
+// Members returns information on cluster membership.
 // The member ID's are the keys, because an empty names means unstarted!
 // TODO: consider queueing this through the main loop with CtxError(ctx, err)
-func EtcdMembers(obj *EmbdEtcd) (map[uint64]string, error) {
+func Members(obj *EmbdEtcd) (map[uint64]string, error) {
 //obj.Connect(false) // TODO: ?
 ctx := context.Background()
 var response *etcd.MemberListResponse
 var err error
 for {
 if obj.exiting { // the exit signal has been sent!
-return nil, fmt.Errorf("Exiting...")
+return nil, fmt.Errorf("exiting etcd")
 }
 obj.rLock.RLock()
 if obj.flags.Trace {
-log.Printf("Trace: Etcd: EtcdMembers(): Endpoints are: %v", obj.client.Endpoints())
+log.Printf("Trace: Etcd: Members(): Endpoints are: %v", obj.client.Endpoints())
 }
 response, err = obj.client.MemberList(ctx)
 obj.rLock.RUnlock()
@@ -2052,18 +2053,18 @@ func EtcdMembers(obj *EmbdEtcd) (map[uint64]string, error) {
 return members, nil
 }

-// EtcdLeader returns the current leader of the etcd server cluster
+// Leader returns the current leader of the etcd server cluster
-func EtcdLeader(obj *EmbdEtcd) (string, error) {
+func Leader(obj *EmbdEtcd) (string, error) {
 //obj.Connect(false) // TODO: ?
 var err error
 membersMap := make(map[uint64]string)
-if membersMap, err = EtcdMembers(obj); err != nil {
+if membersMap, err = Members(obj); err != nil {
 return "", err
 }
 addresses := obj.LocalhostClientURLs() // heuristic, but probably correct
 if len(addresses) == 0 {
 // probably a programming error...
-return "", fmt.Errorf("Etcd: Leader: Programming error!")
+return "", fmt.Errorf("programming error")
 }
 endpoint := addresses[0].Host // FIXME: arbitrarily picked the first one

@@ -2072,7 +2073,7 @@ func EtcdLeader(obj *EmbdEtcd) (string, error) {
 var response *etcd.StatusResponse
 for {
 if obj.exiting { // the exit signal has been sent!
-return "", fmt.Errorf("Exiting...")
+return "", fmt.Errorf("exiting etcd")
 }

 obj.rLock.RLock()
@@ -2092,20 +2093,20 @@ func EtcdLeader(obj *EmbdEtcd) (string, error) {
 return name, nil
 }
 }
-return "", fmt.Errorf("Etcd: Members map is not current!") // not found
+return "", fmt.Errorf("members map is not current") // not found
 }

-// EtcdWatch returns a channel that outputs a true bool when activity occurs
+// WatchAll returns a channel that outputs a true bool when activity occurs
 // TODO: Filter our watch (on the server side if possible) based on the
 // collection prefixes and filters that we care about...
-func EtcdWatch(obj *EmbdEtcd) chan bool {
+func WatchAll(obj *EmbdEtcd) chan bool {
 ch := make(chan bool, 1) // buffer it so we can measure it
 path := fmt.Sprintf("/%s/exported/", NS)
 callback := func(re *RE) error {
 // TODO: is this even needed? it used to happen on conn errors
 log.Printf("Etcd: Watch: Path: %v", path) // event
 if re == nil || re.response.Canceled {
-return fmt.Errorf("Etcd: Watch is empty!") // will cause a CtxError+retry
+return fmt.Errorf("watch is empty") // will cause a CtxError+retry
 }
 // we normally need to check if anything changed since the last
 // event, since a set (export) with no changes still causes the
@@ -2125,15 +2126,15 @@ func EtcdWatch(obj *EmbdEtcd) chan bool {
 return ch
 }

-// EtcdSetResources exports all of the resources which we pass in to etcd
+// SetResources exports all of the resources which we pass in to etcd
-func EtcdSetResources(obj *EmbdEtcd, hostname string, resourceList []resources.Res) error {
+func SetResources(obj *EmbdEtcd, hostname string, resourceList []resources.Res) error {
 // key structure is /$NS/exported/$hostname/resources/$uid = $data

 var kindFilter []string // empty to get from everyone
 hostnameFilter := []string{hostname}
 // this is not a race because we should only be reading keys which we
 // set, and there should not be any contention with other hosts here!
-originals, err := EtcdGetResources(obj, hostnameFilter, kindFilter)
+originals, err := GetResources(obj, hostnameFilter, kindFilter)
 if err != nil {
 return err
 }
@@ -2154,7 +2155,7 @@ func EtcdSetResources(obj *EmbdEtcd, hostname string, resourceList []resources.R
 ifs = append(ifs, etcd.Compare(etcd.Value(path), "=", data)) // desired state
 ops = append(ops, etcd.OpPut(path, data))
 } else {
-return fmt.Errorf("Etcd: SetResources: Error: Can't convert to B64: %v", err)
+return fmt.Errorf("can't convert to B64: %v", err)
 }
 }

@@ -2196,18 +2197,18 @@ func EtcdSetResources(obj *EmbdEtcd, hostname string, resourceList []resources.R
 return err
 }

-// EtcdGetResources collects all of the resources which match a filter from etcd
+// GetResources collects all of the resources which match a filter from etcd
 // If the kindfilter or hostnameFilter is empty, then it assumes no filtering...
 // TODO: Expand this with a more powerful filter based on what we eventually
 // support in our collect DSL. Ideally a server side filter like WithFilter()
 // We could do this if the pattern was /$NS/exported/$kind/$hostname/$uid = $data
-func EtcdGetResources(obj *EmbdEtcd, hostnameFilter, kindFilter []string) ([]resources.Res, error) {
+func GetResources(obj *EmbdEtcd, hostnameFilter, kindFilter []string) ([]resources.Res, error) {
 // key structure is /$NS/exported/$hostname/resources/$uid = $data
 path := fmt.Sprintf("/%s/exported/", NS)
 resourceList := []resources.Res{}
 keyMap, err := obj.Get(path, etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortAscend))
 if err != nil {
-return nil, fmt.Errorf("Etcd: GetResources: Error: Could not get resources: %v", err)
+return nil, fmt.Errorf("could not get resources: %v", err)
 }
 for key, val := range keyMap {
 if !strings.HasPrefix(key, path) { // sanity check
@@ -2216,14 +2217,14 @@ func EtcdGetResources(obj *EmbdEtcd, hostnameFilter, kindFilter []string) ([]res

 str := strings.Split(key[len(path):], "/")
 if len(str) != 4 {
-return nil, fmt.Errorf("Etcd: GetResources: Error: Unexpected chunk count!")
+return nil, fmt.Errorf("unexpected chunk count")
 }
 hostname, r, kind, name := str[0], str[1], str[2], str[3]
 if r != "resources" {
-return nil, fmt.Errorf("Etcd: GetResources: Error: Unexpected chunk pattern!")
+return nil, fmt.Errorf("unexpected chunk pattern")
 }
 if kind == "" {
-return nil, fmt.Errorf("Etcd: GetResources: Error: Unexpected kind chunk!")
+return nil, fmt.Errorf("unexpected kind chunk")
 }

 // FIXME: ideally this would be a server side filter instead!
@@ -2241,7 +2242,7 @@ func EtcdGetResources(obj *EmbdEtcd, hostnameFilter, kindFilter []string) ([]res
 log.Printf("Etcd: Get: (Hostname, Kind, Name): (%s, %s, %s)", hostname, kind, name)
 resourceList = append(resourceList, obj)
 } else {
-return nil, fmt.Errorf("Etcd: GetResources: Error: Can't convert from B64: %v", err)
+return nil, fmt.Errorf("can't convert from B64: %v", err)
 }
 }
 return resourceList, nil
@@ -2269,11 +2270,11 @@ func ApplyDeltaEvents(re *RE, urlsmap etcdtypes.URLsMap) (etcdtypes.URLsMap, err
 case etcd.EventTypePut:
 val := bytes.NewBuffer(event.Kv.Value).String()
 if val == "" {
-return nil, fmt.Errorf("Etcd: ApplyDeltaEvents: Value is empty!")
+return nil, fmt.Errorf("value in ApplyDeltaEvents is empty")
 }
 urls, err := etcdtypes.NewURLs(strings.Split(val, ","))
 if err != nil {
-return nil, fmt.Errorf("Etcd: ApplyDeltaEvents: Format error: %v", err)
+return nil, fmt.Errorf("format error in ApplyDeltaEvents: %v", err)
 }
 urlsmap[key] = urls // add to map

@@ -2290,7 +2291,7 @@ func ApplyDeltaEvents(re *RE, urlsmap etcdtypes.URLsMap) (etcdtypes.URLsMap, err
 delete(urlsmap, key)

 default:
-return nil, fmt.Errorf("Etcd: ApplyDeltaEvents: Error: Unknown event: %+v", event.Type)
+return nil, fmt.Errorf("unknown event in ApplyDeltaEvents: %+v", event.Type)
 }
 }
 return urlsmap, nil
etcd/str.go (new file, 115 lines)
@@ -0,0 +1,115 @@
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package etcd

import (
    "fmt"
    "strings"

    "github.com/purpleidea/mgmt/util"

    etcd "github.com/coreos/etcd/clientv3"
    errwrap "github.com/pkg/errors"
)

// WatchStr returns a channel which spits out events on key activity.
// FIXME: It should close the channel when it's done, and spit out errors when
// something goes wrong.
func WatchStr(obj *EmbdEtcd, key string) chan error {
    // new key structure is /$NS/strings/$key/$hostname = $data
    path := fmt.Sprintf("/%s/strings/%s", NS, key)
    ch := make(chan error, 1)
    // FIXME: fix our API so that we get a close event on shutdown.
    callback := func(re *RE) error {
        // TODO: is this even needed? it used to happen on conn errors
        //log.Printf("Etcd: Watch: Path: %v", path) // event
        if re == nil || re.response.Canceled {
            return fmt.Errorf("watch is empty") // will cause a CtxError+retry
        }
        if len(ch) == 0 { // send event only if one isn't pending
            ch <- nil // event
        }
        return nil
    }
    _, _ = obj.AddWatcher(path, callback, true, false, etcd.WithPrefix()) // no need to check errors
    return ch
}

// GetStr collects all of the strings which match a namespace in etcd.
func GetStr(obj *EmbdEtcd, hostnameFilter []string, key string) (map[string]string, error) {
    // old key structure is /$NS/strings/$hostname/$key = $data
    // new key structure is /$NS/strings/$key/$hostname = $data
    // FIXME: if we have the $key as the last token (old key structure), we
    // can allow the key to contain the slash char, otherwise we need to
    // verify that one isn't present in the input string.
    path := fmt.Sprintf("/%s/strings/%s", NS, key)
    keyMap, err := obj.Get(path, etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortAscend))
    if err != nil {
        return nil, errwrap.Wrapf(err, "could not get strings in: %s", key)
    }
    result := make(map[string]string)
    for key, val := range keyMap {
        if !strings.HasPrefix(key, path) { // sanity check
            continue
        }

        str := strings.Split(key[len(path):], "/")
        if len(str) != 2 {
            return nil, fmt.Errorf("unexpected chunk count of %d", len(str))
        }
        _, hostname := str[0], str[1]

        if hostname == "" {
            return nil, fmt.Errorf("unexpected chunk length of %d", len(hostname))
        }

        // FIXME: ideally this would be a server side filter instead!
        if len(hostnameFilter) > 0 && !util.StrInList(hostname, hostnameFilter) {
            continue
        }
        //log.Printf("Etcd: GetStr(%s): (Hostname, Data): (%s, %s)", key, hostname, val)
        result[hostname] = val
    }
    return result, nil
}

// SetStr sets a key and hostname pair to a certain value. If the value is nil,
// then it deletes the key. Otherwise the value should point to a string.
// TODO: TTL or delete disconnect?
func SetStr(obj *EmbdEtcd, hostname, key string, data *string) error {
    // key structure is /$NS/strings/$key/$hostname = $data
    path := fmt.Sprintf("/%s/strings/%s/%s", NS, key, hostname)
    ifs := []etcd.Cmp{} // list matching the desired state
    ops := []etcd.Op{}  // list of ops in this transaction (then)
    els := []etcd.Op{}  // list of ops in this transaction (else)
    if data == nil { // perform a delete
        // TODO: use https://github.com/coreos/etcd/pull/7417 if merged
        //ifs = append(ifs, etcd.KeyExists(path))
        ifs = append(ifs, etcd.Compare(etcd.Version(path), ">", 0))
        ops = append(ops, etcd.OpDelete(path))
    } else {
        data := *data // get the real value
        ifs = append(ifs, etcd.Compare(etcd.Value(path), "=", data)) // desired state
        els = append(els, etcd.OpPut(path, data))
    }

    // it's important to do this in one transaction, and atomically, because
    // this way, we only generate one watch event, and only when it's needed
    _, err := obj.Txn(ifs, ops, els) // TODO: do we need to look at response?
    return errwrap.Wrapf(err, "could not set strings in: %s", key)
}
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -30,7 +30,7 @@ type World struct {
 // ResExport exports a list of resources under our hostname namespace.
 // Subsequent calls replace the previously set collection atomically.
 func (obj *World) ResExport(resourceList []resources.Res) error {
-return EtcdSetResources(obj.EmbdEtcd, obj.Hostname, resourceList)
+return SetResources(obj.EmbdEtcd, obj.Hostname, resourceList)
 }

 // ResCollect gets the collection of exported resources which match the filter.
@@ -39,5 +39,26 @@ func (obj *World) ResCollect(hostnameFilter, kindFilter []string) ([]resources.R
 // XXX: should we be restricted to retrieving resources that were
 // exported with a tag that allows or restricts our hostname? We could
 // enforce that here if the underlying API supported it... Add this?
-return EtcdGetResources(obj.EmbdEtcd, hostnameFilter, kindFilter)
+return GetResources(obj.EmbdEtcd, hostnameFilter, kindFilter)
+}
+
+// SetWatch returns a channel which spits out events on possible string changes.
+func (obj *World) StrWatch(namespace string) chan error {
+return WatchStr(obj.EmbdEtcd, namespace)
+}
+
+// StrGet returns a map of hostnames to values in the given namespace.
+func (obj *World) StrGet(namespace string) (map[string]string, error) {
+return GetStr(obj.EmbdEtcd, []string{}, namespace)
+}
+
+// StrSet sets the namespace value to a particular string under the identity of
+// its own hostname.
+func (obj *World) StrSet(namespace, value string) error {
+return SetStr(obj.EmbdEtcd, obj.Hostname, namespace, &value)
+}
+
+// StrDel deletes the value in a particular namespace.
+func (obj *World) StrDel(namespace string) error {
+return SetStr(obj.EmbdEtcd, obj.Hostname, namespace, nil)
 }
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -22,14 +22,14 @@ import (
 "fmt"
 )

-//go:generate stringer -type=EventName -output=eventname_stringer.go
+//go:generate stringer -type=Kind -output=kind_stringer.go

-// EventName represents the type of event being passed.
+// Kind represents the type of event being passed.
-type EventName int
+type Kind int

-// The different event names are used in different contexts.
+// The different event kinds are used in different contexts.
 const (
-EventNil EventName = iota
+EventNil Kind = iota
 EventExit
 EventStart
 EventPause
@@ -43,11 +43,10 @@ type Resp chan error

 // Event is the main struct that stores event information and responses.
 type Event struct {
-Name EventName
+Kind Kind
 Resp Resp // channel to send an ack response on, nil to skip
 //Wg *sync.WaitGroup // receiver barrier to Wait() for everyone else on
-Msg string // some words for fun
+Err error // store an error in our event
-Activity bool // did something interesting happen?
 }

 // ACK sends a single acknowledgement on the channel if one was requested.
@@ -80,7 +79,7 @@ func NewResp() Resp {
 // ACK sends a true value to resp.
 func (resp Resp) ACK() {
 if resp != nil {
-resp <- nil
+resp <- nil // TODO: close instead?
 }
 }

@@ -114,7 +113,7 @@ func (resp Resp) ACKWait() {
 }
 }

-// GetActivity returns the activity value.
+// Error returns the stored error value.
-func (event *Event) GetActivity() bool {
+func (event *Event) Error() error {
-return event.Activity
+return event.Err
 }
examples/augeas1.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
graph: mygraph
resources:
  augeas:
  - name: sshd_config
    lens: Sshd.lns
    file: "/etc/ssh/sshd_config"
    sets:
    - path: X11Forwarding
      value: false
edges:
@@ -5,11 +5,13 @@ resources:
 - name: drbd-utils
 meta:
 autoedge: true
+noop: true
 state: installed
 file:
 - name: file1
 meta:
 autoedge: true
+noop: true
 path: "/etc/drbd.conf"
 content: |
 # this is an mgmt test
@@ -17,13 +19,14 @@ resources:
 - name: file2
 meta:
 autoedge: true
+noop: true
 path: "/etc/drbd.d/"
-content: |
+source: /dev/null
-i am a directory
 state: exists
 svc:
 - name: drbd
 meta:
 autoedge: true
+noop: true
 state: stopped
 edges: []
examples/deep-dirs.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
graph: mygraph
resources:
  file:
  - name: file1
    path: "/tmp/mgmt/a/b/c/f1"
    content: |
      i am f1
    state: exists
@@ -2,15 +2,10 @@
 graph: mygraph
 resources:
 file:
-- name: file1a
+- name: "@@filea"
-path: "/tmp/mgmtA/f1a"
+path: "/tmp/mgmtA/fA"
 content: |
-i am f1
+i am fA, exported from host A
-state: exists
-- name: "@@file2a"
-path: "/tmp/mgmtA/f2a"
-content: |
-i am f2, exported from host A
 state: exists
 collect:
 - kind: file
@@ -2,15 +2,10 @@
 graph: mygraph
 resources:
 file:
-- name: file1b
+- name: "@@fileb"
-path: "/tmp/mgmtB/f1b"
+path: "/tmp/mgmtB/fB"
 content: |
-i am f1
+i am fB, exported from host B
-state: exists
-- name: "@@file2b"
-path: "/tmp/mgmtB/f2b"
-content: |
-i am f2, exported from host B
 state: exists
 collect:
 - kind: file
@@ -2,15 +2,10 @@
 graph: mygraph
 resources:
 file:
-- name: file1c
+- name: "@@filec"
-path: "/tmp/mgmtC/f1c"
+path: "/tmp/mgmtC/fC"
 content: |
-i am f1
+i am fC, exported from host C
-state: exists
-- name: "@@file2c"
-path: "/tmp/mgmtC/f2c"
-content: |
-i am f2, exported from host C
 state: exists
 collect:
 - kind: file
@@ -2,15 +2,10 @@
 graph: mygraph
 resources:
 file:
-- name: file1d
+- name: "@@filed"
-path: "/tmp/mgmtD/f1d"
+path: "/tmp/mgmtD/fD"
 content: |
-i am f1
+i am fD, exported from host D
-state: exists
-- name: "@@file2d"
-path: "/tmp/mgmtD/f2d"
-content: |
-i am f2, exported from host D
 state: exists
 collect:
 - kind: file
examples/etcd1e.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
---
graph: mygraph
resources:
  file:
  - name: "@@filee"
    path: "/tmp/mgmtE/fE"
    content: |
      i am fE, exported from host E
    state: exists
collect:
- kind: file
  pattern: "/tmp/mgmtE/"
edges: []
examples/exec3-sema.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
---
graph: parallel
resources:
  exec:
  - name: pkg10
    meta:
      sema: ['mylock:1', 'otherlock:42']
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: svc10
    meta:
      sema: ['mylock:1']
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: exec10
    meta:
      sema: ['mylock:1']
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: pkg15
    meta:
      sema: ['mylock:1', 'otherlock:42']
    cmd: sleep 15s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
edges:
- name: e1
  from:
    kind: exec
    name: pkg10
  to:
    kind: exec
    name: svc10
- name: e2
  from:
    kind: exec
    name: svc10
  to:
    kind: exec
    name: exec10
@@ -1,14 +1,13 @@
 ---
 graph: mygraph
-comment: You can test Watch and CheckApply failures with chmod ugo-r and chmod ugo-w.
 resources:
 file:
 - name: file1
-path: "/tmp/mgmt/f1"
 meta:
-retry: 3
+limit: .inf
-delay: 5000
+burst: 0
+path: "/tmp/mgmt/hello"
 content: |
-i am f1
+i am a file
 state: exists
 edges: []
examples/file4.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
graph: mygraph
resources:
  file:
  - name: file1
    path: "/tmp/mgmt/hello"
    content: |
      i am a file
    state: exists
edges: []
examples/kv1.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
graph: mygraph
resources:
  kv:
  - name: kv1
    key: "hello"
    value: "world"
edges: []
examples/kv2.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
graph: mygraph
resources:
  kv:
  - name: kv1
    key: "iamdeleted"
edges: []
examples/kv3.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
graph: mygraph
resources:
  kv:
  - name: kv1
    key: "stage"
    value: "3"
    skiplessthan: true
edges: []
examples/kv4.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
---
graph: mygraph
resources:
  kv:
  - name: kv1
    key: "stage"
    value: "1"
    skiplessthan: true
  - name: kv2
    key: "stage"
    value: "2"
    skiplessthan: true
  - name: kv3
    key: "stage"
    value: "3"
    skiplessthan: true
edges:
- name: e1
  from:
    kind: kv
    name: kv1
  to:
    kind: kv
    name: kv2
- name: e2
  from:
    kind: kv
    name: kv2
  to:
    kind: kv
    name: kv3
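Note: the kv examples write a key into the cluster store, and `skiplessthan: true` appears to make the "stage" value a ratchet: a resource does not clobber a stored value that is already numerically greater than its own. The guard could look roughly like the sketch below; the helper and the in-memory map are stand-ins, not mgmt's KV resource.

package main

import (
    "fmt"
    "strconv"
)

// store is a stand-in for the distributed key/value store (e.g. etcd).
var store = map[string]string{}

// setKV writes value under key. With skipLessThan set, an existing value that
// is numerically greater than the new one is left alone, so the key can only
// move upwards (useful for staged roll-outs like the "stage" examples above).
func setKV(key, value string, skipLessThan bool) error {
    if old, ok := store[key]; ok && skipLessThan {
        oldN, err1 := strconv.ParseFloat(old, 64)
        newN, err2 := strconv.ParseFloat(value, 64)
        if err1 == nil && err2 == nil && oldN > newN {
            return nil // skip: the stored value is already ahead of us
        }
    }
    store[key] = value
    return nil
}

func main() {
    setKV("stage", "1", true)
    setKV("stage", "3", true)
    setKV("stage", "2", true)   // skipped, 3 > 2
    fmt.Println(store["stage"]) // prints: 3
}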
@@ -40,10 +40,10 @@ func NewMyGAPI(data gapi.Data, name string, interval uint) (*MyGAPI, error) {
 // Init initializes the MyGAPI struct.
 func (obj *MyGAPI) Init(data gapi.Data) error {
     if obj.initialized {
-        return fmt.Errorf("Already initialized!")
+        return fmt.Errorf("already initialized")
     }
     if obj.Name == "" {
-        return fmt.Errorf("The graph name must be specified!")
+        return fmt.Errorf("the graph name must be specified")
     }
     obj.data = data // store for later
     obj.closeChan = make(chan struct{})
@@ -59,7 +59,7 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {

     n1, err := resources.NewNoopRes("noop1")
     if err != nil {
-        return nil, fmt.Errorf("Can't create resource: %v", err)
+        return nil, fmt.Errorf("can't create resource: %v", err)
     }

     // we can still build a graph via the yaml method
@@ -85,8 +85,8 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
     return g, err
 }

-// SwitchStream returns nil errors every time there could be a new graph.
-func (obj *MyGAPI) SwitchStream() chan error {
+// Next returns nil errors every time there could be a new graph.
+func (obj *MyGAPI) Next() chan error {
     if obj.data.NoWatch || obj.Interval <= 0 {
         return nil
     }
@@ -107,7 +107,11 @@ func (obj *MyGAPI) SwitchStream() chan error {
         select {
         case <-ticker.C:
             log.Printf("libmgmt: Generating new graph...")
-            ch <- nil // trigger a run
+            select {
+            case ch <- nil: // trigger a run
+            case <-obj.closeChan:
+                return
+            }
         case <-obj.closeChan:
             return
         }
@@ -164,7 +168,7 @@ func Run() error {
             return
         }
         log.Println("Interrupted by signal")
-        obj.Exit(fmt.Errorf("Killed by %v", sig))
+        obj.Exit(fmt.Errorf("killed by %v", sig))
         return
     case <-exit:
         return
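Note: the recurring change from a bare `ch <- nil` to a nested select is a shutdown-safety fix. If the receiver has already gone away, the bare send blocks forever and the ticker goroutine leaks; selecting on the send together with `closeChan` lets the loop exit cleanly instead. A small standalone illustration of the pattern (names chosen to mirror the diff, but this is a sketch, not the project's code):

package main

import (
    "fmt"
    "time"
)

// producer ticks every interval and offers an event on ch, but never blocks
// past a close request: the send and the shutdown signal race inside one
// select, so whichever is ready first wins.
func producer(ch chan<- error, closeChan <-chan struct{}, interval time.Duration) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            select {
            case ch <- nil: // trigger a run
            case <-closeChan: // consumer is gone, bail out instead of blocking
                return
            }
        case <-closeChan:
            return
        }
    }
}

func main() {
    ch := make(chan error)
    closeChan := make(chan struct{})
    go producer(ch, closeChan, 10*time.Millisecond)
    <-ch // consume one event...
    fmt.Println("got one event")
    close(closeChan) // ...then shut down; the producer cannot deadlock on ch
    time.Sleep(50 * time.Millisecond)
}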
@@ -42,10 +42,10 @@ func NewMyGAPI(data gapi.Data, name string, interval uint, count uint) (*MyGAPI,
 // Init initializes the MyGAPI struct.
 func (obj *MyGAPI) Init(data gapi.Data) error {
     if obj.initialized {
-        return fmt.Errorf("Already initialized!")
+        return fmt.Errorf("already initialized")
     }
     if obj.Name == "" {
-        return fmt.Errorf("The graph name must be specified!")
+        return fmt.Errorf("the graph name must be specified")
     }
     obj.data = data // store for later
     obj.closeChan = make(chan struct{})
@@ -64,7 +64,7 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
     for i := uint(0); i < obj.Count; i++ {
         n, err := resources.NewNoopRes(fmt.Sprintf("noop%d", i))
         if err != nil {
-            return nil, fmt.Errorf("Can't create resource: %v", err)
+            return nil, fmt.Errorf("can't create resource: %v", err)
         }
         v := pgraph.NewVertex(n)
         g.AddVertex(v)
@@ -78,8 +78,8 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
     return g, nil
 }

-// SwitchStream returns nil errors every time there could be a new graph.
-func (obj *MyGAPI) SwitchStream() chan error {
+// Next returns nil errors every time there could be a new graph.
+func (obj *MyGAPI) Next() chan error {
     if obj.data.NoWatch || obj.Interval <= 0 {
         return nil
     }
@@ -100,7 +100,11 @@ func (obj *MyGAPI) SwitchStream() chan error {
         select {
         case <-ticker.C:
             log.Printf("libmgmt: Generating new graph...")
-            ch <- nil // trigger a run
+            select {
+            case ch <- nil: // trigger a run
+            case <-obj.closeChan:
+                return
+            }
         case <-obj.closeChan:
             return
         }
@@ -158,7 +162,7 @@ func Run(count uint) error {
             return
         }
         log.Println("Interrupted by signal")
-        obj.Exit(fmt.Errorf("Killed by %v", sig))
+        obj.Exit(fmt.Errorf("killed by %v", sig))
         return
     case <-exit:
         return
@@ -14,6 +14,8 @@ import (
     mgmt "github.com/purpleidea/mgmt/lib"
     "github.com/purpleidea/mgmt/pgraph"
     "github.com/purpleidea/mgmt/resources"
+
+    "golang.org/x/time/rate"
 )

 // MyGAPI implements the main GAPI interface.
@@ -39,10 +41,10 @@ func NewMyGAPI(data gapi.Data, name string, interval uint) (*MyGAPI, error) {
 // Init initializes the MyGAPI struct.
 func (obj *MyGAPI) Init(data gapi.Data) error {
     if obj.initialized {
-        return fmt.Errorf("Already initialized!")
+        return fmt.Errorf("already initialized")
     }
     if obj.Name == "" {
-        return fmt.Errorf("The graph name must be specified!")
+        return fmt.Errorf("the graph name must be specified")
     }
     obj.data = data // store for later
     obj.closeChan = make(chan struct{})
@@ -58,10 +60,17 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {

     g := pgraph.NewGraph(obj.Name)

+    // FIXME: these are being specified temporarily until it's the default!
+    metaparams := resources.MetaParams{
+        Limit: rate.Inf,
+        Burst: 0,
+    }
+
     content := "Delete me to trigger a notification!\n"
     f0 := &resources.FileRes{
         BaseRes: resources.BaseRes{
             Name: "README",
+            MetaParams: metaparams,
         },
         Path:    "/tmp/mgmt/README",
         Content: &content,
@@ -73,7 +82,8 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {

     p1 := &resources.PasswordRes{
         BaseRes: resources.BaseRes{
             Name: "password1",
+            MetaParams: metaparams,
         },
         Length: 8,    // generated string will have this many characters
         Saved:  true, // this causes passwords to be stored in plain text!
@@ -83,10 +93,11 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {

     f1 := &resources.FileRes{
         BaseRes: resources.BaseRes{
             Name: "file1",
+            MetaParams: metaparams,
             // send->recv!
             Recv: map[string]*resources.Send{
-                "Content": &resources.Send{Res: p1, Key: "Password"},
+                "Content": {Res: p1, Key: "Password"},
             },
         },
         Path: "/tmp/mgmt/secret",
@@ -99,7 +110,8 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {

     n1 := &resources.NoopRes{
         BaseRes: resources.BaseRes{
             Name: "noop1",
+            MetaParams: metaparams,
         },
     }

@@ -120,8 +132,8 @@ func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
     return g, nil
 }

-// SwitchStream returns nil errors every time there could be a new graph.
-func (obj *MyGAPI) SwitchStream() chan error {
+// Next returns nil errors every time there could be a new graph.
+func (obj *MyGAPI) Next() chan error {
     if obj.data.NoWatch || obj.Interval <= 0 {
         return nil
     }
@@ -142,7 +154,11 @@ func (obj *MyGAPI) SwitchStream() chan error {
         select {
         case <-ticker.C:
             log.Printf("libmgmt: Generating new graph...")
-            ch <- nil // trigger a run
+            select {
+            case ch <- nil: // trigger a run
+            case <-obj.closeChan:
+                return
+            }
         case <-obj.closeChan:
             return
         }
@@ -201,7 +217,7 @@ func Run() error {
             return
         }
         log.Println("Interrupted by signal")
-        obj.Exit(fmt.Errorf("Killed by %v", sig))
+        obj.Exit(fmt.Errorf("killed by %v", sig))
         return
     case <-exit:
         return
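Note: the new Limit and Burst metaparams are backed by golang.org/x/time/rate, which the import hunk above pulls in. rate.Inf disables throttling; a finite limit is events per second and Burst is how many events may pass at once. The MetaParams field names come from the diff; the limiter usage below is plain x/time/rate, not mgmt's wrapper:

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    // limit: 0.2 events/second with a burst of 5, mirroring examples/limit1.yaml
    // further down: five events may pass immediately, then one every 5 seconds.
    limiter := rate.NewLimiter(rate.Limit(0.2), 5)

    start := time.Now()
    for i := 0; i < 7; i++ {
        if err := limiter.Wait(context.Background()); err != nil {
            fmt.Println("limiter error:", err)
            return
        }
        fmt.Printf("event %d admitted after %v\n", i, time.Since(start).Round(time.Second))
    }

    // rate.Inf means "never throttle", which is what the temporary
    // `Limit: rate.Inf, Burst: 0` metaparams above ask for.
    _ = rate.NewLimiter(rate.Inf, 0)
}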
examples/limit1.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
---
graph: mygraph
resources:
  file:
  - name: file1
    meta:
      limit: 0.2
      burst: 5
    path: "/tmp/mgmt/limit"
    content: |
      i am a normal file
    state: exists
edges: []
examples/noop2.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
---
graph: mygraph
comment: dangerous noop example
resources:
  noop:
  - name: noop1
    meta:
      noop: true
  file:
  - name: file1
    path: "/tmp/mgmt/hello-noop"
    content: |
      hello world from @purpleidea
    state: exists
    meta:
      noop: true
  exec:
  - name: exec1
    meta:
      noop: true
    cmd: 'rm -rf /'
    shell: '/bin/bash'
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
edges: []
examples/poll1.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
---
graph: mygraph
resources:
  file:
  - name: file1
    meta:
      poll: 5
    path: "/tmp/mgmt/f1"
    content: |
      i poll every 5 seconds
    state: exists
  - name: file2
    path: "/tmp/mgmt/f2"
    content: |
      i use the event based watcher
    state: exists
  - name: file3
    meta:
      poll: 1
    path: "/tmp/mgmt/f3"
    content: |
      i poll every second
    state: exists
edges: []
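Note: the `poll: N` metaparam swaps a resource's event-based Watch for polling every N seconds, trading latency and extra stat calls for independence from inotify-style notifications. The shape of such a poll loop, sketched with the standard library only (mgmt's real watchers live in its own packages):

package main

import (
    "fmt"
    "os"
    "time"
)

// pollFile checks a path every interval and reports a change whenever the
// modification time moves, roughly what a `poll: 5` metaparam asks for in
// place of an event-based watch.
func pollFile(path string, interval time.Duration, changed chan<- string, stop <-chan struct{}) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    var last time.Time
    for {
        select {
        case <-ticker.C:
            fi, err := os.Stat(path)
            if err != nil {
                continue // a missing file is also a state; a real resource would act on it
            }
            if fi.ModTime() != last {
                last = fi.ModTime()
                changed <- path
            }
        case <-stop:
            return
        }
    }
}

func main() {
    changed := make(chan string)
    stop := make(chan struct{})
    defer close(stop)
    go pollFile("/tmp/mgmt/f1", 5*time.Second, changed, stop)
    fmt.Println("changed:", <-changed)
}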
examples/retry1.yaml (new file, 57 lines)
@@ -0,0 +1,57 @@
---
graph: mygraph
comment: You can test Watch and CheckApply failures with chmod ugo-r and chmod ugo-w.
resources:
  exec:
  - name: exec1
    cmd: 'touch /tmp/mgmt/no-read && chmod ugo-r /tmp/mgmt/no-read'
    shell: '/bin/bash'
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: exec2
    cmd: 'touch /tmp/mgmt/no-write && chmod ugo-w /tmp/mgmt/no-write'
    shell: '/bin/bash'
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  file:
  - name: noread
    path: "/tmp/mgmt/no-read"
    meta:
      retry: 3
      delay: 5000
    content: |
      i am f1
    state: exists
  - name: nowrite
    path: "/tmp/mgmt/no-write"
    meta:
      retry: 3
      delay: 5000
    content: |
      i am f1
    state: exists
edges:
- name: e1
  from:
    kind: exec
    name: exec1
  to:
    kind: file
    name: noread
- name: e2
  from:
    kind: exec
    name: exec2
  to:
    kind: file
    name: nowrite
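Note: the retry and delay metaparams retry a failing operation (Watch or CheckApply in this example) a few more times, sleeping between attempts; delay is given in milliseconds here. A generic sketch of that policy, not mgmt's engine:

package main

import (
    "errors"
    "fmt"
    "time"
)

// withRetry runs fn, and on failure retries up to `retry` more times with
// `delay` between attempts, the shape of `retry: 3, delay: 5000` above.
func withRetry(retry int, delay time.Duration, fn func() error) error {
    var err error
    for i := 0; i <= retry; i++ {
        if err = fn(); err == nil {
            return nil
        }
        if i < retry {
            time.Sleep(delay)
        }
    }
    return fmt.Errorf("gave up after %d retries: %w", retry, err)
}

func main() {
    attempts := 0
    err := withRetry(3, 10*time.Millisecond, func() error {
        attempts++
        if attempts < 3 {
            return errors.New("still failing")
        }
        return nil
    })
    fmt.Println(attempts, err) // 3 <nil>
}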
@@ -4,7 +4,7 @@ comment: timer example
 resources:
   timer:
   - name: timer1
-    interval: 30
+    interval: 3
   exec:
   - name: exec1
     cmd: echo hello world
examples/virt4.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
---
graph: mygraph
resources:
  virt:
  - name: mgmt4
    meta:
      limit: .inf
      burst: 0
    uri: 'qemu:///session'
    cpus: 1
    maxcpus: 4
    memory: 524288
    boot:
    - hd
    disk:
    - type: qcow2
      source: "~/.local/share/libvirt/images/fedora-23-scratch.qcow2"
    state: running
    transient: false
edges: []
comment: "qemu-img create -b fedora-23.qcow2 -f qcow2 fedora-23-scratch.qcow2"
gapi/gapi.go (15 changed lines)
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -23,19 +23,10 @@ import (
     "github.com/purpleidea/mgmt/resources"
 )

-// World is an interface to the rest of the different graph state. It allows
-// the GAPI to store state and exchange information throughout the cluster. It
-// is the interface each machine uses to communicate with the rest of the world.
-type World interface { // TODO: is there a better name for this interface?
-    ResExport([]resources.Res) error
-    // FIXME: should this method take a "filter" data struct instead of many args?
-    ResCollect(hostnameFilter, kindFilter []string) ([]resources.Res, error)
-}
-
 // Data is the set of input values passed into the GAPI structs via Init.
 type Data struct {
     Hostname string // uuid for the host, required for GAPI
-    World    World
+    World    resources.World
     Noop     bool
     NoWatch  bool
     // NOTE: we can add more fields here if needed by GAPI endpoints
@@ -45,6 +36,6 @@ type Data struct {
 type GAPI interface {
     Init(Data) error               // initializes the GAPI and passes in useful data
     Graph() (*pgraph.Graph, error) // returns the most recent pgraph
-    SwitchStream() chan error      // returns a stream of switch events
+    Next() chan error              // returns a stream of switch events
     Close() error                  // shutdown the GAPI
 }
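Note: GAPI is the pluggable frontend that hands graphs to the engine, and this hunk renames SwitchStream to Next and moves World into the resources package. A minimal skeleton that would satisfy the four-method interface shown above; gapi.Data, gapi.GAPI and pgraph.Graph are the real types from the diff, everything else here is assumed for illustration:

// Sketch of a trivial GAPI implementation.
package mygapi

import (
    "fmt"

    "github.com/purpleidea/mgmt/gapi"
    "github.com/purpleidea/mgmt/pgraph"
)

type Static struct {
    data        gapi.Data
    closeChan   chan struct{}
    initialized bool
}

func (obj *Static) Init(data gapi.Data) error {
    if obj.initialized {
        return fmt.Errorf("already initialized")
    }
    obj.data = data
    obj.closeChan = make(chan struct{})
    obj.initialized = true
    return nil
}

func (obj *Static) Graph() (*pgraph.Graph, error) {
    return pgraph.NewGraph("static"), nil // an empty graph, for illustration
}

// Next never announces a new graph: the engine builds the graph once and then
// only reacts to resource events, which is also what the examples above do
// when NoWatch is set (they return nil).
func (obj *Static) Next() chan error { return nil }

func (obj *Static) Close() error {
    close(obj.closeChan)
    return nil
}

var _ gapi.GAPI = &Static{} // compile-time check against the interface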
lib/cli.go (34 changed lines)
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -55,17 +55,17 @@ func run(c *cli.Context) error {

     if _ = c.String("code"); c.IsSet("code") {
         if obj.GAPI != nil {
-            return fmt.Errorf("Can't combine code GAPI with existing GAPI.")
+            return fmt.Errorf("can't combine code GAPI with existing GAPI")
         }
         // TODO: implement DSL GAPI
         //obj.GAPI = &dsl.GAPI{
         //    Code: &s,
         //}
-        return fmt.Errorf("The Code GAPI is not implemented yet!") // TODO: DSL
+        return fmt.Errorf("the Code GAPI is not implemented yet") // TODO: DSL
     }
     if y := c.String("yaml"); c.IsSet("yaml") {
         if obj.GAPI != nil {
-            return fmt.Errorf("Can't combine YAML GAPI with existing GAPI.")
+            return fmt.Errorf("can't combine YAML GAPI with existing GAPI")
         }
         obj.GAPI = &yamlgraph.GAPI{
             File: &y,
@@ -73,7 +73,7 @@ func run(c *cli.Context) error {
     }
     if p := c.String("puppet"); c.IsSet("puppet") {
         if obj.GAPI != nil {
-            return fmt.Errorf("Can't combine puppet GAPI with existing GAPI.")
+            return fmt.Errorf("can't combine puppet GAPI with existing GAPI")
         }
         obj.GAPI = &puppet.GAPI{
             PuppetParam: &p,
@@ -84,6 +84,7 @@ func run(c *cli.Context) error {

     obj.NoWatch = c.Bool("no-watch")
     obj.Noop = c.Bool("noop")
+    obj.Sema = c.Int("sema")
     obj.Graphviz = c.String("graphviz")
     obj.GraphvizFilter = c.String("graphviz-filter")
     obj.ConvergedTimeout = c.Int("converged-timeout")
@@ -115,6 +116,9 @@ func run(c *cli.Context) error {
         return err
     }

+    obj.Prometheus = c.Bool("prometheus")
+    obj.PrometheusListen = c.String("prometheus-listen")
+
     // install the exit signal handler
     exit := make(chan struct{})
     defer close(exit)
@@ -132,7 +136,7 @@ func run(c *cli.Context) error {
             return
         }
         log.Println("Interrupted by signal")
-        obj.Exit(fmt.Errorf("Killed by %v", sig))
+        obj.Exit(fmt.Errorf("killed by %v", sig))
         return
     case <-exit:
         return
@@ -152,7 +156,7 @@ func CLI(program, version string, flags Flags) error {

     // test for sanity
     if program == "" || version == "" {
-        return fmt.Errorf("Program was not compiled correctly. Please see Makefile.")
+        return fmt.Errorf("program was not compiled correctly, see Makefile")
     }
     app := cli.NewApp()
     app.Name = program // App.name and App.version pass these values through
@@ -225,6 +229,11 @@ func CLI(program, version string, flags Flags) error {
             Name:  "noop",
             Usage: "globally force all resources into no-op mode",
         },
+        cli.IntFlag{
+            Name:  "sema",
+            Value: -1,
+            Usage: "globally add a semaphore to all resources with this lock count",
+        },
         cli.StringFlag{
             Name:  "graphviz, g",
             Value: "",
@@ -232,7 +241,7 @@ func CLI(program, version string, flags Flags) error {
         },
         cli.StringFlag{
             Name:  "graphviz-filter, gf",
-            Value: "dot", // directed graph default
+            Value: "",
             Usage: "graphviz filter to use",
         },
         cli.IntFlag{
@@ -320,6 +329,15 @@ func CLI(program, version string, flags Flags) error {
             Value: "",
             Usage: "default identity used for generation",
         },
+        cli.BoolFlag{
+            Name:  "prometheus",
+            Usage: "start a prometheus instance",
+        },
+        cli.StringFlag{
+            Name:  "prometheus-listen",
+            Value: "",
+            Usage: "specify prometheus instance binding",
+        },
     },
   },
 }
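Note: the global --sema flag adds one extra semaphore to every resource's meta, using an id of the form ":N" with the name left empty (the lib/main.go hunk below appends it with fmt.Sprintf(":%d", obj.Sema) and notes that an empty id is valid), so at most N resources in the whole graph run at once on top of any per-resource sema entries. A tiny illustration of how such an anonymous id could be parsed alongside named ones; the parsing rules here are assumptions, not mgmt's code:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// semaSize parses a semaphore id of the form "name:N" (or ":N" for the global,
// unnamed semaphore added by --sema); a bare "name" is treated here as size 1.
func semaSize(id string) (name string, size int) {
    name, sizeStr, found := strings.Cut(id, ":")
    if !found {
        return id, 1
    }
    n, err := strconv.Atoi(sizeStr)
    if err != nil || n < 1 {
        return name, 1
    }
    return name, n
}

func main() {
    ids := []string{"mylock:1", "otherlock:42"}
    ids = append(ids, fmt.Sprintf(":%d", 2)) // what `--sema 2` appends per resource
    for _, id := range ids {
        name, size := semaSize(id)
        fmt.Printf("semaphore %q has size %d\n", name, size)
    }
}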
lib/main.go (176 changed lines)
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -23,7 +23,6 @@ import (
     "log"
     "os"
     "path"
-    "sync"
     "time"

     "github.com/purpleidea/mgmt/converger"
@@ -31,6 +30,7 @@ import (
     "github.com/purpleidea/mgmt/gapi"
     "github.com/purpleidea/mgmt/pgp"
     "github.com/purpleidea/mgmt/pgraph"
+    "github.com/purpleidea/mgmt/prometheus"
     "github.com/purpleidea/mgmt/recwatch"
     "github.com/purpleidea/mgmt/remote"
     "github.com/purpleidea/mgmt/resources"
@@ -67,6 +67,7 @@ type Main struct {

     NoWatch          bool   // do not update graph on watched graph definition file changes
     Noop             bool   // globally force all resources into no-op mode
+    Sema             int    // add a semaphore with this lock count to each resource
     Graphviz         string // output file for graphviz data
     GraphvizFilter   string // graphviz filter to use
     ConvergedTimeout int    // exit after approximately this many seconds in a converged state; -1 to disable
@@ -94,6 +95,9 @@ type Main struct {
     PgpIdentity *string
     pgpKeys     *pgp.PGP // agent key pair

+    Prometheus       bool   // enable prometheus metrics
+    PrometheusListen string // prometheus instance bind specification
+
     exit chan error // exit signal
 }

@@ -101,11 +105,11 @@ type Main struct {
 func (obj *Main) Init() error {

     if obj.Program == "" || obj.Version == "" {
-        return fmt.Errorf("You must set the Program and Version strings!")
+        return fmt.Errorf("you must set the Program and Version strings")
     }

     if obj.Prefix != nil && obj.TmpPrefix {
-        return fmt.Errorf("Choosing a prefix and the request for a tmp prefix is illogical!")
+        return fmt.Errorf("choosing a prefix and the request for a tmp prefix is illogical")
     }

     obj.idealClusterSize = uint16(obj.IdealClusterSize)
@@ -114,7 +118,7 @@ func (obj *Main) Init() error {
     }

     if obj.idealClusterSize < 1 {
-        return fmt.Errorf("IdealClusterSize should be at least one!")
+        return fmt.Errorf("the IdealClusterSize should be at least one")
     }

     if obj.NoServer && len(obj.Remotes) > 0 {
@@ -122,19 +126,19 @@ func (obj *Main) Init() error {
         // here, so if we're okay with every remote graph running in an
         // isolated mode, then this is okay. Improve on this if there's
         // someone who really wants to be able to do this.
-        return fmt.Errorf("The Server is required when using Remotes!")
+        return fmt.Errorf("the Server is required when using Remotes")
     }

     if obj.CConns < 0 {
-        return fmt.Errorf("The CConns value should be at least zero!")
+        return fmt.Errorf("the CConns value should be at least zero")
     }

     if obj.ConvergedTimeout >= 0 && obj.CConns > 0 && len(obj.Remotes) > int(obj.CConns) {
-        return fmt.Errorf("You can't converge if you have more remotes than available connections!")
+        return fmt.Errorf("you can't converge if you have more remotes than available connections")
     }

     if obj.Depth < 0 { // user should not be using this argument manually
-        return fmt.Errorf("Negative values for Depth are not permitted!")
+        return fmt.Errorf("negative values for Depth are not permitted")
     }

     // transform the url list inputs into etcd typed lists
@@ -143,19 +147,19 @@ func (obj *Main) Init() error {
         util.FlattenListWithSplit(obj.Seeds, []string{",", ";", " "}),
     )
     if err != nil && len(obj.Seeds) > 0 {
-        return fmt.Errorf("Seeds didn't parse correctly!")
+        return fmt.Errorf("the Seeds didn't parse correctly")
     }
     obj.clientURLs, err = etcdtypes.NewURLs(
         util.FlattenListWithSplit(obj.ClientURLs, []string{",", ";", " "}),
     )
     if err != nil && len(obj.ClientURLs) > 0 {
-        return fmt.Errorf("ClientURLs didn't parse correctly!")
+        return fmt.Errorf("the ClientURLs didn't parse correctly")
     }
     obj.serverURLs, err = etcdtypes.NewURLs(
         util.FlattenListWithSplit(obj.ServerURLs, []string{",", ";", " "}),
     )
     if err != nil && len(obj.ServerURLs) > 0 {
-        return fmt.Errorf("ServerURLs didn't parse correctly!")
+        return fmt.Errorf("the ServerURLs didn't parse correctly")
     }

     obj.exit = make(chan error)
@@ -195,10 +199,10 @@ func (obj *Main) Run() error {
     if h := obj.Hostname; h != nil && *h != "" { // override by cli
         hostname = *h
     } else if err != nil {
-        return errwrap.Wrapf(err, "Can't get default hostname!")
+        return errwrap.Wrapf(err, "can't get default hostname")
     }
     if hostname == "" { // safety check
-        return fmt.Errorf("Hostname cannot be empty!")
+        return fmt.Errorf("hostname cannot be empty")
     }

     var prefix = fmt.Sprintf("/var/lib/%s/", obj.Program) // default prefix
@@ -210,24 +214,39 @@ func (obj *Main) Run() error {
         if obj.TmpPrefix || obj.AllowTmpPrefix {
             var err error
             if prefix, err = ioutil.TempDir("", obj.Program+"-"+hostname+"-"); err != nil {
-                return fmt.Errorf("Main: Error: Can't create temporary prefix!")
+                return fmt.Errorf("can't create temporary prefix")
             }
             log.Println("Main: Warning: Working prefix directory is temporary!")

         } else {
-            return fmt.Errorf("Main: Error: Can't create prefix!")
+            return fmt.Errorf("can't create prefix")
         }
     }
     log.Printf("Main: Working prefix is: %s", prefix)
     pgraphPrefix := fmt.Sprintf("%s/", path.Join(prefix, "pgraph")) // pgraph namespace
     if err := os.MkdirAll(pgraphPrefix, 0770); err != nil {
-        return errwrap.Wrapf(err, "Can't create pgraph prefix")
+        return errwrap.Wrapf(err, "can't create pgraph prefix")
+    }
+
+    var prom *prometheus.Prometheus
+    if obj.Prometheus {
+        prom = &prometheus.Prometheus{
+            Listen: obj.PrometheusListen,
+        }
+        if err := prom.Init(); err != nil {
+            return errwrap.Wrapf(err, "can't create initiate Prometheus instance")
+        }
+
+        log.Printf("Main: Prometheus: Starting instance on %s", prom.Listen)
+        if err := prom.Start(); err != nil {
+            return errwrap.Wrapf(err, "can't start initiate Prometheus instance")
+        }
     }

     if !obj.NoPgp {
         pgpPrefix := fmt.Sprintf("%s/", path.Join(prefix, "pgp"))
         if err := os.MkdirAll(pgpPrefix, 0770); err != nil {
-            return errwrap.Wrapf(err, "Can't create pgp prefix")
+            return errwrap.Wrapf(err, "can't create pgp prefix")
         }

         pgpKeyringPath := path.Join(pgpPrefix, pgp.DefaultKeyringFile) // default path
@@ -238,7 +257,7 @@ func (obj *Main) Run() error {

         var err error
         if obj.pgpKeys, err = pgp.Import(pgpKeyringPath); err != nil && !os.IsNotExist(err) {
-            return errwrap.Wrapf(err, "Can't import pgp key")
+            return errwrap.Wrapf(err, "can't import pgp key")
         }

         if obj.pgpKeys == nil {
@@ -250,24 +269,23 @@ func (obj *Main) Run() error {

             name, comment, email, err := pgp.ParseIdentity(identity)
             if err != nil {
-                return errwrap.Wrapf(err, "Can't parse user string")
+                return errwrap.Wrapf(err, "can't parse user string")
             }

             // TODO: Make hash configurable
             if obj.pgpKeys, err = pgp.Generate(name, comment, email, nil); err != nil {
-                return errwrap.Wrapf(err, "Can't creating pgp key")
+                return errwrap.Wrapf(err, "can't create pgp key")
             }

             if err := obj.pgpKeys.SaveKey(pgpKeyringPath); err != nil {
-                return errwrap.Wrapf(err, "Can't save pgp key")
+                return errwrap.Wrapf(err, "can't save pgp key")
             }
         }

         // TODO: Import admin key
     }

-    var wg sync.WaitGroup
     var G, oldGraph *pgraph.Graph

     // exit after `max-runtime` seconds for no reason at all...
@@ -308,7 +326,7 @@ func (obj *Main) Run() error {
     )
     if EmbdEtcd == nil {
         // TODO: verify EmbdEtcd is not nil below...
-        obj.Exit(fmt.Errorf("Main: Etcd: Creation failed!"))
+        obj.Exit(fmt.Errorf("Main: Etcd: Creation failed"))
     } else if err := EmbdEtcd.Startup(); err != nil { // startup (returns when etcd main loop is running)
         obj.Exit(fmt.Errorf("Main: Etcd: Startup failed: %v", err))
     }
@@ -325,43 +343,46 @@ func (obj *Main) Run() error {
             return nil
         }
         // send our individual state into etcd for others to see
-        return etcd.EtcdSetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
+        return etcd.SetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
     }
     if EmbdEtcd != nil {
         converger.SetStateFn(convergerStateFn)
     }

+    // implementation of the World API (alternates can be substituted in)
+    world := &etcd.World{
+        Hostname: hostname,
+        EmbdEtcd: EmbdEtcd,
+    }
+
     var gapiChan chan error // stream events are nil errors
     if obj.GAPI != nil {
         data := gapi.Data{
             Hostname: hostname,
-            // NOTE: alternate implementations can be substituted in
-            World: &etcd.World{
-                Hostname: hostname,
-                EmbdEtcd: EmbdEtcd,
-            },
+            World:    world,
             Noop:     obj.Noop,
             NoWatch:  obj.NoWatch,
         }
         if err := obj.GAPI.Init(data); err != nil {
             obj.Exit(fmt.Errorf("Main: GAPI: Init failed: %v", err))
         } else if !obj.NoWatch {
-            gapiChan = obj.GAPI.SwitchStream() // stream of graph switch events!
+            gapiChan = obj.GAPI.Next() // stream of graph switch events!
         }
     }

     exitchan := make(chan struct{}) // exit on close
     go func() {
         startChan := make(chan struct{}) // start signal
-        go func() { startChan <- struct{}{} }()
+        close(startChan) // kick it off!

         log.Println("Etcd: Starting...")
-        etcdChan := etcd.EtcdWatch(EmbdEtcd)
+        etcdChan := etcd.WatchAll(EmbdEtcd)
         first := true // first loop or not
         for {
             log.Println("Main: Waiting...")
             select {
             case <-startChan: // kick the loop once at start
+                startChan = nil // disable
                 // pass

             case b := <-etcdChan:
@@ -380,8 +401,7 @@ func (obj *Main) Run() error {
             }
             if err != nil {
                 obj.Exit(err) // trigger exit
-                continue
-                //return // TODO: return or wait for exitchan?
+                continue // wait for exitchan
             }
             if obj.NoWatch { // extra safety for bad GAPI's
                 log.Printf("Main: GAPI stream should be quiet with NoWatch!") // fix the GAPI!
@@ -401,7 +421,7 @@ func (obj *Main) Run() error {
             // run graph vertex LOCK...
             if !first { // TODO: we can flatten this check out I think
                 converger.Pause() // FIXME: add sync wait?
-                G.Pause() // sync
+                G.Pause(false) // sync

                 //G.UnGroup() // FIXME: implement me if needed!
             }
@@ -412,36 +432,46 @@ func (obj *Main) Run() error {
                 log.Printf("Config: Error creating new graph: %v", err)
                 // unpause!
                 if !first {
-                    G.Start(&wg, first) // sync
+                    G.Start(first) // sync
                     converger.Start() // after G.Start()
                 }
                 continue
             }
             newGraph.Flags = pgraph.Flags{Debug: obj.Flags.Debug}
             // pass in the information we need
             newGraph.AssociateData(&resources.Data{
-                Converger: converger,
-                Prefix:    pgraphPrefix,
-                Debug:     obj.Flags.Debug,
+                Hostname:   hostname,
+                Converger:  converger,
+                Prometheus: prom,
+                World:      world,
+                Prefix:     pgraphPrefix,
+                Debug:      obj.Flags.Debug,
             })

-            // apply the global noop parameter if requested
-            if obj.Noop {
-                for _, m := range newGraph.GraphMetas() {
+            for _, m := range newGraph.GraphMetas() {
+                // apply the global noop parameter if requested
+                if obj.Noop {
                     m.Noop = obj.Noop
                 }
+
+                // append the semaphore to each resource
+                if obj.Sema > 0 { // NOTE: size == 0 would block
+                    // a semaphore with an empty id is valid
+                    m.Sema = append(m.Sema, fmt.Sprintf(":%d", obj.Sema))
+                }
             }

             // FIXME: make sure we "UnGroup()" any semi-destructive
             // changes to the resources so our efficient GraphSync
             // will be able to re-use and cmp to the old graph.
+            log.Printf("Main: GraphSync...")
             newFullGraph, err := newGraph.GraphSync(oldGraph)
             if err != nil {
                 log.Printf("Config: Error running graph sync: %v", err)
                 // unpause!
                 if !first {
-                    G.Start(&wg, first) // sync
+                    G.Start(first) // sync
                     converger.Start() // after G.Start()
                 }
                 continue
             }
@@ -453,21 +483,31 @@ func (obj *Main) Run() error {
             // TODO: do we want to do a transitive reduction?
             // FIXME: run a type checker that verifies all the send->recv relationships

-            log.Printf("Graph: %v", G) // show graph
-            if obj.GraphvizFilter != "" {
-                if err := G.ExecGraphviz(obj.GraphvizFilter, obj.Graphviz); err != nil {
-                    log.Printf("Graphviz: %v", err)
-                } else {
-                    log.Printf("Graphviz: Successfully generated graph!")
-                }
+            // Call this here because at this point the graph does not
+            // know anything about the prometheus instance.
+            if err := prom.UpdatePgraphStartTime(); err != nil {
+                log.Printf("Main: Prometheus.UpdatePgraphStartTime() errored: %v", err)
             }
             // G.Start(...) needs to be synchronous or wait,
             // because if half of the nodes are started and
             // some are not ready yet and the EtcdWatch
             // loops, we'll cause G.Pause(...) before we
             // even got going, thus causing nil pointer errors
-            G.Start(&wg, first) // sync
+            G.Start(first) // sync
             converger.Start() // after G.Start()
+
+            log.Printf("Graph: %v", G) // show graph
+            if obj.Graphviz != "" {
+                filter := obj.GraphvizFilter
+                if filter == "" {
+                    filter = "dot" // directed graph default
+                }
+                if err := G.ExecGraphviz(filter, obj.Graphviz, hostname); err != nil {
+                    log.Printf("Graphviz: %v", err)
+                } else {
+                    log.Printf("Graphviz: Successfully generated graph!")
+                }
+            }
             first = false
         }
     }()
@@ -492,7 +532,7 @@ func (obj *Main) Run() error {

     // initialize the add watcher, which calls the f callback on map changes
     convergerCb := func(f func(map[string]bool) error) (func(), error) {
-        return etcd.EtcdAddHostnameConvergedWatcher(EmbdEtcd, f)
+        return etcd.AddHostnameConvergedWatcher(EmbdEtcd, f)
     }

     // build remotes struct for remote ssh
@@ -531,34 +571,40 @@ func (obj *Main) Run() error {

     if obj.GAPI != nil {
         if err := obj.GAPI.Close(); err != nil {
-            err = errwrap.Wrapf(err, "GAPI closed poorly!")
+            err = errwrap.Wrapf(err, "the GAPI closed poorly")
             reterr = multierr.Append(reterr, err) // list of errors
         }
     }

     configWatcher.Close() // stop sending file changes to remotes
     if err := remotes.Exit(); err != nil { // tell all the remote connections to shutdown; waits!
-        err = errwrap.Wrapf(err, "Remote exited poorly!")
+        err = errwrap.Wrapf(err, "the Remote exited poorly")
         reterr = multierr.Append(reterr, err) // list of errors
     }

-    G.Exit() // tell all the children to exit
-
     // tell inner main loop to exit
     close(exitchan)

+    G.Exit() // tells all the children to exit, and waits for them to do so
+
     // cleanup etcd main loop last so it can process everything first
     if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd
-        err = errwrap.Wrapf(err, "Etcd exited poorly!")
+        err = errwrap.Wrapf(err, "embedded Etcd exited poorly")
        reterr = multierr.Append(reterr, err) // list of errors
     }

+    if obj.Prometheus {
+        log.Printf("Main: Prometheus: Stopping instance")
+        if err := prom.Stop(); err != nil {
+            err = errwrap.Wrapf(err, "the Prometheus instance exited poorly")
+            reterr = multierr.Append(reterr, err)
+        }
+    }
+
     if obj.Flags.Debug {
         log.Printf("Main: Graph: %v", G)
     }

-    wg.Wait() // wait for primary go routines to exit
-
     // TODO: wait for each vertex to exit...
     log.Println("Goodbye!")
     return reterr
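Note: the Prometheus support above is an optional instance built from --prometheus and --prometheus-listen, started before the graph runs and stopped during shutdown. As a rough standalone equivalent of what such an instance has to do (serve a /metrics endpoint on a configurable bind address), using the standard client_golang library rather than mgmt's own prometheus package; the bind address shown is an assumption:

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    listen := "127.0.0.1:9233" // assumed default; pass your own bind address
    http.Handle("/metrics", promhttp.Handler())
    log.Printf("prometheus: serving metrics on http://%s/metrics", listen)
    if err := http.ListenAndServe(listen, nil); err != nil {
        log.Fatalf("prometheus: %v", err)
    }
}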
main.go (2 changed lines)
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
misc/delta-cpu.sh (new executable file, 81 lines)
@@ -0,0 +1,81 @@
#!/bin/bash
# shitty cpu count control, useful for live demos

minimum=1	# don't decrease below this number of cpus
maximum=8	# don't increase above this number of cpus
count=1		# initial count
factor=3
function output() {
	count=$1	# arg!
	cat << EOF > ~/code/mgmt/examples/virt4.yaml
---
graph: mygraph
resources:
  virt:
  - name: mgmt4
    meta:
      limit: .inf
      burst: 0
    uri: 'qemu:///session'
    cpus: $count
    maxcpus: $maximum
    memory: 524288
    boot:
    - hd
    disk:
    - type: qcow2
      source: "~/.local/share/libvirt/images/fedora-23-scratch.qcow2"
    state: running
    transient: false
edges: []
comment: "qemu-img create -b fedora-23.qcow2 -f qcow2 fedora-23-scratch.qcow2"
EOF
}
#tput cuu 1 && tput el	# remove last line
str=''
tnuoc=$((maximum-count))	# backwards count
count2=$((count * factor))
tnuoc2=$((tnuoc * factor))
left=`yes '>' | head -$count2 | paste -s -d '' -`
right=`yes ' ' | head -$tnuoc2 | paste -s -d '' -`
str="${left}${right}"
_min=$((minimum-1))
_max=$((maximum+1))
reset	# clean up once...
output $count	# call function
while true; do

	read -n1 -r -s -p "CPUs count is: $count; ${str} Press +/- key to adjust." key
	if [ "$key" = "q" ] || [ "$key" = "Q" ]; then
		echo	# newline
		exit
	fi
	if [ ! "$key" = "+" ] && [ ! "$key" = "-" ] && [ ! "$key" = "=" ] && [ ! "$key" = "_" ]; then	# wrong key
		reset	# woops, reset it all...
		continue
	fi
	if [ "$key" == "+" ] || [ "$key" == "=" ]; then
		count=$((count+1))
	fi
	if [ "$key" == "-" ] || [ "$key" == "_" ]; then
		count=$((count-1))
	fi
	if [ $count -eq $_min ]; then	# min
		count=$minimum
	fi
	if [ $count -eq $_max ]; then	# max
		count=$maximum
	fi

	tnuoc=$((maximum-count))	# backwards count
	#echo "count is: $count"
	#echo "tnuoc is: $tnuoc"
	count2=$((count * factor))
	tnuoc2=$((tnuoc * factor))
	left=`yes '>' | head -$count2 | paste -s -d '' -`
	right=`yes ' ' | head -$tnuoc2 | paste -s -d '' -`
	str="${left}${right}"
	#echo "str is: $str"
	echo -ne '\r'	# backup
	output $count	# call function
done
@@ -14,26 +14,38 @@ sudo_command=$(which sudo)
 YUM=`which yum 2>/dev/null`
 DNF=`which dnf 2>/dev/null`
 APT=`which apt-get 2>/dev/null`
+BREW=`which brew 2>/dev/null`
+PACMAN=`which pacman 2>/dev/null`
+
 # if DNF is available use it
 if [ -x "$DNF" ]; then
 	YUM=$DNF
 fi

-if [ -z "$YUM" -a -z "$APT" ]; then
+if [ -z "$YUM" -a -z "$APT" -a -z "$BREW" -a -z "$PACMAN" ]; then
 	echo "The package managers can't be found."
 	exit 1
 fi

 if [ ! -z "$YUM" ]; then
 	$sudo_command $YUM install -y libvirt-devel
+	$sudo_command $YUM install -y augeas-devel
+
 fi
 if [ ! -z "$APT" ]; then
 	$sudo_command $APT install -y libvirt-dev || true
+	$sudo_command $APT install -y libaugeas-dev || true
 	$sudo_command $APT install -y libpcap0.8-dev || true
 fi
+
+if [ ! -z "$BREW" ]; then
+	$BREW install libvirt || true
+fi
+
+if [ ! -z "$PACMAN" ]; then
+	$sudo_command $PACMAN -S --noconfirm libvirt augeas libpcap
+fi
+
 if [ $travis -eq 0 ]; then
 	if [ ! -z "$YUM" ]; then
 		# some go dependencies are stored in mercurial
@@ -47,11 +59,14 @@ if [ $travis -eq 0 ]; then
 		$sudo_command $APT install -y golang-golang-x-tools || true
 		$sudo_command $APT install -y golang-go.tools || true
 	fi
+	if [ ! -z "$PACMAN" ]; then
+		$sudo_command $PACMAN -S --noconfirm go
+	fi
 fi

 # if golang is too old, we don't want to fail with an obscure error later
-if go version | grep 'go1\.[0123]\.'; then
-	echo "mgmt requires go1.4 or higher."
+if go version | grep 'go1\.[012345]\.'; then
+	echo "mgmt requires go1.6 or higher."
 	exit 1
 fi

@@ -5,7 +5,7 @@ After=systemd-networkd.service
 Requires=systemd-networkd.service

 [Service]
-ExecStart=/usr/bin/mgmt run ${OPTS}
+ExecStart=/usr/bin/mgmt run $OPTS
 RestartSec=5s
 Restart=always

@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -1,5 +1,5 @@
|
|||||||
// Mgmt
|
// Mgmt
|
||||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
//
|
//
|
||||||
// This program is free software: you can redistribute it and/or modify
|
// This program is free software: you can redistribute it and/or modify
|
||||||
@@ -21,13 +21,17 @@ import (
 "fmt"
 "log"
 "math"
+"strings"
 "sync"
 "time"

 "github.com/purpleidea/mgmt/event"
+"github.com/purpleidea/mgmt/prometheus"
 "github.com/purpleidea/mgmt/resources"

+multierr "github.com/hashicorp/go-multierror"
 errwrap "github.com/pkg/errors"
+"golang.org/x/time/rate"
 )

 // GetTimestamp returns the timestamp of a vertex
@@ -60,29 +64,32 @@ func (g *Graph) OKTimestamp(v *Vertex) bool {
 return true
 }

-// Poke notifies nodes after me in the dependency graph that they need refreshing...
+// Poke tells nodes after me in the dependency graph that they need to refresh.
-// NOTE: this assumes that this can never fail or need to be rescheduled
+func (g *Graph) Poke(v *Vertex) error {
-func (g *Graph) Poke(v *Vertex, activity bool) error {
+// if we're pausing (or exiting) then we should suspend poke's so that
+// the graph doesn't go on running forever until it's completely done!
+// this is an optional feature which we can do by default on user exit
+if g.fastPause {
+return nil // TODO: should this be an error instead?
+}

 var wg sync.WaitGroup
 // these are all the vertices pointing AWAY FROM v, eg: v -> ???
 for _, n := range g.OutgoingGraphVertices(v) {
-// XXX: if we're in state event and haven't been cancelled by
+// we can skip this poke if resource hasn't done work yet... it
-// apply, then we can cancel a poke to a child, right? XXX
+// needs to be poked if already running, or not running though!
-// XXX: if n.Res.getState() != resources.ResStateEvent || activity { // is this correct?
+// TODO: does this need an || activity flag?
-if true || activity { // XXX: ???
+if n.Res.GetState() != resources.ResStateProcess {
 if g.Flags.Debug {
 log.Printf("%s[%s]: Poke: %s[%s]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
 }
 wg.Add(1)
 go func(nn *Vertex) error {
 defer wg.Done()
-edge := g.Adjacency[v][nn] // lookup
+//edge := g.Adjacency[v][nn] // lookup
-notify := edge.Notify && edge.Refresh()
+//notify := edge.Notify && edge.Refresh()
+return nn.SendEvent(event.EventPoke, nil)
-// FIXME: is it okay that this is sync?
-nn.SendEvent(event.EventPoke, true, notify)
-// TODO: check return value?
-return nil // never error for now...
 }(n)

 } else {
@@ -91,33 +98,41 @@ func (g *Graph) Poke(v *Vertex, activity bool) error {
 }
 }
 }
+// TODO: do something with return values?
 wg.Wait() // wait for all the pokes to complete
 return nil
 }

 // BackPoke pokes the pre-requisites that are stale and need to run before I can run.
 func (g *Graph) BackPoke(v *Vertex) {
+var wg sync.WaitGroup
 // these are all the vertices pointing TO v, eg: ??? -> v
 for _, n := range g.IncomingGraphVertices(v) {
 x, y, s := v.GetTimestamp(), n.GetTimestamp(), n.Res.GetState()
-// if the parent timestamp needs poking AND it's not in state
+// If the parent timestamp needs poking AND it's not running
-// ResStateEvent, then poke it. If the parent is in ResStateEvent it
+// Process, then poke it. If the parent is in ResStateProcess it
 // means that an event is pending, so we'll be expecting a poke
 // back soon, so we can safely discard the extra parent poke...
 // TODO: implement a stateLT (less than) to tell if something
 // happens earlier in the state cycle and that doesn't wrap nil
-if x >= y && (s != resources.ResStateEvent && s != resources.ResStateCheckApply) {
+if x >= y && (s != resources.ResStateProcess && s != resources.ResStateCheckApply) {
 if g.Flags.Debug {
 log.Printf("%s[%s]: BackPoke: %s[%s]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
 }
-// FIXME: is it okay that this is sync?
+wg.Add(1)
-n.SendEvent(event.EventBackPoke, true, false)
+go func(nn *Vertex) error {
+defer wg.Done()
+return nn.SendEvent(event.EventBackPoke, nil)
+}(n)

 } else {
 if g.Flags.Debug {
 log.Printf("%s[%s]: BackPoke: %s[%s]: Skipped!", v.Kind(), v.GetName(), n.Kind(), n.GetName())
 }
 }
 }
+// TODO: do something with return values?
+wg.Wait() // wait for all the pokes to complete
 }

 // RefreshPending determines if any previous nodes have a refresh pending here.
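The new Poke and BackPoke bodies above replace synchronous SendEvent calls with a fan-out: one goroutine per neighbouring vertex, collected by a sync.WaitGroup. Here is a minimal, self-contained sketch of that pattern; the vertex/event types are hypothetical stand-ins, not the mgmt API.

```go
package main

import (
	"fmt"
	"sync"
)

// notify fans an event out to every neighbour in its own goroutine and waits
// for all of the sends to finish before returning, mirroring the WaitGroup
// pattern that Poke/BackPoke use above.
func notify(neighbours []string, send func(string) error) {
	var wg sync.WaitGroup
	for _, n := range neighbours {
		wg.Add(1)
		go func(nn string) { // pass the value in to avoid capturing the loop variable
			defer wg.Done()
			if err := send(nn); err != nil {
				fmt.Printf("poke to %s failed: %v\n", nn, err)
			}
		}(n)
	}
	wg.Wait() // wait for all the pokes to complete
}

func main() {
	notify([]string{"a", "b", "c"}, func(name string) error {
		fmt.Println("poke:", name)
		return nil
	})
}
```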
@@ -159,114 +174,156 @@ func (g *Graph) Process(v *Vertex) error {
 if g.Flags.Debug {
 log.Printf("%s[%s]: Process()", obj.Kind(), obj.GetName())
 }
-obj.SetState(resources.ResStateEvent)
+// FIXME: should these SetState methods be here or after the sema code?
-var ok = true
+defer obj.SetState(resources.ResStateNil) // reset state when finished
-var applied = false // did we run an apply?
+obj.SetState(resources.ResStateProcess)

 // is it okay to run dependency wise right now?
 // if not, that's okay because when the dependency runs, it will poke
 // us back and we will run if needed then!
-if g.OKTimestamp(v) {
+if !g.OKTimestamp(v) {
-if g.Flags.Debug {
+go g.BackPoke(v)
-log.Printf("%s[%s]: OKTimestamp(%v)", obj.Kind(), obj.GetName(), v.GetTimestamp())
+return nil
+}
+// timestamp must be okay...
+if g.Flags.Debug {
+log.Printf("%s[%s]: OKTimestamp(%v)", obj.Kind(), obj.GetName(), v.GetTimestamp())
+}

+// semaphores!
+// These shouldn't ever block an exit, since the graph should eventually
+// converge causing their them to unlock. More interestingly, since they
+// run in a DAG alphabetically, there is no way to permanently deadlock,
+// assuming that resources individually don't ever block from finishing!
+// The exception is that semaphores with a zero count will always block!
+// TODO: Add a close mechanism to close/unblock zero count semaphores...
+semas := obj.Meta().Sema
+if g.Flags.Debug && len(semas) > 0 {
+log.Printf("%s[%s]: Sema: P(%s)", obj.Kind(), obj.GetName(), strings.Join(semas, ", "))
+}
+if err := g.SemaLock(semas); err != nil { // lock
+// NOTE: in practice, this might not ever be truly necessary...
+return fmt.Errorf("shutdown of semaphores")
+}
+defer g.SemaUnlock(semas) // unlock
+if g.Flags.Debug && len(semas) > 0 {
+defer log.Printf("%s[%s]: Sema: V(%s)", obj.Kind(), obj.GetName(), strings.Join(semas, ", "))
+}

+var ok = true
+var applied = false // did we run an apply?

+// connect any senders to receivers and detect if values changed
+if updated, err := obj.SendRecv(obj); err != nil {
+return errwrap.Wrapf(err, "could not SendRecv in Process")
+} else if len(updated) > 0 {
+for _, changed := range updated {
+if changed { // at least one was updated
+obj.StateOK(false) // invalidate cache, mark as dirty
+break
+}
 }
+}

-obj.SetState(resources.ResStateCheckApply)
+var noop = obj.Meta().Noop // lookup the noop value
+var refresh bool
+var checkOK bool
+var err error

-// connect any senders to receivers and detect if values changed
+if g.Flags.Debug {
-if updated, err := obj.SendRecv(obj); err != nil {
+log.Printf("%s[%s]: CheckApply(%t)", obj.Kind(), obj.GetName(), !noop)
-return errwrap.Wrapf(err, "could not SendRecv in Process")
+}
-} else if len(updated) > 0 {
-for _, changed := range updated {
+// lookup the refresh (notification) variable
-if changed { // at least one was updated
+refresh = g.RefreshPending(v) // do i need to perform a refresh?
-obj.StateOK(false) // invalidate cache, mark as dirty
+obj.SetRefresh(refresh) // tell the resource
-break
+// changes can occur after this...
+obj.SetState(resources.ResStateCheckApply)

+// check cached state, to skip CheckApply; can't skip if refreshing
+if !refresh && obj.IsStateOK() {
+checkOK, err = true, nil

+// NOTE: technically this block is wrong because we don't know
+// if the resource implements refresh! If it doesn't, we could
+// skip this, but it doesn't make a big difference under noop!
+} else if noop && refresh { // had a refresh to do w/ noop!
+checkOK, err = false, nil // therefore the state is wrong

+// run the CheckApply!
+} else {
+// if this fails, don't UpdateTimestamp()
+checkOK, err = obj.CheckApply(!noop)

+if promErr := obj.Prometheus().UpdateCheckApplyTotal(obj.Kind(), !noop, !checkOK, err != nil); promErr != nil {
+// TODO: how to error correctly
+log.Printf("%s[%s]: Prometheus.UpdateCheckApplyTotal() errored: %v", v.Kind(), v.GetName(), err)
+}
+// TODO: Can the `Poll` converged timeout tracking be a
+// more general method for all converged timeouts? this
+// would simplify the resources by removing boilerplate
+if v.Meta().Poll > 0 {
+if !checkOK { // something changed, restart timer
+cuid, _, _ := v.Res.ConvergerUIDs() // get the converger uid used to report status
+cuid.ResetTimer() // activity!
+if g.Flags.Debug {
+log.Printf("%s[%s]: Converger: ResetTimer", obj.Kind(), obj.GetName())
 }
 }
 }

-var noop = obj.Meta().Noop // lookup the noop value
-var refresh bool
-var checkOK bool
-var err error

-if g.Flags.Debug {
-log.Printf("%s[%s]: CheckApply(%t)", obj.Kind(), obj.GetName(), !noop)
-}

-// lookup the refresh (notification) variable
-refresh = g.RefreshPending(v) // do i need to perform a refresh?
-obj.SetRefresh(refresh) // tell the resource

-// check cached state, to skip CheckApply; can't skip if refreshing
-if !refresh && obj.IsStateOK() {
-checkOK, err = true, nil

-// NOTE: technically this block is wrong because we don't know
-// if the resource implements refresh! If it doesn't, we could
-// skip this, but it doesn't make a big difference under noop!
-} else if noop && refresh { // had a refresh to do w/ noop!
-checkOK, err = false, nil // therefore the state is wrong

-// run the CheckApply!
-} else {
-// if this fails, don't UpdateTimestamp()
-checkOK, err = obj.CheckApply(!noop)
-}

-if checkOK && err != nil { // should never return this way
-log.Fatalf("%s[%s]: CheckApply(): %t, %+v", obj.Kind(), obj.GetName(), checkOK, err)
-}
-if g.Flags.Debug {
-log.Printf("%s[%s]: CheckApply(): %t, %v", obj.Kind(), obj.GetName(), checkOK, err)
-}

-// if CheckApply ran without noop and without error, state should be good
-if !noop && err == nil { // aka !noop || checkOK
-obj.StateOK(true) // reset
-if refresh {
-g.SetUpstreamRefresh(v, false) // refresh happened, clear the request
-obj.SetRefresh(false)
-}
-}

-if !checkOK { // if state *was* not ok, we had to have apply'ed
-if err != nil { // error during check or apply
-ok = false
-} else {
-applied = true
-}
-}

-// when noop is true we always want to update timestamp
-if noop && err == nil {
-ok = true
-}

-if ok {
-// did we actually do work?
-activity := applied
-if noop {
-activity = false // no we didn't do work...
-}

-if activity { // add refresh flag to downstream edges...
-g.SetDownstreamRefresh(v, true)
-}

-// update this timestamp *before* we poke or the poked
-// nodes might fail due to having a too old timestamp!
-v.UpdateTimestamp() // this was touched...
-obj.SetState(resources.ResStatePoking) // can't cancel parent poke
-if err := g.Poke(v, activity); err != nil {
-return errwrap.Wrapf(err, "the Poke() failed")
-}
-}
-// poke at our pre-req's instead since they need to refresh/run...
-return errwrap.Wrapf(err, "could not Process() successfully")
 }
-// else... only poke at the pre-req's that need to run
-go g.BackPoke(v)
+if checkOK && err != nil { // should never return this way
-return nil
+log.Fatalf("%s[%s]: CheckApply(): %t, %+v", obj.Kind(), obj.GetName(), checkOK, err)
+}
+if g.Flags.Debug {
+log.Printf("%s[%s]: CheckApply(): %t, %v", obj.Kind(), obj.GetName(), checkOK, err)
+}

+// if CheckApply ran without noop and without error, state should be good
+if !noop && err == nil { // aka !noop || checkOK
+obj.StateOK(true) // reset
+if refresh {
+g.SetUpstreamRefresh(v, false) // refresh happened, clear the request
+obj.SetRefresh(false)
+}
+}

+if !checkOK { // if state *was* not ok, we had to have apply'ed
+if err != nil { // error during check or apply
+ok = false
+} else {
+applied = true
+}
+}

+// when noop is true we always want to update timestamp
+if noop && err == nil {
+ok = true
+}

+if ok {
+// did we actually do work?
+activity := applied
+if noop {
+activity = false // no we didn't do work...
+}

+if activity { // add refresh flag to downstream edges...
+g.SetDownstreamRefresh(v, true)
+}

+// update this timestamp *before* we poke or the poked
+// nodes might fail due to having a too old timestamp!
+v.UpdateTimestamp() // this was touched...
+obj.SetState(resources.ResStatePoking) // can't cancel parent poke
+if err := g.Poke(v); err != nil {
+return errwrap.Wrapf(err, "the Poke() failed")
+}
+}
+// poke at our pre-req's instead since they need to refresh/run...
+return errwrap.Wrapf(err, "could not Process() successfully")
 }

 // SentinelErr is a sentinal as an error type that wraps an arbitrary error.
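The new semaphore comments in Process() argue that P()/V() pairs taken in a fixed (alphabetical, DAG-ordered) sequence cannot deadlock, except for a zero-count semaphore which always blocks. The SemaLock/SemaUnlock implementation is not part of this diff; the following is only a sketch of the underlying idea, using a buffered channel as a counting semaphore.

```go
package main

import "fmt"

// sema is a counting semaphore built on a buffered channel: P blocks once the
// count is exhausted and V releases one slot. A semaphore created with size 0
// blocks forever on P, which matches the "zero count" caveat in the comments.
type sema chan struct{}

func newSema(size int) sema { return make(sema, size) }

func (s sema) P() { s <- struct{}{} } // acquire
func (s sema) V() { <-s }             // release

func main() {
	s := newSema(2) // at most two holders at once
	s.P()
	s.P()
	fmt.Println("two holders acquired; a third P() would block")
	s.V()
	s.V()
}
```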
@@ -279,6 +336,180 @@ func (obj *SentinelErr) Error() string {
 return obj.err.Error()
 }

+// innerWorker is the CheckApply runner that reads from processChan.
+// TODO: would it be better if this was a method on BaseRes that took in *Graph?
+func (g *Graph) innerWorker(v *Vertex) {
+obj := v.Res
+running := false
+done := make(chan struct{})
+playback := false // do we need to run another one?
+_, wcuid, pcuid := obj.ConvergerUIDs() // get extra cuids (worker, process)

+waiting := false
+var timer = time.NewTimer(time.Duration(math.MaxInt64)) // longest duration
+if !timer.Stop() {
+<-timer.C // unnecessary, shouldn't happen
+}

+var delay = time.Duration(v.Meta().Delay) * time.Millisecond
+var retry = v.Meta().Retry // number of tries left, -1 for infinite
+var limiter = rate.NewLimiter(v.Meta().Limit, v.Meta().Burst)
+limited := false

+wg := &sync.WaitGroup{} // wait for Process routine to exit

+Loop:
+for {
+select {
+case ev, ok := <-obj.ProcessChan(): // must use like this
+if !ok { // processChan closed, let's exit
+break Loop // no event, so no ack!
+}
+if v.Res.Meta().Poll == 0 { // skip for polling
+wcuid.SetConverged(false)
+}

+// if process started, but no action yet, skip!
+if v.Res.GetState() == resources.ResStateProcess {
+if g.Flags.Debug {
+log.Printf("%s[%s]: Skipped event!", v.Kind(), v.GetName())
+}
+ev.ACK() // ready for next message
+v.Res.QuiesceGroup().Done()
+continue
+}

+// if running, we skip running a new execution!
+// if waiting, we skip running a new execution!
+if running || waiting {
+if g.Flags.Debug {
+log.Printf("%s[%s]: Playback added!", v.Kind(), v.GetName())
+}
+playback = true
+ev.ACK() // ready for next message
+v.Res.QuiesceGroup().Done()
+continue
+}

+// catch invalid rates
+if v.Meta().Burst == 0 && !(v.Meta().Limit == rate.Inf) { // blocked
+e := fmt.Errorf("%s[%s]: Permanently limited (rate != Inf, burst: 0)", v.Kind(), v.GetName())
+ev.ACK() // ready for next message
+v.Res.QuiesceGroup().Done()
+v.SendEvent(event.EventExit, &SentinelErr{e})
+continue
+}

+// rate limit
+// FIXME: consider skipping rate limit check if
+// the event is a poke instead of a watch event
+if !limited && !(v.Meta().Limit == rate.Inf) { // skip over the playback event...
+now := time.Now()
+r := limiter.ReserveN(now, 1) // one event
+// r.OK() seems to always be true here!
+d := r.DelayFrom(now)
+if d > 0 { // delay
+limited = true
+playback = true
+log.Printf("%s[%s]: Limited (rate: %v/sec, burst: %d, next: %v)", v.Kind(), v.GetName(), v.Meta().Limit, v.Meta().Burst, d)
+// start the timer...
+timer.Reset(d)
+waiting = true // waiting for retry timer
+ev.ACK()
+v.Res.QuiesceGroup().Done()
+continue
+} // otherwise, we run directly!
+}
+limited = false // let one through

+wg.Add(1)
+running = true
+go func(ev *event.Event) {
+pcuid.SetConverged(false) // "block" Process
+defer wg.Done()
+if e := g.Process(v); e != nil {
+playback = true
+log.Printf("%s[%s]: CheckApply errored: %v", v.Kind(), v.GetName(), e)
+if retry == 0 {
+if err := obj.Prometheus().UpdateState(fmt.Sprintf("%v[%v]", v.Kind(), v.GetName()), v.Kind(), prometheus.ResStateHardFail); err != nil {
+// TODO: how to error this?
+log.Printf("%s[%s]: Prometheus.UpdateState() errored: %v", v.Kind(), v.GetName(), err)
+}

+// wrap the error in the sentinel
+v.Res.QuiesceGroup().Done() // before the Wait that happens in SendEvent!
+v.SendEvent(event.EventExit, &SentinelErr{e})
+return
+}
+if retry > 0 { // don't decrement the -1
+retry--
+}
+if err := obj.Prometheus().UpdateState(fmt.Sprintf("%v[%v]", v.Kind(), v.GetName()), v.Kind(), prometheus.ResStateSoftFail); err != nil {
+// TODO: how to error this?
+log.Printf("%s[%s]: Prometheus.UpdateState() errored: %v", v.Kind(), v.GetName(), err)
+}
+log.Printf("%s[%s]: CheckApply: Retrying after %.4f seconds (%d left)", v.Kind(), v.GetName(), delay.Seconds(), retry)
+// start the timer...
+timer.Reset(delay)
+waiting = true // waiting for retry timer
+// don't v.Res.QuiesceGroup().Done() b/c
+// the timer is running and it can exit!
+return
+}
+retry = v.Meta().Retry // reset on success
+close(done) // trigger
+}(ev)
+ev.ACK() // sync (now mostly useless)

+case <-timer.C:
+if v.Res.Meta().Poll == 0 { // skip for polling
+wcuid.SetConverged(false)
+}
+waiting = false
+if !timer.Stop() {
+//<-timer.C // blocks, docs are wrong!
+}
+log.Printf("%s[%s]: CheckApply delay expired!", v.Kind(), v.GetName())
+close(done)

+// a CheckApply run (with possibly retry pause) finished
+case <-done:
+if v.Res.Meta().Poll == 0 { // skip for polling
+wcuid.SetConverged(false)
+}
+if g.Flags.Debug {
+log.Printf("%s[%s]: CheckApply finished!", v.Kind(), v.GetName())
+}
+done = make(chan struct{}) // reset
+// re-send this event, to trigger a CheckApply()
+if playback {
+// this lock avoids us sending to
+// channel after we've closed it!
+// TODO: can this experience indefinite postponement ?
+// see: https://github.com/golang/go/issues/11506
+// pause or exit is in process if not quiescing!
+if !v.Res.IsQuiescing() {
+playback = false
+v.Res.QuiesceGroup().Add(1) // lock around it, b/c still running...
+go func() {
+obj.Event() // replay a new event
+v.Res.QuiesceGroup().Done()
+}()
+}
+}
+running = false
+pcuid.SetConverged(true) // "unblock" Process
+v.Res.QuiesceGroup().Done()

+case <-wcuid.ConvergedTimer():
+wcuid.SetConverged(true) // converged!
+continue
+}
+}
+wg.Wait()
+return
+}

 // Worker is the common run frontend of the vertex. It handles all of the retry
 // and retry delay common code, and ultimately returns the final status of this
 // vertex execution.
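The new innerWorker loop rate-limits events with golang.org/x/time/rate, using ReserveN/DelayFrom so it can postpone work on a timer instead of blocking the event loop. A small standalone example of that same API call pattern (the limit and burst values here are arbitrary, not mgmt defaults):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// two events per second, burst of one: the same ReserveN/DelayFrom
	// pattern that innerWorker uses to decide whether a CheckApply runs
	// now or gets rescheduled via timer.Reset(d).
	limiter := rate.NewLimiter(rate.Limit(2), 1)
	for i := 0; i < 3; i++ {
		now := time.Now()
		r := limiter.ReserveN(now, 1) // reserve one event
		if d := r.DelayFrom(now); d > 0 {
			fmt.Printf("event %d limited, next in %v\n", i, d)
			time.Sleep(d) // the real code arms a timer instead of sleeping
		} else {
			fmt.Printf("event %d runs immediately\n", i)
		}
	}
}
```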
@@ -289,68 +520,36 @@ func (g *Graph) Worker(v *Vertex) error {
 // the Watch() function about which graph it is
 // running on, which isolates things nicely...
 obj := v.Res
-processChan := make(chan event.Event)
+if g.Flags.Debug {
+log.Printf("%s[%s]: Worker: Running", v.Kind(), v.GetName())
+defer log.Printf("%s[%s]: Worker: Stopped", v.Kind(), v.GetName())
+}
+// run the init (should match 1-1 with Close function)
+if err := obj.Init(); err != nil {
+obj.ProcessExit()
+// always exit the worker function by finishing with Close()
+if e := obj.Close(); e != nil {
+err = multierr.Append(err, e) // list of errors
+}
+return errwrap.Wrapf(err, "could not Init() resource")
+}

+// if the CheckApply run takes longer than the converged
+// timeout, we could inappropriately converge mid-apply!
+// avoid this by blocking convergence with a fake report
+// we also add a similar blocker around the worker loop!
+_, wcuid, pcuid := obj.ConvergerUIDs() // get extra cuids (worker, process)
+// XXX: put these in Init() ?
+wcuid.SetConverged(true) // starts off false, and waits for loop timeout
+pcuid.SetConverged(true) // starts off true, because it's not running...

+wg := obj.ProcessSync()
+wg.Add(1)
 go func() {
-running := false
+defer wg.Done()
-var timer = time.NewTimer(time.Duration(math.MaxInt64)) // longest duration
+g.innerWorker(v)
-if !timer.Stop() {
-<-timer.C // unnecessary, shouldn't happen
-}
-var delay = time.Duration(v.Meta().Delay) * time.Millisecond
-var retry = v.Meta().Retry // number of tries left, -1 for infinite
-var saved event.Event
-Loop:
-for {
-// this has to be synchronous, because otherwise the Res
-// event loop will keep running and change state,
-// causing the converged timeout to fire!
-select {
-case event, ok := <-processChan: // must use like this
-if running && ok {
-// we got an event that wasn't a close,
-// while we were waiting for the timer!
-// if this happens, it might be a bug:(
-log.Fatalf("%s[%s]: Worker: Unexpected event: %+v", v.Kind(), v.GetName(), event)
-}
-if !ok { // processChan closed, let's exit
-break Loop // no event, so no ack!
-}

-// the above mentioned synchronous part, is the
-// running of this function, paired with an ack.
-if e := g.Process(v); e != nil {
-saved = event
-log.Printf("%s[%s]: CheckApply errored: %v", v.Kind(), v.GetName(), e)
-if retry == 0 {
-// wrap the error in the sentinel
-event.ACKNACK(&SentinelErr{e}) // fail the Watch()
-break Loop
-}
-if retry > 0 { // don't decrement the -1
-retry--
-}
-log.Printf("%s[%s]: CheckApply: Retrying after %.4f seconds (%d left)", v.Kind(), v.GetName(), delay.Seconds(), retry)
-// start the timer...
-timer.Reset(delay)
-running = true
-continue
-}
-retry = v.Meta().Retry // reset on success
-event.ACK() // sync

-case <-timer.C:
-if !timer.Stop() {
-//<-timer.C // blocks, docs are wrong!
-}
-running = false
-log.Printf("%s[%s]: CheckApply delay expired!", v.Kind(), v.GetName())
-// re-send this failed event, to trigger a CheckApply()
-go func() { processChan <- saved }()
-// TODO: should we send a fake event instead?
-//saved = nil
-}
-}
 }()

 var err error // propagate the error up (this is a permanent BAD error!)
 // the watch delay runs inside of the Watch resource loop, so that it
 // can still process signals and exit if needed. It shouldn't run any
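Worker() now pairs Init() with Close() and folds any cleanup error into the original one via the newly imported hashicorp/go-multierror package. A minimal sketch of that accumulation pattern, detached from the mgmt resource types:

```go
package main

import (
	"fmt"

	multierr "github.com/hashicorp/go-multierror"
)

// closeAfterInit mimics the Init()/Close() handling above: even when setup
// fails we still run cleanup, and a cleanup error is appended to the original
// error so that neither is lost.
func closeAfterInit(initErr, closeErr error) error {
	err := initErr
	if closeErr != nil {
		err = multierr.Append(err, closeErr) // list of errors
	}
	return err
}

func main() {
	err := closeAfterInit(
		fmt.Errorf("could not Init() resource"),
		fmt.Errorf("close failed"),
	)
	fmt.Println(err) // both messages are reported
}
```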
@@ -378,8 +577,15 @@ func (g *Graph) Worker(v *Vertex) error {
 case event := <-obj.Events():
 // NOTE: this code should match the similar Res code!
 //cuid.SetConverged(false) // TODO: ?
-if exit, send := obj.ReadEvent(&event); exit {
+if exit, send := obj.ReadEvent(event); exit != nil {
-return nil // exit
+obj.ProcessExit()
+err := *exit // exit err
+if e := obj.Close(); err == nil {
+err = e
+} else if e != nil {
+err = multierr.Append(err, e) // list of errors
+}
+return err // exit
 } else if send {
 // if we dive down this rabbit hole, our
 // timer.C won't get seen until we get out!
@@ -408,14 +614,22 @@ func (g *Graph) Worker(v *Vertex) error {
 // NOTE: we can avoid the send if running Watch guarantees
 // one CheckApply event on startup!
 //if pendingSendEvent { // TODO: should this become a list in the future?
-// if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
+// if err := obj.Event() err != nil {
 // return err // we exit or bubble up a NACK...
 // }
 //}
 }

 // TODO: reset the watch retry count after some amount of success
-e := v.Res.Watch(processChan)
+var e error
+if v.Res.Meta().Poll > 0 { // poll instead of watching :(
+cuid, _, _ := v.Res.ConvergerUIDs() // get the converger uid used to report status
+cuid.StartTimer()
+e = v.Res.Poll()
+cuid.StopTimer() // clean up nicely
+} else {
+e = v.Res.Watch() // run the watch normally
+}
 if e == nil { // exit signal
 err = nil // clean exit
 break
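The hunk above chooses between Poll() and Watch() depending on the resource's Poll meta parameter. The mgmt Poll() implementation is not part of this diff; the following is only an illustrative sketch of what an interval-based poll loop can look like, emitting a synthetic event on each tick.

```go
package main

import (
	"fmt"
	"time"
)

// poll sketches the "poll instead of watching" branch: tick at a fixed
// interval and emit an event each time, instead of waiting on a real Watch.
func poll(interval time.Duration, events chan<- struct{}, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			select {
			case events <- struct{}{}: // something may have changed
			default: // drop the tick if nobody is listening
			}
		case <-stop:
			return
		}
	}
}

func main() {
	events := make(chan struct{}, 1)
	stop := make(chan struct{})
	go poll(100*time.Millisecond, events, stop)
	<-events
	fmt.Println("poll tick received")
	close(stop)
}
```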
@@ -439,37 +653,40 @@ func (g *Graph) Worker(v *Vertex) error {
 // by getting the Watch resource to send one event once it's up!
 //v.SendEvent(eventPoke, false, false)
 }
-close(processChan)
+obj.ProcessExit()
+// close resource and return possible errors if any
+if e := obj.Close(); err == nil {
+err = e
+} else if e != nil {
+err = multierr.Append(err, e) // list of errors
+}
 return err
 }

 // Start is a main kick to start the graph. It goes through in reverse topological
 // sort order so that events can't hit un-started vertices.
-func (g *Graph) Start(wg *sync.WaitGroup, first bool) { // start or continue
+func (g *Graph) Start(first bool) { // start or continue
 log.Printf("State: %v -> %v", g.setState(graphStateStarting), g.getState())
 defer log.Printf("State: %v -> %v", g.setState(graphStateStarted), g.getState())
 t, _ := g.TopologicalSort()
-// TODO: only calculate indegree if `first` is true to save resources
 indegree := g.InDegree() // compute all of the indegree's
-for _, v := range Reverse(t) {
+reversed := Reverse(t)
+wg := &sync.WaitGroup{}
-if !v.Res.IsWatching() { // if Watch() is not running...
+for _, v := range reversed { // run the Setup() for everyone first
-wg.Add(1)
+// run these in parallel, as long as we wait before continuing
-// must pass in value to avoid races...
+wg.Add(1)
-// see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/
+go func(vv *Vertex) {
-go func(vv *Vertex) {
+defer wg.Done()
-defer wg.Done()
+if !vv.Res.IsWorking() { // if Worker() is not running...
-// TODO: if a sufficient number of workers error,
+vv.Res.Setup() // initialize some vars in the resource
-// should something be done? Will these restart
+}
-// after perma-failure if we have a graph change?
+}(v)
-if err := g.Worker(vv); err != nil { // contains the Watch and CheckApply loops
+}
-log.Printf("%s[%s]: Exited with failure: %v", vv.Kind(), vv.GetName(), err)
+wg.Wait()
-return
-}
-log.Printf("%s[%s]: Exited", vv.Kind(), vv.GetName())
-}(v)
-}

+// run through the topological reverse, and start or unpause each vertex
+for _, v := range reversed {
 // selective poke: here we reduce the number of initial pokes
 // to the minimum required to activate every vertex in the
 // graph, either by direct action, or by getting poked by a
@@ -482,36 +699,75 @@ func (g *Graph) Start(wg *sync.WaitGroup, first bool) { // start or continue
 // if we are unpausing (since it's not the first run of this
 // function) we need to poke to *unpause* every graph vertex,
 // and not just selectively the subset with no indegree.
-if (!first) || indegree[v] == 0 {
-// ensure state is started before continuing on to next vertex
+// let the startup code know to poke or not
-for !v.SendEvent(event.EventStart, true, false) {
+// this triggers a CheckApply AFTER Watch is Running()
-if g.Flags.Debug {
+// We *don't* need to also do this to new nodes or nodes that
-// if SendEvent fails, we aren't up yet
+// are about to get unpaused, because they'll get poked by one
-log.Printf("%s[%s]: Retrying SendEvent(Start)", v.Kind(), v.GetName())
+// of the indegree == 0 vertices, and an important aspect of the
-// sleep here briefly or otherwise cause
+// Process() function is that even if the state is correct, it
-// a different goroutine to be scheduled
+// will pass through the Poke so that it flows through the DAG.
-time.Sleep(1 * time.Millisecond)
+v.Res.Starter(indegree[v] == 0)

+var unpause = true
+if !v.Res.IsWorking() { // if Worker() is not running...
+unpause = false // doesn't need unpausing on first start
+g.wg.Add(1)
+// must pass in value to avoid races...
+// see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/
+go func(vv *Vertex) {
+defer g.wg.Done()
+defer v.Res.Reset()
+// TODO: if a sufficient number of workers error,
+// should something be done? Should these restart
+// after perma-failure if we have a graph change?
+log.Printf("%s[%s]: Started", vv.Kind(), vv.GetName())
+if err := g.Worker(vv); err != nil { // contains the Watch and CheckApply loops
+log.Printf("%s[%s]: Exited with failure: %v", vv.Kind(), vv.GetName(), err)
+return
 }
-}
+log.Printf("%s[%s]: Exited", vv.Kind(), vv.GetName())
+}(v)
+}

+select {
+case <-v.Res.Started(): // block until started
+case <-v.Res.Stopped(): // we failed on init
+// if the resource Init() fails, we don't hang!
+}

+if unpause { // unpause (if needed)
+v.Res.SendEvent(event.EventStart, nil) // sync!
 }
 }
+// we wait for everyone to start before exiting!
 }

-// Pause sends pause events to the graph in a topological sort order.
+// Pause sends pause events to the graph in a topological sort order. If you set
-func (g *Graph) Pause() {
+// the fastPause argument to true, then it will ask future propagation waves to
+// not run through the graph before exiting, and instead will exit much quicker.
+func (g *Graph) Pause(fastPause bool) {
 log.Printf("State: %v -> %v", g.setState(graphStatePausing), g.getState())
 defer log.Printf("State: %v -> %v", g.setState(graphStatePaused), g.getState())
+if fastPause {
+g.fastPause = true // set flag
+}
 t, _ := g.TopologicalSort()
 for _, v := range t { // squeeze out the events...
-v.SendEvent(event.EventPause, true, false)
+v.SendEvent(event.EventPause, nil) // sync
 }
+g.fastPause = false // reset flag
 }

 // Exit sends exit events to the graph in a topological sort order.
 func (g *Graph) Exit() {
-if g == nil {
+if g == nil { // empty graph that wasn't populated yet
 return
-} // empty graph that wasn't populated yet
+}

+// FIXME: a second ^C could put this into fast pause, but do it for now!
+g.Pause(true) // implement this with pause to avoid duplicating the code

 t, _ := g.TopologicalSort()
 for _, v := range t { // squeeze out the events...
 // turn off the taps...
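Start() above walks the reversed topological sort and, on the first run, only pokes vertices with indegree zero, because every other vertex is reached transitively through the DAG. A small self-contained sketch of the indegree computation behind that selective poke (the string-keyed edge map is an illustration, not the pgraph data structure):

```go
package main

import "fmt"

// inDegree counts incoming edges per vertex. Start() only pokes the vertices
// whose indegree is zero on the first run; everything downstream gets poked
// transitively as Process() flows through the DAG.
func inDegree(edges map[string][]string) map[string]int {
	deg := map[string]int{}
	for v, outs := range edges {
		if _, ok := deg[v]; !ok {
			deg[v] = 0 // make sure sources appear even with no incoming edges
		}
		for _, w := range outs {
			deg[w]++
		}
	}
	return deg
}

func main() {
	edges := map[string][]string{
		"a": {"b"}, // a -> b
		"b": {"c"}, // b -> c
		"c": {},
	}
	for v, d := range inDegree(edges) {
		if d == 0 {
			fmt.Println("initial poke:", v) // only "a" here
		}
	}
}
```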
@@ -520,6 +776,8 @@ func (g *Graph) Exit() {
 // when we hit the 'default' in the select statement!
 // XXX: we can do this to quiesce, but it's not necessary now

-v.SendEvent(event.EventExit, true, false)
+v.SendEvent(event.EventExit, nil)
+v.Res.WaitGroup().Wait()
 }
+g.wg.Wait() // for now, this doesn't need to be a separate Wait() method
 }
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -45,7 +45,7 @@ func (g *Graph) addEdgesByMatchingUIDS(v *Vertex, uids []resources.ResUID) []boo
 // that is to say, the name value of a res is a helpful
 // handle, but it is not necessarily a unique identity!
 // remember, resources can return multiple UID's each!
-if resources.UIDExistsInUIDs(uid, vv.GetUIDs()) {
+if resources.UIDExistsInUIDs(uid, vv.UIDs()) {
 // add edge from: vv -> v
 if uid.Reversed() {
 txt := fmt.Sprintf("AutoEdge: %v[%v] -> %v[%v]", vv.Kind(), vv.GetName(), v.Kind(), v.GetName())
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -54,7 +54,7 @@ func (ag *baseGrouper) name() string {
 // the name method is the only exception: call it any time without side effects!
 func (ag *baseGrouper) init(g *Graph) error {
 if ag.graph != nil {
-return fmt.Errorf("The init method has already been called!")
+return fmt.Errorf("the init method has already been called")
 }
 ag.graph = g // pointer
 ag.vertices = ag.graph.GetVerticesSorted() // cache in deterministic order!
@@ -108,27 +108,27 @@ func (ag *baseGrouper) vertexNext() (v1, v2 *Vertex, err error) {

 func (ag *baseGrouper) vertexCmp(v1, v2 *Vertex) error {
 if v1 == nil || v2 == nil {
-return fmt.Errorf("Vertex is nil!")
+return fmt.Errorf("the vertex is nil")
 }
 if v1 == v2 { // skip yourself
-return fmt.Errorf("Vertices are the same!")
+return fmt.Errorf("the vertices are the same")
 }
 if v1.Kind() != v2.Kind() { // we must group similar kinds
 // TODO: maybe future resources won't need this limitation?
-return fmt.Errorf("The two resources aren't the same kind!")
+return fmt.Errorf("the two resources aren't the same kind")
 }
 // someone doesn't want to group!
 if !v1.Meta().AutoGroup || !v2.Meta().AutoGroup {
-return fmt.Errorf("One of the autogroup flags is false!")
+return fmt.Errorf("one of the autogroup flags is false")
 }
 if v1.Res.IsGrouped() { // already grouped!
-return fmt.Errorf("Already grouped!")
+return fmt.Errorf("already grouped")
 }
 if len(v2.Res.GetGroup()) > 0 { // already has children grouped!
-return fmt.Errorf("Already has groups!")
+return fmt.Errorf("already has groups")
 }
 if !v1.Res.GroupCmp(v2.Res) { // resource groupcmp failed!
-return fmt.Errorf("The GroupCmp failed!")
+return fmt.Errorf("the GroupCmp failed")
 }
 return nil // success
 }
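This hunk only restyles the error strings to the usual Go convention: lowercase, no trailing punctuation, so they compose cleanly when a caller wraps them. A tiny illustration (the file name here is hypothetical):

```go
package main

import "fmt"

func load(path string) error {
	// Error strings start lowercase and have no trailing punctuation,
	// so the wrapped message below reads as one sentence.
	return fmt.Errorf("the file %s could not be parsed", path)
}

func main() {
	if err := load("graph.yaml"); err != nil {
		fmt.Printf("autoGroup failed: %v\n", err)
	}
}
```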
@@ -173,7 +173,7 @@ func (ag *nonReachabilityGrouper) vertexNext() (v1, v2 *Vertex, err error) {
 for {
 v1, v2, err = ag.baseGrouper.vertexNext() // get all iterable pairs
 if err != nil {
-log.Fatalf("Error running autoGroup(vertexNext): %v", err)
+log.Fatalf("error running autoGroup(vertexNext): %v", err)
 }

 if v1 != v2 { // ignore self cmp early (perf optimization)
@@ -187,7 +187,7 @@ func (ag *nonReachabilityGrouper) vertexNext() (v1, v2 *Vertex, err error) {

 // if we got here, it means we're skipping over this candidate!
 if ok, err := ag.baseGrouper.vertexTest(false); err != nil {
-log.Fatalf("Error running autoGroup(vertexTest): %v", err)
+log.Fatalf("error running autoGroup(vertexTest): %v", err)
 } else if !ok {
 return nil, nil, nil // done!
 }
@@ -284,7 +284,7 @@ func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex)

 // 5) creation of a cyclic graph should throw an error
 if _, err := g.TopologicalSort(); err != nil { // am i a dag or not?
-return errwrap.Wrapf(err, "TopologicalSort failed") // not a dag
+return errwrap.Wrapf(err, "the TopologicalSort failed") // not a dag
 }
 return nil // success
 }
@@ -295,14 +295,14 @@ func (g *Graph) autoGroup(ag AutoGrouper) chan string {
 go func(strch chan string) {
 strch <- fmt.Sprintf("Compile: Grouping: Algorithm: %v...", ag.name())
 if err := ag.init(g); err != nil {
-log.Fatalf("Error running autoGroup(init): %v", err)
+log.Fatalf("error running autoGroup(init): %v", err)
 }

 for {
 var v, w *Vertex
 v, w, err := ag.vertexNext() // get pair to compare
 if err != nil {
-log.Fatalf("Error running autoGroup(vertexNext): %v", err)
+log.Fatalf("error running autoGroup(vertexNext): %v", err)
 }
 merged := false
 // save names since they change during the runs
@@ -325,7 +325,7 @@ func (g *Graph) autoGroup(ag AutoGrouper) chan string {

 // did these get used?
 if ok, err := ag.vertexTest(merged); err != nil {
-log.Fatalf("Error running autoGroup(vertexTest): %v", err)
+log.Fatalf("error running autoGroup(vertexTest): %v", err)
 } else if !ok {
 break // done!
 }
pgraph/autogroup_test.go (new file, 486 lines)
@@ -0,0 +1,486 @@
+// Mgmt
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
+// Written by James Shubin <james@shubin.ca> and the project contributors
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.

+package pgraph

+import (
+"testing"
+)

+// all of the following test cases are laid out with the following semantics:
+// * vertices which start with the same single letter are considered "like"
+// * "like" elements should be merged
+// * vertices can have any integer after their single letter "family" type
+// * grouped vertices should have a name with a comma separated list of names
+// * edges follow the same conventions about grouping

+// empty graph
+func TestPgraphGrouping1(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+g2 := NewGraph("g2") // expected result
+runGraphCmp(t, g1, g2)
+}

+// single vertex
+func TestPgraphGrouping2(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{ // grouping to limit variable scope
+a1 := NewVertex(NewNoopResTest("a1"))
+g1.AddVertex(a1)
+}
+g2 := NewGraph("g2") // expected result
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+g2.AddVertex(a1)
+}
+runGraphCmp(t, g1, g2)
+}

+// two vertices
+func TestPgraphGrouping3(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+b1 := NewVertex(NewNoopResTest("b1"))
+g1.AddVertex(a1, b1)
+}
+g2 := NewGraph("g2") // expected result
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+b1 := NewVertex(NewNoopResTest("b1"))
+g2.AddVertex(a1, b1)
+}
+runGraphCmp(t, g1, g2)
+}

+// two vertices merge
+func TestPgraphGrouping4(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+g1.AddVertex(a1, a2)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2"))
+g2.AddVertex(a)
+}
+runGraphCmp(t, g1, g2)
+}

+// three vertices merge
+func TestPgraphGrouping5(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+a3 := NewVertex(NewNoopResTest("a3"))
+g1.AddVertex(a1, a2, a3)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2,a3"))
+g2.AddVertex(a)
+}
+runGraphCmp(t, g1, g2)
+}

+// three vertices, two merge
+func TestPgraphGrouping6(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+b1 := NewVertex(NewNoopResTest("b1"))
+g1.AddVertex(a1, a2, b1)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2"))
+b1 := NewVertex(NewNoopResTest("b1"))
+g2.AddVertex(a, b1)
+}
+runGraphCmp(t, g1, g2)
+}

+// four vertices, three merge
+func TestPgraphGrouping7(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+a3 := NewVertex(NewNoopResTest("a3"))
+b1 := NewVertex(NewNoopResTest("b1"))
+g1.AddVertex(a1, a2, a3, b1)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2,a3"))
+b1 := NewVertex(NewNoopResTest("b1"))
+g2.AddVertex(a, b1)
+}
+runGraphCmp(t, g1, g2)
+}

+// four vertices, two&two merge
+func TestPgraphGrouping8(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+b1 := NewVertex(NewNoopResTest("b1"))
+b2 := NewVertex(NewNoopResTest("b2"))
+g1.AddVertex(a1, a2, b1, b2)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2"))
+b := NewVertex(NewNoopResTest("b1,b2"))
+g2.AddVertex(a, b)
+}
+runGraphCmp(t, g1, g2)
+}

+// five vertices, two&three merge
+func TestPgraphGrouping9(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+b1 := NewVertex(NewNoopResTest("b1"))
+b2 := NewVertex(NewNoopResTest("b2"))
+b3 := NewVertex(NewNoopResTest("b3"))
+g1.AddVertex(a1, a2, b1, b2, b3)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2"))
+b := NewVertex(NewNoopResTest("b1,b2,b3"))
+g2.AddVertex(a, b)
+}
+runGraphCmp(t, g1, g2)
+}

+// three unique vertices
+func TestPgraphGrouping10(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+b1 := NewVertex(NewNoopResTest("b1"))
+c1 := NewVertex(NewNoopResTest("c1"))
+g1.AddVertex(a1, b1, c1)
+}
+g2 := NewGraph("g2") // expected result
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+b1 := NewVertex(NewNoopResTest("b1"))
+c1 := NewVertex(NewNoopResTest("c1"))
+g2.AddVertex(a1, b1, c1)
+}
+runGraphCmp(t, g1, g2)
+}

+// three unique vertices, two merge
+func TestPgraphGrouping11(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+b1 := NewVertex(NewNoopResTest("b1"))
+b2 := NewVertex(NewNoopResTest("b2"))
+c1 := NewVertex(NewNoopResTest("c1"))
+g1.AddVertex(a1, b1, b2, c1)
+}
+g2 := NewGraph("g2") // expected result
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+b := NewVertex(NewNoopResTest("b1,b2"))
+c1 := NewVertex(NewNoopResTest("c1"))
+g2.AddVertex(a1, b, c1)
+}
+runGraphCmp(t, g1, g2)
+}

+// simple merge 1
+// a1   a2         a1,a2
+//   \ /     >>>     |     (arrows point downwards)
+//    b               b
+func TestPgraphGrouping12(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+b1 := NewVertex(NewNoopResTest("b1"))
+e1 := NewEdge("e1")
+e2 := NewEdge("e2")
+g1.AddEdge(a1, b1, e1)
+g1.AddEdge(a2, b1, e2)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2"))
+b1 := NewVertex(NewNoopResTest("b1"))
+e := NewEdge("e1,e2")
+g2.AddEdge(a, b1, e)
+}
+runGraphCmp(t, g1, g2)
+}

+// simple merge 2
+//    b               b
+//   / \     >>>      |    (arrows point downwards)
+// a1   a2          a1,a2
+func TestPgraphGrouping13(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+b1 := NewVertex(NewNoopResTest("b1"))
+e1 := NewEdge("e1")
+e2 := NewEdge("e2")
+g1.AddEdge(b1, a1, e1)
+g1.AddEdge(b1, a2, e2)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2"))
+b1 := NewVertex(NewNoopResTest("b1"))
+e := NewEdge("e1,e2")
+g2.AddEdge(b1, a, e)
+}
+runGraphCmp(t, g1, g2)
+}

+// triple merge
+// a1 a2  a3         a1,a2,a3
+//   \ | /     >>>       |      (arrows point downwards)
+//     b                 b
+func TestPgraphGrouping14(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+a2 := NewVertex(NewNoopResTest("a2"))
+a3 := NewVertex(NewNoopResTest("a3"))
+b1 := NewVertex(NewNoopResTest("b1"))
+e1 := NewEdge("e1")
+e2 := NewEdge("e2")
+e3 := NewEdge("e3")
+g1.AddEdge(a1, b1, e1)
+g1.AddEdge(a2, b1, e2)
+g1.AddEdge(a3, b1, e3)
+}
+g2 := NewGraph("g2") // expected result
+{
+a := NewVertex(NewNoopResTest("a1,a2,a3"))
+b1 := NewVertex(NewNoopResTest("b1"))
+e := NewEdge("e1,e2,e3")
+g2.AddEdge(a, b1, e)
+}
+runGraphCmp(t, g1, g2)
+}

+// chain merge
+// a1             a1
+//   \ \           |
+// b1  b2  >>>   b1,b2   (arrows point downwards)
+//   \ /           |
+//    c1          c1
+func TestPgraphGrouping15(t *testing.T) {
+g1 := NewGraph("g1") // original graph
+{
+a1 := NewVertex(NewNoopResTest("a1"))
+b1 := NewVertex(NewNoopResTest("b1"))
+b2 := NewVertex(NewNoopResTest("b2"))
+c1 := NewVertex(NewNoopResTest("c1"))
+e1 := NewEdge("e1")
+e2 := NewEdge("e2")
+e3 := NewEdge("e3")
+e4 := NewEdge("e4")
+g1.AddEdge(a1, b1, e1)
|
||||||
|
g1.AddEdge(a1, b2, e2)
|
||||||
|
g1.AddEdge(b1, c1, e3)
|
||||||
|
g1.AddEdge(b2, c1, e4)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
b := NewVertex(NewNoopResTest("b1,b2"))
|
||||||
|
c1 := NewVertex(NewNoopResTest("c1"))
|
||||||
|
e1 := NewEdge("e1,e2")
|
||||||
|
e2 := NewEdge("e3,e4")
|
||||||
|
g2.AddEdge(a1, b, e1)
|
||||||
|
g2.AddEdge(b, c1, e2)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// re-attach 1 (outer)
|
||||||
|
// technically the second possibility is valid too, depending on which order we
|
||||||
|
// merge edges in, and if we don't filter out any unnecessary edges afterwards!
|
||||||
|
// a1 a2 a1,a2 a1,a2
|
||||||
|
// | / | | \
|
||||||
|
// b1 / >>> b1 OR b1 / (arrows point downwards)
|
||||||
|
// | / | | /
|
||||||
|
// c1 c1 c1
|
||||||
|
func TestPgraphGrouping16(t *testing.T) {
|
||||||
|
g1 := NewGraph("g1") // original graph
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
a2 := NewVertex(NewNoopResTest("a2"))
|
||||||
|
b1 := NewVertex(NewNoopResTest("b1"))
|
||||||
|
c1 := NewVertex(NewNoopResTest("c1"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
e2 := NewEdge("e2")
|
||||||
|
e3 := NewEdge("e3")
|
||||||
|
g1.AddEdge(a1, b1, e1)
|
||||||
|
g1.AddEdge(b1, c1, e2)
|
||||||
|
g1.AddEdge(a2, c1, e3)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result
|
||||||
|
{
|
||||||
|
a := NewVertex(NewNoopResTest("a1,a2"))
|
||||||
|
b1 := NewVertex(NewNoopResTest("b1"))
|
||||||
|
c1 := NewVertex(NewNoopResTest("c1"))
|
||||||
|
e1 := NewEdge("e1,e3")
|
||||||
|
e2 := NewEdge("e2,e3") // e3 gets "merged through" to BOTH edges!
|
||||||
|
g2.AddEdge(a, b1, e1)
|
||||||
|
g2.AddEdge(b1, c1, e2)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// re-attach 2 (inner)
|
||||||
|
// a1 b2 a1
|
||||||
|
// | / |
|
||||||
|
// b1 / >>> b1,b2 (arrows point downwards)
|
||||||
|
// | / |
|
||||||
|
// c1 c1
|
||||||
|
func TestPgraphGrouping17(t *testing.T) {
|
||||||
|
g1 := NewGraph("g1") // original graph
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
b1 := NewVertex(NewNoopResTest("b1"))
|
||||||
|
b2 := NewVertex(NewNoopResTest("b2"))
|
||||||
|
c1 := NewVertex(NewNoopResTest("c1"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
e2 := NewEdge("e2")
|
||||||
|
e3 := NewEdge("e3")
|
||||||
|
g1.AddEdge(a1, b1, e1)
|
||||||
|
g1.AddEdge(b1, c1, e2)
|
||||||
|
g1.AddEdge(b2, c1, e3)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
b := NewVertex(NewNoopResTest("b1,b2"))
|
||||||
|
c1 := NewVertex(NewNoopResTest("c1"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
e2 := NewEdge("e2,e3")
|
||||||
|
g2.AddEdge(a1, b, e1)
|
||||||
|
g2.AddEdge(b, c1, e2)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// re-attach 3 (double)
|
||||||
|
// similar to "re-attach 1", technically there is a second possibility for this
|
||||||
|
// a2 a1 b2 a1,a2
|
||||||
|
// \ | / |
|
||||||
|
// \ b1 / >>> b1,b2 (arrows point downwards)
|
||||||
|
// \ | / |
|
||||||
|
// c1 c1
|
||||||
|
func TestPgraphGrouping18(t *testing.T) {
|
||||||
|
g1 := NewGraph("g1") // original graph
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
a2 := NewVertex(NewNoopResTest("a2"))
|
||||||
|
b1 := NewVertex(NewNoopResTest("b1"))
|
||||||
|
b2 := NewVertex(NewNoopResTest("b2"))
|
||||||
|
c1 := NewVertex(NewNoopResTest("c1"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
e2 := NewEdge("e2")
|
||||||
|
e3 := NewEdge("e3")
|
||||||
|
e4 := NewEdge("e4")
|
||||||
|
g1.AddEdge(a1, b1, e1)
|
||||||
|
g1.AddEdge(b1, c1, e2)
|
||||||
|
g1.AddEdge(a2, c1, e3)
|
||||||
|
g1.AddEdge(b2, c1, e4)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result
|
||||||
|
{
|
||||||
|
a := NewVertex(NewNoopResTest("a1,a2"))
|
||||||
|
b := NewVertex(NewNoopResTest("b1,b2"))
|
||||||
|
c1 := NewVertex(NewNoopResTest("c1"))
|
||||||
|
e1 := NewEdge("e1,e3")
|
||||||
|
e2 := NewEdge("e2,e3,e4") // e3 gets "merged through" to BOTH edges!
|
||||||
|
g2.AddEdge(a, b, e1)
|
||||||
|
g2.AddEdge(b, c1, e2)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// connected merge 0, (no change!)
|
||||||
|
// a1 a1
|
||||||
|
// \ >>> \ (arrows point downwards)
|
||||||
|
// a2 a2
|
||||||
|
func TestPgraphGroupingConnected0(t *testing.T) {
|
||||||
|
g1 := NewGraph("g1") // original graph
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
a2 := NewVertex(NewNoopResTest("a2"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
g1.AddEdge(a1, a2, e1)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result ?
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
a2 := NewVertex(NewNoopResTest("a2"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
g2.AddEdge(a1, a2, e1)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// connected merge 1, (no change!)
|
||||||
|
// a1 a1
|
||||||
|
// \ \
|
||||||
|
// b >>> b (arrows point downwards)
|
||||||
|
// \ \
|
||||||
|
// a2 a2
|
||||||
|
func TestPgraphGroupingConnected1(t *testing.T) {
|
||||||
|
g1 := NewGraph("g1") // original graph
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
b := NewVertex(NewNoopResTest("b"))
|
||||||
|
a2 := NewVertex(NewNoopResTest("a2"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
e2 := NewEdge("e2")
|
||||||
|
g1.AddEdge(a1, b, e1)
|
||||||
|
g1.AddEdge(b, a2, e2)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result ?
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
b := NewVertex(NewNoopResTest("b"))
|
||||||
|
a2 := NewVertex(NewNoopResTest("a2"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
e2 := NewEdge("e2")
|
||||||
|
g2.AddEdge(a1, b, e1)
|
||||||
|
g2.AddEdge(b, a2, e2)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
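The grouping tests above all lean on one convention: vertices whose names start with the same letter count as "like" and may be merged, and a merged vertex takes a comma-joined name. As a rough illustration of that naming rule only (a hypothetical helper, not the NoopResTest.GroupCmp under review), a first-letter comparison could be sketched as:

// Sketch only: two test resources are "like" when their names share a leading
// letter, so "a1" groups with "a2" but not with "b1". Helper name is made up.
func likeByFirstLetter(name1, name2 string) bool {
	if len(name1) == 0 || len(name2) == 0 {
		return false
	}
	return name1[0] == name2[0]
}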
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-package pgraph
+package pgraph // TODO: this should be a subpackage

 import (
 	"fmt"
@@ -46,11 +46,15 @@ func (g *Graph) Graphviz() (out string) {
 	//out += "\tnode [shape=box];\n"
 	str := ""
 	for i := range g.Adjacency { // reverse paths
-		out += fmt.Sprintf("\t%s [label=\"%s[%s]\"];\n", i.GetName(), i.Kind(), i.GetName())
+		out += fmt.Sprintf("\t\"%s\" [label=\"%s[%s]\"];\n", i.GetName(), i.Kind(), i.GetName())
 		for j := range g.Adjacency[i] {
 			k := g.Adjacency[i][j]
 			// use str for clearer output ordering
-			str += fmt.Sprintf("\t%s -> %s [label=%s];\n", i.GetName(), j.GetName(), k.Name)
+			if k.Notify {
+				str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\",style=bold];\n", i.GetName(), j.GetName(), k.Name)
+			} else {
+				str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\"];\n", i.GetName(), j.GetName(), k.Name)
+			}
 		}
 	}
 	out += str
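For illustration only: assuming a noop resource whose Kind() returns "noop" (an assumption, the method body is not shown in this hunk) and made-up names a1, a2, b1, e1 and e2, the quoted format plus the new Notify branch would emit DOT lines shaped like the following, with notify edges drawn bold:

	"a1" [label="noop[a1]"];
	"a1" -> "b1" [label="e1",style=bold];
	"a2" -> "b1" [label="e2"];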
@@ -60,16 +64,20 @@ func (g *Graph) Graphviz() (out string) {

 // ExecGraphviz writes out the graphviz data and runs the correct graphviz
 // filter command.
-func (g *Graph) ExecGraphviz(program, filename string) error {
+func (g *Graph) ExecGraphviz(program, filename, hostname string) error {

 	switch program {
 	case "dot", "neato", "twopi", "circo", "fdp":
 	default:
-		return fmt.Errorf("Invalid graphviz program selected!")
+		return fmt.Errorf("invalid graphviz program selected")
 	}

 	if filename == "" {
-		return fmt.Errorf("No filename given!")
+		return fmt.Errorf("no filename given")
+	}
+
+	if hostname != "" {
+		filename = fmt.Sprintf("%s@%s", filename, hostname)
 	}

 	// run as a normal user if possible when run with sudo
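A hedged usage sketch of the new three-argument signature (program, path and hostname values here are made up): when hostname is non-empty the graph file is written to "<filename>@<hostname>", and the rendered image to that name plus ".png", per the Sprintf calls in this function.

// Sketch: the extra hostname argument only suffixes the output filename.
if err := g.ExecGraphviz("dot", "/tmp/pgraph.dot", "host1"); err != nil {
	return err // on success this writes /tmp/pgraph.dot@host1 and /tmp/pgraph.dot@host1.png
}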
@@ -78,18 +86,18 @@ func (g *Graph) ExecGraphviz(program, filename string) error {

 	err := ioutil.WriteFile(filename, []byte(g.Graphviz()), 0644)
 	if err != nil {
-		return fmt.Errorf("Error writing to filename!")
+		return fmt.Errorf("error writing to filename")
 	}

 	if err1 == nil && err2 == nil {
 		if err := os.Chown(filename, uid, gid); err != nil {
-			return fmt.Errorf("Error changing file owner!")
+			return fmt.Errorf("error changing file owner")
 		}
 	}

 	path, err := exec.LookPath(program)
 	if err != nil {
-		return fmt.Errorf("Graphviz is missing!")
+		return fmt.Errorf("the Graphviz program is missing")
 	}

 	out := fmt.Sprintf("%s.png", filename)
@@ -104,7 +112,7 @@ func (g *Graph) ExecGraphviz(program, filename string) error {
 	}
 	_, err = cmd.Output()
 	if err != nil {
-		return fmt.Errorf("Error writing to image!")
+		return fmt.Errorf("error writing to image")
 	}
 	return nil
 }
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -24,7 +24,9 @@ import (
 	"sync"

 	"github.com/purpleidea/mgmt/event"
+	"github.com/purpleidea/mgmt/prometheus"
 	"github.com/purpleidea/mgmt/resources"
+	"github.com/purpleidea/mgmt/util/semaphore"

 	errwrap "github.com/pkg/errors"
 )
@@ -40,6 +42,7 @@ const (
 	graphStatePaused
 )

+// Flags contains specific constants used by the graph.
 type Flags struct {
 	Debug bool
 }
@@ -55,7 +58,13 @@ type Graph struct {
 	Adjacency map[*Vertex]map[*Vertex]*Edge // *Vertex -> *Vertex (edge)
 	Flags Flags
 	state graphState
-	mutex sync.Mutex // used when modifying graph State variable
+	fastPause bool // used to disable pokes for a fast pause
+	mutex *sync.Mutex // used when modifying graph State variable
+	wg *sync.WaitGroup
+	semas map[string]*semaphore.Semaphore
+	slock *sync.Mutex // semaphore mutex
+
+	prometheus *prometheus.Prometheus // the prometheus instance
 }

 // Vertex is the primary vertex struct in this library.
@@ -78,6 +87,11 @@ func NewGraph(name string) *Graph {
 		Name: name,
 		Adjacency: make(map[*Vertex]map[*Vertex]*Edge),
 		state: graphStateNil,
+		// ptr b/c: Mutex/WaitGroup must not be copied after first use
+		mutex: &sync.Mutex{},
+		wg: &sync.WaitGroup{},
+		semas: make(map[string]*semaphore.Semaphore),
+		slock: &sync.Mutex{},
 	}
 }

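The "ptr b/c" comment refers to the Go rule that sync.Mutex and sync.WaitGroup values must not be copied after first use; since Graph values do get copied (see Copy in the next hunk), the fields become pointers so every copy shares one lock and wait group. A standalone illustration of that difference, not project code:

package main

import (
	"fmt"
	"sync"
)

// counter holds its mutex by pointer, so struct copies still guard one shared lock.
type counter struct {
	mu *sync.Mutex
	n  int
}

func main() {
	c1 := counter{mu: &sync.Mutex{}}
	c2 := c1 // the copy shares c1's mutex

	c1.mu.Lock()
	c1.n++
	c1.mu.Unlock()

	c2.mu.Lock() // same lock as c1.mu, so access stays serialized
	fmt.Println(c2.mu == c1.mu) // true
	c2.mu.Unlock()
}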
@@ -112,6 +126,13 @@ func (g *Graph) Copy() *Graph {
 		Adjacency: make(map[*Vertex]map[*Vertex]*Edge, len(g.Adjacency)),
 		Flags: g.Flags,
 		state: g.state,
+		mutex: g.mutex,
+		wg: g.wg,
+		semas: g.semas,
+		slock: g.slock,
+		fastPause: g.fastPause,
+
+		prometheus: g.prometheus,
 	}
 	for k, v := range g.Adjacency {
 		newGraph.Adjacency[k] = v // copy
@@ -185,17 +206,28 @@ func (g *Graph) DeleteEdge(e *Edge) {
 	}
 }

-// GetVertexMatch searches for an equivalent resource in the graph and returns
-// the vertex it is found in, or nil if not found.
-func (g *Graph) GetVertexMatch(obj resources.Res) *Vertex {
-	for k := range g.Adjacency {
-		if k.Res.Compare(obj) {
-			return k
+// CompareMatch searches for an equivalent resource in the graph and returns the
+// vertex it is found in, or nil if not found.
+func (g *Graph) CompareMatch(obj resources.Res) *Vertex {
+	for v := range g.Adjacency {
+		if v.Res.Compare(obj) {
+			return v
 		}
 	}
 	return nil
 }

+// TODO: consider adding a mutate API.
+//func (g *Graph) MutateMatch(obj resources.Res) *Vertex {
+//	for v := range g.Adjacency {
+//		if err := v.Res.Mutate(obj); err == nil {
+//			// transmogrified!
+//			return v
+//		}
+//	}
+//	return nil
+//}
+
 // HasVertex returns if the input vertex exists in the graph.
 func (g *Graph) HasVertex(v *Vertex) bool {
 	if _, exists := g.Adjacency[v]; exists {
@@ -475,7 +507,7 @@ func (g *Graph) TopologicalSort() ([]*Vertex, error) { // kahn's algorithm
 		if in > 0 {
 			for n := range g.Adjacency[c] {
 				if remaining[n] > 0 {
-					return nil, fmt.Errorf("Not a dag!")
+					return nil, fmt.Errorf("not a dag")
 				}
 			}
 		}
@@ -525,7 +557,8 @@ func (g *Graph) Reachability(a, b *Vertex) []*Vertex {
 }

 // GraphSync updates the oldGraph so that it matches the newGraph receiver. It
-// leaves identical elements alone so that they don't need to be refreshed.
+// leaves identical elements alone so that they don't need to be refreshed. It
+// tries to mutate existing elements into new ones, if they support this.
 // FIXME: add test cases
 func (g *Graph) GraphSync(oldGraph *Graph) (*Graph, error) {

@@ -540,13 +573,24 @@ func (g *Graph) GraphSync(oldGraph *Graph) (*Graph, error) {

 	for v := range g.Adjacency { // loop through the vertices (resources)
 		res := v.Res // resource
-		vertex := oldGraph.GetVertexMatch(res)
-		if vertex == nil { // no match found
-			if err := res.Init(); err != nil {
-				return nil, errwrap.Wrapf(err, "could not Init() resource")
+		var vertex *Vertex
+
+		// step one, direct compare with res.Compare
+		if vertex == nil { // redundant guard for consistency
+			vertex = oldGraph.CompareMatch(res)
+		}
+
+		// TODO: consider adding a mutate API.
+		// step two, try and mutate with res.Mutate
+		//if vertex == nil { // not found yet...
+		//	vertex = oldGraph.MutateMatch(res)
+		//}
+
+		if vertex == nil { // no match found yet
+			if err := res.Validate(); err != nil {
+				return nil, errwrap.Wrapf(err, "could not Validate() resource")
 			}
-			vertex = NewVertex(res)
+			vertex = v
 			oldGraph.AddVertex(vertex) // call standalone in case not part of an edge
 		}
 		lookup[v] = vertex // used for constructing edges
@@ -557,7 +601,8 @@ func (g *Graph) GraphSync(oldGraph *Graph) (*Graph, error) {
 	for v := range oldGraph.Adjacency {
 		if !VertexContains(v, vertexKeep) {
 			// wait for exit before starting new graph!
-			v.SendEvent(event.EventExit, true, false)
+			v.SendEvent(event.EventExit, nil) // sync
+			v.Res.WaitGroup().Wait()
 			oldGraph.DeleteVertex(v)
 		}
 	}
@@ -570,13 +615,13 @@ func (g *Graph) GraphSync(oldGraph *Graph) (*Graph, error) {
 		// lookup vertices (these should exist now)
 		//res1 := v1.Res // resource
 		//res2 := v2.Res
-		//vertex1 := oldGraph.GetVertexMatch(res1)
-		//vertex2 := oldGraph.GetVertexMatch(res2)
+		//vertex1 := oldGraph.CompareMatch(res1)
+		//vertex2 := oldGraph.CompareMatch(res2)
 		vertex1, exists1 := lookup[v1]
 		vertex2, exists2 := lookup[v2]
 		if !exists1 || !exists2 { // no match found, bug?
 			//if vertex1 == nil || vertex2 == nil { // no match found
-			return nil, fmt.Errorf("New vertices weren't found!") // programming error
+			return nil, fmt.Errorf("new vertices weren't found") // programming error
 		}

 		edge, exists := oldGraph.Adjacency[vertex1][vertex2]
@@ -614,8 +659,11 @@ func (g *Graph) GraphMetas() []*resources.MetaParams {

 // AssociateData associates some data with the object in the graph in question.
 func (g *Graph) AssociateData(data *resources.Data) {
+	// prometheus needs to be associated to this graph as well
+	g.prometheus = data.Prometheus
+
 	for k := range g.Adjacency {
-		k.Res.AssociateData(data)
+		*k.Res.Data() = *data
 	}
 }

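The AssociateData change above swaps a setter call for an assignment through a pointer-returning accessor. In general Go terms that pattern looks like this sketch (types and names are hypothetical, not the mgmt resources API):

// Data() returns a pointer into the receiver, so assigning through it
// overwrites the embedded struct in place.
type payload struct {
	Hostname string
}

type holder struct {
	data payload
}

func (h *holder) Data() *payload {
	return &h.data
}

func overwrite(h *holder, src *payload) {
	*h.Data() = *src // copies the whole payload into the holder
}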
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -24,13 +24,18 @@ import (
 	"strings"
 	"testing"
 	"time"

+	"github.com/purpleidea/mgmt/resources"
+	"github.com/purpleidea/mgmt/util"
 )

 // NV is a helper function to make testing easier. It creates a new noop vertex.
 func NV(s string) *Vertex {
-	obj, err := NewNoopRes(s)
-	if err != nil {
-		panic(err) // unlikely test failure!
+	obj := &resources.NoopRes{
+		BaseRes: resources.BaseRes{
+			Name: s,
+		},
+		Comment: "Testing!",
 	}
 	return NewVertex(obj)
 }
@@ -40,11 +45,11 @@ func TestPgraphT1(t *testing.T) {
 	G := NewGraph("g1")

 	if i := G.NumVertices(); i != 0 {
-		t.Errorf("Should have 0 vertices instead of: %d.", i)
+		t.Errorf("should have 0 vertices instead of: %d", i)
 	}

 	if i := G.NumEdges(); i != 0 {
-		t.Errorf("Should have 0 edges instead of: %d.", i)
+		t.Errorf("should have 0 edges instead of: %d", i)
 	}

 	v1 := NV("v1")
@@ -53,11 +58,11 @@ func TestPgraphT1(t *testing.T) {
 	G.AddEdge(v1, v2, e1)

 	if i := G.NumVertices(); i != 2 {
-		t.Errorf("Should have 2 vertices instead of: %d.", i)
+		t.Errorf("should have 2 vertices instead of: %d", i)
 	}

 	if i := G.NumEdges(); i != 1 {
-		t.Errorf("Should have 1 edges instead of: %d.", i)
+		t.Errorf("should have 1 edges instead of: %d", i)
 	}
 }

@@ -84,7 +89,7 @@ func TestPgraphT2(t *testing.T) {
 	G.AddEdge(v5, v6, e5)

 	if i := G.NumVertices(); i != 6 {
-		t.Errorf("Should have 6 vertices instead of: %d.", i)
+		t.Errorf("should have 6 vertices instead of: %d", i)
 	}
 }

@@ -112,19 +117,19 @@ func TestPgraphT3(t *testing.T) {
 	//G.AddEdge(v6, v4, e6)
 	out1 := G.DFS(v1)
 	if i := len(out1); i != 3 {
-		t.Errorf("Should have 3 vertices instead of: %d.", i)
-		t.Errorf("Found: %v", out1)
+		t.Errorf("should have 3 vertices instead of: %d", i)
+		t.Errorf("found: %v", out1)
 		for _, v := range out1 {
-			t.Errorf("Value: %v", v.GetName())
+			t.Errorf("value: %v", v.GetName())
 		}
 	}

 	out2 := G.DFS(v4)
 	if i := len(out2); i != 3 {
-		t.Errorf("Should have 3 vertices instead of: %d.", i)
-		t.Errorf("Found: %v", out1)
+		t.Errorf("should have 3 vertices instead of: %d", i)
+		t.Errorf("found: %v", out1)
 		for _, v := range out1 {
-			t.Errorf("Value: %v", v.GetName())
+			t.Errorf("value: %v", v.GetName())
 		}
 	}
 }
@@ -144,10 +149,10 @@ func TestPgraphT4(t *testing.T) {

 	out := G.DFS(v1)
 	if i := len(out); i != 3 {
-		t.Errorf("Should have 3 vertices instead of: %d.", i)
-		t.Errorf("Found: %v", out)
+		t.Errorf("should have 3 vertices instead of: %d", i)
+		t.Errorf("found: %v", out)
 		for _, v := range out {
-			t.Errorf("Value: %v", v.GetName())
+			t.Errorf("value: %v", v.GetName())
 		}
 	}
 }
@@ -177,7 +182,7 @@ func TestPgraphT5(t *testing.T) {
 	save := []*Vertex{v1, v2, v3}
 	out := G.FilterGraph("new g5", save)
 	if i := out.NumVertices(); i != 3 {
-		t.Errorf("Should have 3 vertices instead of: %d.", i)
+		t.Errorf("should have 3 vertices instead of: %d", i)
 	}
 }

@@ -214,7 +219,7 @@ func TestPgraphT6(t *testing.T) {
 	}

 	if i := HeisenbergGraphCount(graphs); i != 2 {
-		t.Errorf("Should have 2 graphs instead of: %d.", i)
+		t.Errorf("should have 2 graphs instead of: %d", i)
 	}
 }

@@ -232,31 +237,31 @@ func TestPgraphT7(t *testing.T) {
 	G.AddEdge(v3, v1, e3)

 	if i := G.NumVertices(); i != 3 {
-		t.Errorf("Should have 3 vertices instead of: %d.", i)
+		t.Errorf("should have 3 vertices instead of: %d", i)
 	}

 	G.DeleteVertex(v2)

 	if i := G.NumVertices(); i != 2 {
-		t.Errorf("Should have 2 vertices instead of: %d.", i)
+		t.Errorf("should have 2 vertices instead of: %d", i)
 	}

 	G.DeleteVertex(v1)

 	if i := G.NumVertices(); i != 1 {
-		t.Errorf("Should have 1 vertices instead of: %d.", i)
+		t.Errorf("should have 1 vertices instead of: %d", i)
 	}

 	G.DeleteVertex(v3)

 	if i := G.NumVertices(); i != 0 {
-		t.Errorf("Should have 0 vertices instead of: %d.", i)
+		t.Errorf("should have 0 vertices instead of: %d", i)
 	}

 	G.DeleteVertex(v2) // duplicate deletes don't error...

 	if i := G.NumVertices(); i != 0 {
-		t.Errorf("Should have 0 vertices instead of: %d.", i)
+		t.Errorf("should have 0 vertices instead of: %d", i)
 	}
 }

@@ -266,26 +271,26 @@ func TestPgraphT8(t *testing.T) {
 	v2 := NV("v2")
 	v3 := NV("v3")
 	if VertexContains(v1, []*Vertex{v1, v2, v3}) != true {
-		t.Errorf("Should be true instead of false.")
+		t.Errorf("should be true instead of false.")
 	}

 	v4 := NV("v4")
 	v5 := NV("v5")
 	v6 := NV("v6")
 	if VertexContains(v4, []*Vertex{v5, v6}) != false {
-		t.Errorf("Should be false instead of true.")
+		t.Errorf("should be false instead of true.")
 	}

 	v7 := NV("v7")
 	v8 := NV("v8")
 	v9 := NV("v9")
 	if VertexContains(v8, []*Vertex{v7, v8, v9}) != true {
-		t.Errorf("Should be true instead of false.")
+		t.Errorf("should be true instead of false.")
 	}

 	v1b := NV("v1") // same value, different objects
 	if VertexContains(v1b, []*Vertex{v1, v2, v3}) != false {
-		t.Errorf("Should be false instead of true.")
+		t.Errorf("should be false instead of true.")
 	}
 }

@@ -314,49 +319,49 @@ func TestPgraphT9(t *testing.T) {

 	indegree := G.InDegree() // map[*Vertex]int
 	if i := indegree[v1]; i != 0 {
-		t.Errorf("Indegree of v1 should be 0 instead of: %d.", i)
+		t.Errorf("indegree of v1 should be 0 instead of: %d", i)
 	}
 	if i := indegree[v2]; i != 1 {
-		t.Errorf("Indegree of v2 should be 1 instead of: %d.", i)
+		t.Errorf("indegree of v2 should be 1 instead of: %d", i)
 	}
 	if i := indegree[v3]; i != 1 {
-		t.Errorf("Indegree of v3 should be 1 instead of: %d.", i)
+		t.Errorf("indegree of v3 should be 1 instead of: %d", i)
 	}
 	if i := indegree[v4]; i != 2 {
-		t.Errorf("Indegree of v4 should be 2 instead of: %d.", i)
+		t.Errorf("indegree of v4 should be 2 instead of: %d", i)
 	}
 	if i := indegree[v5]; i != 1 {
-		t.Errorf("Indegree of v5 should be 1 instead of: %d.", i)
+		t.Errorf("indegree of v5 should be 1 instead of: %d", i)
 	}
 	if i := indegree[v6]; i != 1 {
-		t.Errorf("Indegree of v6 should be 1 instead of: %d.", i)
+		t.Errorf("indegree of v6 should be 1 instead of: %d", i)
 	}

 	outdegree := G.OutDegree() // map[*Vertex]int
 	if i := outdegree[v1]; i != 2 {
-		t.Errorf("Outdegree of v1 should be 2 instead of: %d.", i)
+		t.Errorf("outdegree of v1 should be 2 instead of: %d", i)
 	}
 	if i := outdegree[v2]; i != 1 {
-		t.Errorf("Outdegree of v2 should be 1 instead of: %d.", i)
+		t.Errorf("outdegree of v2 should be 1 instead of: %d", i)
 	}
 	if i := outdegree[v3]; i != 1 {
-		t.Errorf("Outdegree of v3 should be 1 instead of: %d.", i)
+		t.Errorf("outdegree of v3 should be 1 instead of: %d", i)
 	}
 	if i := outdegree[v4]; i != 1 {
-		t.Errorf("Outdegree of v4 should be 1 instead of: %d.", i)
+		t.Errorf("outdegree of v4 should be 1 instead of: %d", i)
 	}
 	if i := outdegree[v5]; i != 1 {
-		t.Errorf("Outdegree of v5 should be 1 instead of: %d.", i)
+		t.Errorf("outdegree of v5 should be 1 instead of: %d", i)
 	}
 	if i := outdegree[v6]; i != 0 {
-		t.Errorf("Outdegree of v6 should be 0 instead of: %d.", i)
+		t.Errorf("outdegree of v6 should be 0 instead of: %d", i)
 	}

 	s, err := G.TopologicalSort()
 	// either possibility is a valid toposort
 	match := reflect.DeepEqual(s, []*Vertex{v1, v2, v3, v4, v5, v6}) || reflect.DeepEqual(s, []*Vertex{v1, v3, v2, v4, v5, v6})
 	if err != nil || !match {
-		t.Errorf("Topological sort failed, error: %v.", err)
+		t.Errorf("topological sort failed, error: %v", err)
 		str := "Found:"
 		for _, v := range s {
 			str += " " + v.Res.GetName()
@@ -388,7 +393,7 @@ func TestPgraphT10(t *testing.T) {
 	G.AddEdge(v4, v2, e6) // cycle

 	if _, err := G.TopologicalSort(); err == nil {
-		t.Errorf("Topological sort passed, but graph is cyclic!")
+		t.Errorf("topological sort passed, but graph is cyclic")
 	}
 }

@@ -398,7 +403,7 @@ func TestPgraphReachability0(t *testing.T) {
 	G := NewGraph("g")
 	result := G.Reachability(nil, nil)
 	if result != nil {
-		t.Logf("Reachability failed!")
+		t.Logf("reachability failed")
 		str := "Got:"
 		for _, v := range result {
 			str += " " + v.Res.GetName()
@@ -415,7 +420,7 @@ func TestPgraphReachability0(t *testing.T) {
 	expected := []*Vertex{}

 	if !reflect.DeepEqual(result, expected) {
-		t.Logf("Reachability failed!")
+		t.Logf("reachability failed")
 		str := "Got:"
 		for _, v := range result {
 			str += " " + v.Res.GetName()
@@ -446,7 +451,7 @@ func TestPgraphReachability0(t *testing.T) {
 	expected := []*Vertex{}

 	if !reflect.DeepEqual(result, expected) {
-		t.Logf("Reachability failed!")
+		t.Logf("reachability failed")
 		str := "Got:"
 		for _, v := range result {
 			str += " " + v.Res.GetName()
@@ -481,7 +486,7 @@ func TestPgraphReachability1(t *testing.T) {
 	expected := []*Vertex{v1, v2, v3, v4, v5, v6}

 	if !reflect.DeepEqual(result, expected) {
-		t.Logf("Reachability failed!")
+		t.Logf("reachability failed")
 		str := "Got:"
 		for _, v := range result {
 			str += " " + v.Res.GetName()
@@ -518,7 +523,7 @@ func TestPgraphReachability2(t *testing.T) {

 	// !xor test
 	if reflect.DeepEqual(result, expected1) == reflect.DeepEqual(result, expected2) {
-		t.Logf("Reachability failed!")
+		t.Logf("reachability failed")
 		str := "Got:"
 		for _, v := range result {
 			str += " " + v.Res.GetName()
@@ -553,7 +558,7 @@ func TestPgraphReachability3(t *testing.T) {
 	expected := []*Vertex{v1, v5, v6}

 	if !reflect.DeepEqual(result, expected) {
-		t.Logf("Reachability failed!")
+		t.Logf("reachability failed")
 		str := "Got:"
 		for _, v := range result {
 			str += " " + v.Res.GetName()
@@ -588,7 +593,7 @@ func TestPgraphReachability4(t *testing.T) {
 	expected := []*Vertex{v1, v6}

 	if !reflect.DeepEqual(result, expected) {
-		t.Logf("Reachability failed!")
+		t.Logf("reachability failed")
 		str := "Got:"
 		for _, v := range result {
 			str += " " + v.Res.GetName()
@@ -606,27 +611,27 @@ func TestPgraphT11(t *testing.T) {
 	v6 := NV("v6")

 	if rev := Reverse([]*Vertex{}); !reflect.DeepEqual(rev, []*Vertex{}) {
-		t.Errorf("Reverse of vertex slice failed.")
+		t.Errorf("reverse of vertex slice failed")
 	}

 	if rev := Reverse([]*Vertex{v1}); !reflect.DeepEqual(rev, []*Vertex{v1}) {
-		t.Errorf("Reverse of vertex slice failed.")
+		t.Errorf("reverse of vertex slice failed")
 	}

 	if rev := Reverse([]*Vertex{v1, v2, v3, v4, v5, v6}); !reflect.DeepEqual(rev, []*Vertex{v6, v5, v4, v3, v2, v1}) {
-		t.Errorf("Reverse of vertex slice failed.")
+		t.Errorf("reverse of vertex slice failed")
 	}

 	if rev := Reverse([]*Vertex{v6, v5, v4, v3, v2, v1}); !reflect.DeepEqual(rev, []*Vertex{v1, v2, v3, v4, v5, v6}) {
-		t.Errorf("Reverse of vertex slice failed.")
+		t.Errorf("reverse of vertex slice failed")
 	}
 }

 type NoopResTest struct {
-	NoopRes
+	resources.NoopRes
 }

-func (obj *NoopResTest) GroupCmp(r Res) bool {
+func (obj *NoopResTest) GroupCmp(r resources.Res) bool {
 	res, ok := r.(*NoopResTest)
 	if !ok {
 		return false
@@ -643,16 +648,15 @@ func (obj *NoopResTest) GroupCmp(r Res) bool {

 func NewNoopResTest(name string) *NoopResTest {
 	obj := &NoopResTest{
-		NoopRes: NoopRes{
-			BaseRes: BaseRes{
+		NoopRes: resources.NoopRes{
+			BaseRes: resources.BaseRes{
 				Name: name,
-				MetaParams: MetaParams{
+				MetaParams: resources.MetaParams{
 					AutoGroup: true, // always autogroup
 				},
 			},
 		},
 	}
-	obj.Init() // optional here in this testing scenario (for now)
 	return obj
 }

@@ -680,10 +684,10 @@ func ListStrCmp(a, b []string) bool {
 // It also compares if grouped element groups are identical
 func GraphCmp(g1, g2 *Graph) error {
 	if n1, n2 := g1.NumVertices(), g2.NumVertices(); n1 != n2 {
-		return fmt.Errorf("Graph g1 has %d vertices, while g2 has %d.", n1, n2)
+		return fmt.Errorf("graph g1 has %d vertices, while g2 has %d", n1, n2)
 	}
 	if e1, e2 := g1.NumEdges(), g2.NumEdges(); e1 != e2 {
-		return fmt.Errorf("Graph g1 has %d edges, while g2 has %d.", e1, e2)
+		return fmt.Errorf("graph g1 has %d edges, while g2 has %d", e1, e2)
 	}

 	var m = make(map[*Vertex]*Vertex) // g1 to g2 vertex correspondence
@@ -695,7 +699,7 @@ Loop:
 		for _, x1 := range v1.GetGroup() {
 			l1 = append(l1, x1.GetName()) // add my contents
 		}
-		l1 = StrRemoveDuplicatesInList(l1) // remove duplicates
+		l1 = util.StrRemoveDuplicatesInList(l1) // remove duplicates
 		sort.Strings(l1)

 		// inner loop
@@ -705,7 +709,7 @@ Loop:
 			for _, x2 := range v2.GetGroup() {
 				l2 = append(l2, x2.GetName())
 			}
-			l2 = StrRemoveDuplicatesInList(l2) // remove duplicates
+			l2 = util.StrRemoveDuplicatesInList(l2) // remove duplicates
 			sort.Strings(l2)

 			// does l1 match l2 ?
@@ -714,7 +718,7 @@ Loop:
 				continue Loop
 			}
 		}
-		return fmt.Errorf("Graph g1, has no match in g2 for: %v", v1.GetName())
+		return fmt.Errorf("graph g1, has no match in g2 for: %v", v1.GetName())
 	}
 	// vertices (and groups) match :)

@@ -723,7 +727,7 @@ Loop:
 		v2 := m[v1] // lookup in map to get correspondance
 		// g1.Adjacency[v1] corresponds to g2.Adjacency[v2]
 		if e1, e2 := len(g1.Adjacency[v1]), len(g2.Adjacency[v2]); e1 != e2 {
-			return fmt.Errorf("Graph g1, vertex(%v) has %d edges, while g2, vertex(%v) has %d.", v1.GetName(), e1, v2.GetName(), e2)
+			return fmt.Errorf("graph g1, vertex(%v) has %d edges, while g2, vertex(%v) has %d", v1.GetName(), e1, v2.GetName(), e2)
 		}

 		for vv1, ee1 := range g1.Adjacency[v1] {
@@ -738,24 +742,36 @@ Loop:
 			for _, x1 := range vv1.GetGroup() {
 				l1 = append(l1, x1.GetName()) // add my contents
 			}
-			l1 = StrRemoveDuplicatesInList(l1) // remove duplicates
+			l1 = util.StrRemoveDuplicatesInList(l1) // remove duplicates
 			sort.Strings(l1)

 			l2 := strings.Split(vv2.GetName(), ",")
 			for _, x2 := range vv2.GetGroup() {
 				l2 = append(l2, x2.GetName())
 			}
-			l2 = StrRemoveDuplicatesInList(l2) // remove duplicates
+			l2 = util.StrRemoveDuplicatesInList(l2) // remove duplicates
 			sort.Strings(l2)

 			// does l1 match l2 ?
 			if !ListStrCmp(l1, l2) { // cmp!
-				return fmt.Errorf("Graph g1 and g2 don't agree on: %v and %v", vv1.GetName(), vv2.GetName())
+				return fmt.Errorf("graph g1 and g2 don't agree on: %v and %v", vv1.GetName(), vv2.GetName())
 			}

 			// check: (2) ee1 == ee2
 			if ee1.Name != ee2.Name {
-				return fmt.Errorf("Graph g1 edge(%v) doesn't match g2 edge(%v)", ee1.Name, ee2.Name)
+				return fmt.Errorf("graph g1 edge(%v) doesn't match g2 edge(%v)", ee1.Name, ee2.Name)
+			}
+		}
+	}
+
+	// check meta parameters
+	for v1 := range g1.Adjacency { // for each vertex in g1
+		for v2 := range g2.Adjacency { // does it match in g2 ?
+			s1, s2 := v1.Meta().Sema, v2.Meta().Sema
+			sort.Strings(s1)
+			sort.Strings(s2)
+			if !reflect.DeepEqual(s1, s2) {
+				return fmt.Errorf("vertex %s and vertex %s have different semaphores", v1.GetName(), v2.GetName())
 			}
 		}
 	}
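The semaphore check added to GraphCmp sorts both Sema slices before reflect.DeepEqual, so two vertices that hold the same semaphores in a different order still compare equal. A standalone illustration with made-up semaphore identifiers:

package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	s1 := []string{"sema1:2", "sema2:1"}
	s2 := []string{"sema2:1", "sema1:2"}

	fmt.Println(reflect.DeepEqual(s1, s2)) // false: DeepEqual is order sensitive

	sort.Strings(s1)
	sort.Strings(s2)
	fmt.Println(reflect.DeepEqual(s1, s2)) // true: same set of semaphores
}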
@@ -782,7 +798,7 @@ func (ag *testGrouper) vertexMerge(v1, v2 *Vertex) (v *Vertex, err error) {
 	for _, n := range obj.GetGroup() {
 		names = append(names, n.GetName()) // add my contents
 	}
-	names = StrRemoveDuplicatesInList(names) // remove duplicates
+	names = util.StrRemoveDuplicatesInList(names) // remove duplicates
 	sort.Strings(names)
 	obj.SetName(strings.Join(names, ","))
 	return // success or fail, and no need to merge the actual vertices!
@@ -793,7 +809,7 @@ func (ag *testGrouper) edgeMerge(e1, e2 *Edge) *Edge {
 	n1 := strings.Split(e1.Name, ",") // load
 	n2 := strings.Split(e2.Name, ",") // load
 	names := append(n1, n2...)
-	names = StrRemoveDuplicatesInList(names) // remove duplicates
+	names = util.StrRemoveDuplicatesInList(names) // remove duplicates
 	sort.Strings(names)
 	return NewEdge(strings.Join(names, ","))
 }
@@ -801,7 +817,11 @@ func (ag *testGrouper) edgeMerge(e1, e2 *Edge) *Edge {
 func (g *Graph) fullPrint() (str string) {
 	str += "\n"
 	for v := range g.Adjacency {
-		str += fmt.Sprintf("* v: %v\n", v.GetName())
+		if semas := v.Meta().Sema; len(semas) > 0 {
+			str += fmt.Sprintf("* v: %v; sema: %v\n", v.GetName(), semas)
+		} else {
+			str += fmt.Sprintf("* v: %v\n", v.GetName())
+		}
 		// TODO: add explicit grouping data?
 	}
 	for v1 := range g.Adjacency {
@@ -827,476 +847,12 @@ func runGraphCmp(t *testing.T, g1, g2 *Graph) {
 	}
 }

-// all of the following test cases are laid out with the following semantics:
-// * vertices which start with the same single letter are considered "like"
-// * "like" elements should be merged
-// * vertices can have any integer after their single letter "family" type
-// * grouped vertices should have a name with a comma separated list of names
-// * edges follow the same conventions about grouping
-
-// empty graph
-func TestPgraphGrouping1(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	g2 := NewGraph("g2") // expected result
-	runGraphCmp(t, g1, g2)
-}
-
-// single vertex
-func TestPgraphGrouping2(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{ // grouping to limit variable scope
-		a1 := NewVertex(NewNoopResTest("a1"))
-		g1.AddVertex(a1)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		g2.AddVertex(a1)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// two vertices
-func TestPgraphGrouping3(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		g1.AddVertex(a1, b1)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		g2.AddVertex(a1, b1)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// two vertices merge
-func TestPgraphGrouping4(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		g1.AddVertex(a1, a2)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2"))
-		g2.AddVertex(a)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// three vertices merge
-func TestPgraphGrouping5(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		a3 := NewVertex(NewNoopResTest("a3"))
-		g1.AddVertex(a1, a2, a3)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2,a3"))
-		g2.AddVertex(a)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// three vertices, two merge
-func TestPgraphGrouping6(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		g1.AddVertex(a1, a2, b1)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		g2.AddVertex(a, b1)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// four vertices, three merge
-func TestPgraphGrouping7(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		a3 := NewVertex(NewNoopResTest("a3"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		g1.AddVertex(a1, a2, a3, b1)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2,a3"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		g2.AddVertex(a, b1)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// four vertices, two&two merge
-func TestPgraphGrouping8(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		b2 := NewVertex(NewNoopResTest("b2"))
-		g1.AddVertex(a1, a2, b1, b2)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2"))
-		b := NewVertex(NewNoopResTest("b1,b2"))
-		g2.AddVertex(a, b)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// five vertices, two&three merge
-func TestPgraphGrouping9(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		b2 := NewVertex(NewNoopResTest("b2"))
-		b3 := NewVertex(NewNoopResTest("b3"))
-		g1.AddVertex(a1, a2, b1, b2, b3)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2"))
-		b := NewVertex(NewNoopResTest("b1,b2,b3"))
-		g2.AddVertex(a, b)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// three unique vertices
-func TestPgraphGrouping10(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		c1 := NewVertex(NewNoopResTest("c1"))
-		g1.AddVertex(a1, b1, c1)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		c1 := NewVertex(NewNoopResTest("c1"))
-		g2.AddVertex(a1, b1, c1)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// three unique vertices, two merge
-func TestPgraphGrouping11(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		b2 := NewVertex(NewNoopResTest("b2"))
-		c1 := NewVertex(NewNoopResTest("c1"))
-		g1.AddVertex(a1, b1, b2, c1)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		b := NewVertex(NewNoopResTest("b1,b2"))
-		c1 := NewVertex(NewNoopResTest("c1"))
-		g2.AddVertex(a1, b, c1)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// simple merge 1
-// a1   a2         a1,a2
-//   \ /     >>>     |     (arrows point downwards)
-//    b              b
-func TestPgraphGrouping12(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		e1 := NewEdge("e1")
-		e2 := NewEdge("e2")
-		g1.AddEdge(a1, b1, e1)
-		g1.AddEdge(a2, b1, e2)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		e := NewEdge("e1,e2")
-		g2.AddEdge(a, b1, e)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// simple merge 2
-// b                 b
-//   / \     >>>     |     (arrows point downwards)
-// a1   a2         a1,a2
-func TestPgraphGrouping13(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		e1 := NewEdge("e1")
-		e2 := NewEdge("e2")
-		g1.AddEdge(b1, a1, e1)
-		g1.AddEdge(b1, a2, e2)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		e := NewEdge("e1,e2")
-		g2.AddEdge(b1, a, e)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// triple merge
-// a1 a2  a3         a1,a2,a3
-//   \ | /     >>>      |      (arrows point downwards)
-//     b                b
-func TestPgraphGrouping14(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		a2 := NewVertex(NewNoopResTest("a2"))
-		a3 := NewVertex(NewNoopResTest("a3"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		e1 := NewEdge("e1")
-		e2 := NewEdge("e2")
-		e3 := NewEdge("e3")
-		g1.AddEdge(a1, b1, e1)
-		g1.AddEdge(a2, b1, e2)
-		g1.AddEdge(a3, b1, e3)
-	}
-	g2 := NewGraph("g2") // expected result
-	{
-		a := NewVertex(NewNoopResTest("a1,a2,a3"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		e := NewEdge("e1,e2,e3")
-		g2.AddEdge(a, b1, e)
-	}
-	runGraphCmp(t, g1, g2)
-}
-
-// chain merge
-// a1             a1
-//   / \           |
-// b1   b2  >>>  b1,b2     (arrows point downwards)
-//   \ /           |
-//    c1           c1
-func TestPgraphGrouping15(t *testing.T) {
-	g1 := NewGraph("g1") // original graph
-	{
-		a1 := NewVertex(NewNoopResTest("a1"))
-		b1 := NewVertex(NewNoopResTest("b1"))
-		b2 := NewVertex(NewNoopResTest("b2"))
-		c1 := NewVertex(NewNoopResTest("c1"))
-		e1 := NewEdge("e1")
-		e2 := NewEdge("e2")
-		e3 := NewEdge("e3")
-		e4 := NewEdge("e4")
-		g1.AddEdge(a1, b1, e1)
-		g1.AddEdge(a1, b2, e2)
-		g1.AddEdge(b1, c1, e3)
-		g1.AddEdge(b2, c1, e4)
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1,e2")
|
|
||||||
e2 := NewEdge("e3,e4")
|
|
||||||
g2.AddEdge(a1, b, e1)
|
|
||||||
g2.AddEdge(b, c1, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// re-attach 1 (outer)
|
|
||||||
// technically the second possibility is valid too, depending on which order we
|
|
||||||
// merge edges in, and if we don't filter out any unnecessary edges afterwards!
|
|
||||||
// a1 a2 a1,a2 a1,a2
|
|
||||||
// | / | | \
|
|
||||||
// b1 / >>> b1 OR b1 / (arrows point downwards)
|
|
||||||
// | / | | /
|
|
||||||
// c1 c1 c1
|
|
||||||
func TestPgraphGrouping16(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
e3 := NewEdge("e3")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(b1, c1, e2)
|
|
||||||
g1.AddEdge(a2, c1, e3)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1,e3")
|
|
||||||
e2 := NewEdge("e2,e3") // e3 gets "merged through" to BOTH edges!
|
|
||||||
g2.AddEdge(a, b1, e1)
|
|
||||||
g2.AddEdge(b1, c1, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// re-attach 2 (inner)
|
|
||||||
// a1 b2 a1
|
|
||||||
// | / |
|
|
||||||
// b1 / >>> b1,b2 (arrows point downwards)
|
|
||||||
// | / |
|
|
||||||
// c1 c1
|
|
||||||
func TestPgraphGrouping17(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
b2 := NewVertex(NewNoopResTest("b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
e3 := NewEdge("e3")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(b1, c1, e2)
|
|
||||||
g1.AddEdge(b2, c1, e3)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2,e3")
|
|
||||||
g2.AddEdge(a1, b, e1)
|
|
||||||
g2.AddEdge(b, c1, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// re-attach 3 (double)
|
|
||||||
// similar to "re-attach 1", technically there is a second possibility for this
|
|
||||||
// a2 a1 b2 a1,a2
|
|
||||||
// \ | / |
|
|
||||||
// \ b1 / >>> b1,b2 (arrows point downwards)
|
|
||||||
// \ | / |
|
|
||||||
// c1 c1
|
|
||||||
func TestPgraphGrouping18(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
b2 := NewVertex(NewNoopResTest("b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
e3 := NewEdge("e3")
|
|
||||||
e4 := NewEdge("e4")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(b1, c1, e2)
|
|
||||||
g1.AddEdge(a2, c1, e3)
|
|
||||||
g1.AddEdge(b2, c1, e4)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1,e3")
|
|
||||||
e2 := NewEdge("e2,e3,e4") // e3 gets "merged through" to BOTH edges!
|
|
||||||
g2.AddEdge(a, b, e1)
|
|
||||||
g2.AddEdge(b, c1, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// connected merge 0, (no change!)
|
|
||||||
// a1 a1
|
|
||||||
// \ >>> \ (arrows point downwards)
|
|
||||||
// a2 a2
|
|
||||||
func TestPgraphGroupingConnected0(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
g1.AddEdge(a1, a2, e1)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result ?
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
g2.AddEdge(a1, a2, e1)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// connected merge 1, (no change!)
|
|
||||||
// a1 a1
|
|
||||||
// \ \
|
|
||||||
// b >>> b (arrows point downwards)
|
|
||||||
// \ \
|
|
||||||
// a2 a2
|
|
||||||
func TestPgraphGroupingConnected1(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
g1.AddEdge(a1, b, e1)
|
|
||||||
g1.AddEdge(b, a2, e2)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result ?
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
g2.AddEdge(a1, b, e1)
|
|
||||||
g2.AddEdge(b, a2, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDurationAssumptions(t *testing.T) {
|
func TestDurationAssumptions(t *testing.T) {
|
||||||
var d time.Duration
|
var d time.Duration
|
||||||
if (d == 0) != true {
|
if (d == 0) != true {
|
||||||
t.Errorf("Empty time.Duration is no longer equal to zero!")
|
t.Errorf("empty time.Duration is no longer equal to zero")
|
||||||
}
|
}
|
||||||
if (d > 0) != false {
|
if (d > 0) != false {
|
||||||
t.Errorf("Empty time.Duration is now greater than zero!")
|
t.Errorf("empty time.Duration is now greater than zero")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
pgraph/semaphore.go (new file, 85 lines):

```go
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package pgraph

import (
	"fmt"
	"sort"
	"strconv"
	"strings"

	"github.com/purpleidea/mgmt/util/semaphore"

	multierr "github.com/hashicorp/go-multierror"
)

// SemaSep is the trailing separator to split the semaphore id from the size.
const SemaSep = ":"

// SemaLock acquires the list of semaphores in the graph.
func (g *Graph) SemaLock(semas []string) error {
	var reterr error
	sort.Strings(semas) // very important to avoid deadlock in the dag!
	for _, id := range semas {
		g.slock.Lock()          // semaphore creation lock
		sema, ok := g.semas[id] // lookup
		if !ok {
			size := SemaSize(id) // defaults to 1
			g.semas[id] = semaphore.NewSemaphore(size)
			sema = g.semas[id]
		}
		g.slock.Unlock()

		if err := sema.P(1); err != nil { // lock!
			reterr = multierr.Append(reterr, err) // list of errors
		}
	}
	return reterr
}

// SemaUnlock releases the list of semaphores in the graph.
func (g *Graph) SemaUnlock(semas []string) error {
	var reterr error
	sort.Strings(semas) // unlock in the same order to remove partial locks
	for _, id := range semas {
		sema, ok := g.semas[id] // lookup
		if !ok {
			// programming error!
			panic(fmt.Sprintf("graph: sema: %s does not exist", id))
		}

		if err := sema.V(1); err != nil { // unlock!
			reterr = multierr.Append(reterr, err) // list of errors
		}
	}
	return reterr
}

// SemaSize returns the size integer associated with the semaphore id. It
// defaults to 1 if not found.
func SemaSize(id string) int {
	size := 1 // default semaphore size
	// valid id's include "some_id", "hello:42" and ":13"
	if index := strings.LastIndex(id, SemaSep); index > -1 && (len(id)-index+len(SemaSep)) >= 1 {
		// NOTE: we only allow size > 0 here!
		if i, err := strconv.Atoi(id[index+len(SemaSep):]); err == nil && i > 0 {
			size = i
		}
	}
	return size
}
```
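As a rough illustration of the id format that `SemaSize` accepts, here is a small standalone sketch of mine (not part of this changeset); it only assumes the import path shown in the file above and the parsing rules visible in `SemaSize` itself:

```go
package main

import (
	"fmt"

	"github.com/purpleidea/mgmt/pgraph"
)

func main() {
	// An optional trailing ":N" sets the semaphore size; anything else,
	// including a non-positive suffix, falls back to the default of 1.
	for _, id := range []string{"some_id", "hello:42", ":13", "bad:0"} {
		fmt.Printf("%q -> %d\n", id, pgraph.SemaSize(id))
	}
	// Output: 1, 42, 13, 1
}
```

`SemaLock` and `SemaUnlock` then sort these ids before acquiring or releasing, which is what keeps two resources that share overlapping semaphore sets from deadlocking each other.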
pgraph/semaphore_test.go (new file, 106 lines):

```go
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package pgraph

import (
	"testing"

	"github.com/purpleidea/mgmt/resources"
)

func TestSemaSize(t *testing.T) {
	pairs := map[string]int{
		"id:42":   42,
		":13":     13,
		"some_id": 1,
	}
	for id, size := range pairs {
		if i := SemaSize(id); i != size {
			t.Errorf("sema id `%s`, expected: `%d`, got: `%d`", id, size, i)
		}
	}
}

func NewNoopResTestSema(name string, semas []string) *NoopResTest {
	obj := &NoopResTest{
		NoopRes: resources.NoopRes{
			BaseRes: resources.BaseRes{
				Name: name,
				MetaParams: resources.MetaParams{
					AutoGroup: true, // always autogroup
					Sema:      semas,
				},
			},
		},
	}
	return obj
}

func TestPgraphSemaphoreGrouping1(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:1"}))
		a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
		a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
		g1.AddVertex(a1)
		g1.AddVertex(a2)
		g1.AddVertex(a3)
	}
	g2 := NewGraph("g2") // expected result
	{
		a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:1", "s:2", "s:3"}))
		g2.AddVertex(a123)
	}
	runGraphCmp(t, g1, g2)
}

func TestPgraphSemaphoreGrouping2(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:10", "s:11"}))
		a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
		a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
		g1.AddVertex(a1)
		g1.AddVertex(a2)
		g1.AddVertex(a3)
	}
	g2 := NewGraph("g2") // expected result
	{
		a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:10", "s:11", "s:2", "s:3"}))
		g2.AddVertex(a123)
	}
	runGraphCmp(t, g1, g2)
}

func TestPgraphSemaphoreGrouping3(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:1", "s:2"}))
		a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
		a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
		g1.AddVertex(a1)
		g1.AddVertex(a2)
		g1.AddVertex(a3)
	}
	g2 := NewGraph("g2") // expected result
	{
		a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:1", "s:2", "s:3"}))
		g2.AddVertex(a123)
	}
	runGraphCmp(t, g1, g2)
}
```
prometheus/prometheus.go (new file, 263 lines):

```go
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// Package prometheus provides functions that are useful to control and manage
// the built-in prometheus instance.
package prometheus

import (
	"errors"
	"net/http"
	"strconv"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	errwrap "github.com/pkg/errors"
)

// DefaultPrometheusListen is registered in
// https://github.com/prometheus/prometheus/wiki/Default-port-allocations
const DefaultPrometheusListen = "127.0.0.1:9233"

// ResState represents the status of a resource.
type ResState int

const (
	// ResStateOK represents a working resource.
	ResStateOK ResState = iota
	// ResStateSoftFail represents a resource in soft fail (will be retried).
	ResStateSoftFail
	// ResStateHardFail represents a resource in hard fail (will NOT be retried).
	ResStateHardFail
)

// Prometheus is the struct that contains information about the prometheus
// instance. Run Init() on it.
type Prometheus struct {
	Listen string // the listen specification for the net/http server

	checkApplyTotal        *prometheus.CounterVec // total of CheckApplies that have been triggered
	pgraphStartTimeSeconds prometheus.Gauge       // process start time in seconds since unix epoch
	managedResources       *prometheus.GaugeVec   // resources we manage now
	failedResourcesTotal   *prometheus.CounterVec // total of failures since mgmt has started
	failedResources        *prometheus.GaugeVec   // number of currently failing resources

	resourcesState map[string]resStateWithKind // maps the resources with their current kind/state
	mutex          *sync.Mutex                 // mutex used to update resourcesState
}

// resStateWithKind is used to count the failures by kind.
type resStateWithKind struct {
	state ResState
	kind  string
}

// Init some parameters - currently the Listen address.
func (obj *Prometheus) Init() error {
	if len(obj.Listen) == 0 {
		obj.Listen = DefaultPrometheusListen
	}

	obj.mutex = &sync.Mutex{}
	obj.resourcesState = make(map[string]resStateWithKind)

	obj.checkApplyTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "mgmt_checkapply_total",
			Help: "Number of CheckApply that have run.",
		},
		// Labels for this metric.
		// kind: resource type: Svc, File, ...
		// apply: if the CheckApply happened in "apply" mode
		// eventful: did the CheckApply generate an event
		// errorful: did the CheckApply generate an error
		[]string{"kind", "apply", "eventful", "errorful"},
	)
	prometheus.MustRegister(obj.checkApplyTotal)

	obj.pgraphStartTimeSeconds = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: "mgmt_graph_start_time_seconds",
			Help: "Start time of the current graph since unix epoch in seconds.",
		},
	)
	prometheus.MustRegister(obj.pgraphStartTimeSeconds)

	obj.managedResources = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "mgmt_resources",
			Help: "Number of managed resources.",
		},
		// kind: resource type: Svc, File, ...
		[]string{"kind"},
	)
	prometheus.MustRegister(obj.managedResources)

	obj.failedResourcesTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "mgmt_failures_total",
			Help: "Total of failed resources.",
		},
		// kind: resource type: Svc, File, ...
		// failure: soft or hard
		[]string{"kind", "failure"},
	)
	prometheus.MustRegister(obj.failedResourcesTotal)

	obj.failedResources = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "mgmt_failures",
			Help: "Number of failing resources.",
		},
		// kind: resource type: Svc, File, ...
		// failure: soft or hard
		[]string{"kind", "failure"},
	)
	prometheus.MustRegister(obj.failedResources)

	return nil
}

// Start runs a http server in a goroutine that responds to /metrics as
// prometheus would expect.
func (obj *Prometheus) Start() error {
	http.Handle("/metrics", promhttp.Handler())
	go http.ListenAndServe(obj.Listen, nil)
	return nil
}

// Stop the http server.
func (obj *Prometheus) Stop() error {
	// TODO: There is no way in go < 1.8 to stop a http server.
	// https://stackoverflow.com/questions/39320025/go-how-to-stop-http-listenandserve/41433555#41433555
	return nil
}

// UpdateCheckApplyTotal increments the mgmt_checkapply_total counter with the
// given labels.
func (obj *Prometheus) UpdateCheckApplyTotal(kind string, apply, eventful, errorful bool) error {
	if obj == nil {
		return nil // happens when mgmt is launched without --prometheus
	}
	labels := prometheus.Labels{"kind": kind, "apply": strconv.FormatBool(apply), "eventful": strconv.FormatBool(eventful), "errorful": strconv.FormatBool(errorful)}
	metric := obj.checkApplyTotal.With(labels)
	metric.Inc()
	return nil
}

// UpdatePgraphStartTime updates the mgmt_graph_start_time_seconds metric to
// the current timestamp.
func (obj *Prometheus) UpdatePgraphStartTime() error {
	if obj == nil {
		return nil // happens when mgmt is launched without --prometheus
	}
	obj.pgraphStartTimeSeconds.SetToCurrentTime()
	return nil
}

// AddManagedResource increments the managed resource gauge and updates the
// resource status.
func (obj *Prometheus) AddManagedResource(resUUID string, rtype string) error {
	if obj == nil {
		return nil // happens when mgmt is launched without --prometheus
	}
	obj.managedResources.With(prometheus.Labels{"kind": rtype}).Inc()
	if err := obj.UpdateState(resUUID, rtype, ResStateOK); err != nil {
		return errwrap.Wrapf(err, "can't update the resource status in the map")
	}
	return nil
}

// RemoveManagedResource decrements the managed resource gauge and updates the
// resource status.
func (obj *Prometheus) RemoveManagedResource(resUUID string, rtype string) error {
	if obj == nil {
		return nil // happens when mgmt is launched without --prometheus
	}
	obj.managedResources.With(prometheus.Labels{"kind": rtype}).Dec()
	if err := obj.deleteState(resUUID); err != nil {
		return errwrap.Wrapf(err, "can't remove the resource status from the map")
	}
	return nil
}

// deleteState removes the resource from the state map and re-populates the
// failing gauge.
func (obj *Prometheus) deleteState(resUUID string) error {
	if obj == nil {
		return nil // happens when mgmt is launched without --prometheus
	}
	obj.mutex.Lock()
	delete(obj.resourcesState, resUUID)
	obj.mutex.Unlock()
	if err := obj.updateFailingGauge(); err != nil {
		return errwrap.Wrapf(err, "can't update the failing gauge")
	}
	return nil
}

// UpdateState updates the state of the resource in our internal state map and
// then triggers a refresh of the failing gauge.
func (obj *Prometheus) UpdateState(resUUID string, rtype string, newState ResState) error {
	defer obj.updateFailingGauge()
	if obj == nil {
		return nil // happens when mgmt is launched without --prometheus
	}
	obj.mutex.Lock()
	obj.resourcesState[resUUID] = resStateWithKind{state: newState, kind: rtype}
	obj.mutex.Unlock()
	if newState != ResStateOK {
		var strState string
		if newState == ResStateSoftFail {
			strState = "soft"
		} else if newState == ResStateHardFail {
			strState = "hard"
		} else {
			return errors.New("state should be soft or hard failure")
		}
		obj.failedResourcesTotal.With(prometheus.Labels{"kind": rtype, "failure": strState}).Inc()
	}
	return nil
}

// updateFailingGauge refreshes the failing gauge by parsing the internal
// state map.
func (obj *Prometheus) updateFailingGauge() error {
	if obj == nil {
		return nil // happens when mgmt is launched without --prometheus
	}
	var softFails, hardFails map[string]float64
	softFails = make(map[string]float64)
	hardFails = make(map[string]float64)
	for _, v := range obj.resourcesState {
		if v.state == ResStateSoftFail {
			softFails[v.kind]++
		} else if v.state == ResStateHardFail {
			hardFails[v.kind]++
		}
	}
	// TODO: we might want to zero the metrics we are not using, because in
	// the prometheus design the metrics keep living for some time even
	// after they are removed.
	obj.failedResources.Reset()
	for k, v := range softFails {
		obj.failedResources.With(prometheus.Labels{"kind": k, "failure": "soft"}).Set(v)
	}
	for k, v := range hardFails {
		obj.failedResources.With(prometheus.Labels{"kind": k, "failure": "hard"}).Set(v)
	}
	return nil
}
```
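A minimal sketch of how a caller might wire this package up (this example is mine, not code from the changeset; mgmt's real startup path differs, and the "Noop" kind here is just a placeholder label):

```go
package main

import (
	"log"
	"time"

	"github.com/purpleidea/mgmt/prometheus"
)

func main() {
	prom := &prometheus.Prometheus{Listen: "127.0.0.1:9233"} // empty Listen falls back to the default
	if err := prom.Init(); err != nil {
		log.Fatal(err)
	}
	if err := prom.Start(); err != nil { // serves /metrics in a goroutine
		log.Fatal(err)
	}

	// Record a graph start and one successful CheckApply for a hypothetical resource kind.
	prom.UpdatePgraphStartTime()
	prom.UpdateCheckApplyTotal("Noop", true, false, false)

	time.Sleep(time.Minute) // keep the process alive so /metrics can be scraped
}
```

Note how every exported method returns early when the receiver is nil, which is what lets the rest of mgmt call these hooks unconditionally even when `--prometheus` was not passed.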
Changes to the Puppet GAPI:

```diff
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -50,10 +50,10 @@ func NewGAPI(data gapi.Data, puppetParam *string, puppetConf string) (*GAPI, err
 // Init initializes the puppet GAPI struct.
 func (obj *GAPI) Init(data gapi.Data) error {
 	if obj.initialized {
-		return fmt.Errorf("Already initialized!")
+		return fmt.Errorf("already initialized")
 	}
 	if obj.PuppetParam == nil {
-		return fmt.Errorf("The PuppetParam param must be specified!")
+		return fmt.Errorf("the PuppetParam param must be specified")
 	}
 	obj.data = data // store for later
 	obj.closeChan = make(chan struct{})
@@ -64,23 +64,23 @@ func (obj *GAPI) Init(data gapi.Data) error {
 // Graph returns a current Graph.
 func (obj *GAPI) Graph() (*pgraph.Graph, error) {
 	if !obj.initialized {
-		return nil, fmt.Errorf("Puppet: GAPI is not initialized!")
+		return nil, fmt.Errorf("the puppet GAPI is not initialized")
 	}
 	config := ParseConfigFromPuppet(*obj.PuppetParam, obj.PuppetConf)
 	if config == nil {
-		return nil, fmt.Errorf("Puppet: ParseConfigFromPuppet returned nil!")
+		return nil, fmt.Errorf("function ParseConfigFromPuppet returned nil")
 	}
 	g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.World, obj.data.Noop)
 	return g, err
 }

-// SwitchStream returns nil errors every time there could be a new graph.
-func (obj *GAPI) SwitchStream() chan error {
+// Next returns nil errors every time there could be a new graph.
+func (obj *GAPI) Next() chan error {
 	if obj.data.NoWatch {
 		return nil
 	}
 	puppetChan := func() <-chan time.Time { // helper function
-		return time.Tick(time.Duration(PuppetInterval(obj.PuppetConf)) * time.Second)
+		return time.Tick(time.Duration(RefreshInterval(obj.PuppetConf)) * time.Second)
 	}
 	ch := make(chan error)
 	obj.wg.Add(1)
@@ -88,7 +88,7 @@ func (obj *GAPI) SwitchStream() chan error {
 		defer obj.wg.Done()
 		defer close(ch) // this will run before the obj.wg.Done()
 		if !obj.initialized {
-			ch <- fmt.Errorf("Puppet: GAPI is not initialized!")
+			ch <- fmt.Errorf("the puppet GAPI is not initialized")
 			return
 		}
 		pChan := puppetChan()
@@ -100,7 +100,12 @@ func (obj *GAPI) SwitchStream() chan error {
 				}
 				log.Printf("Puppet: Generating new graph...")
 				pChan = puppetChan() // TODO: okay to update interval in case it changed?
-				ch <- nil // trigger a run
+				select {
+				case ch <- nil: // trigger a run (send a msg)
+				// unblock if we exit while waiting to send!
+				case <-obj.closeChan:
+					return
+				}
 			case <-obj.closeChan:
 				return
 			}
@@ -112,7 +117,7 @@ func (obj *GAPI) SwitchStream() chan error {
 // Close shuts down the Puppet GAPI.
 func (obj *GAPI) Close() error {
 	if !obj.initialized {
-		return fmt.Errorf("Puppet: GAPI is not initialized!")
+		return fmt.Errorf("the puppet GAPI is not initialized")
 	}
 	close(obj.closeChan)
 	obj.wg.Wait()
```
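The `ch <- nil` to `select` change above is a common Go shutdown pattern: a bare send can block forever once the receiver has gone away, so the send is raced against the close channel. A generic, self-contained sketch of the same idea (the names here are illustrative, not mgmt code):

```go
package main

import (
	"fmt"
	"time"
)

// produce sends events on ch, but never blocks past shutdown: every send is
// raced against closeChan so the goroutine can always exit promptly.
func produce(ch chan<- error, closeChan <-chan struct{}) {
	defer close(ch)
	for {
		time.Sleep(10 * time.Millisecond) // stand-in for waiting on a timer tick
		select {
		case ch <- nil: // trigger a run
		case <-closeChan: // unblock if we exit while waiting to send
			return
		}
	}
}

func main() {
	ch := make(chan error)
	closeChan := make(chan struct{})
	go produce(ch, closeChan)
	<-ch             // consume one event
	close(closeChan) // ask the producer to stop
	time.Sleep(50 * time.Millisecond)
	fmt.Println("done")
}
```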
Changes to the puppet module constants and the interval helper:

```diff
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -32,7 +32,8 @@ import (
 const (
 	// PuppetYAMLBufferSize is the maximum buffer size for the yaml input data
 	PuppetYAMLBufferSize = 65535
-	Debug = false // FIXME: integrate with global debug flag
+	// Debug is a local debug constant used in this module
+	Debug = false // FIXME: integrate with global debug flag
 )

 func runPuppetCommand(cmd *exec.Cmd) ([]byte, error) {
@@ -115,8 +116,8 @@ func ParseConfigFromPuppet(puppetParam, puppetConf string) *yamlgraph.GraphConfi
 	return &config
 }

-// PuppetInterval returns the graph refresh interval from the puppet configuration.
-func PuppetInterval(puppetConf string) int {
+// RefreshInterval returns the graph refresh interval from the puppet configuration.
+func RefreshInterval(puppetConf string) int {
 	if Debug {
 		log.Printf("Puppet: determining graph refresh interval")
 	}
```
Changes to the ConfigWatcher:

```diff
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -59,15 +59,23 @@ func (obj *ConfigWatcher) Add(file ...string) {
 		ch := obj.ConfigWatch(file[0])
 		for {
 			select {
-			case e := <-ch:
+			case e, ok := <-ch:
+				if !ok { // channel closed
+					return
+				}
 				if e != nil {
 					obj.errorchan <- e
 					return
 				}
-				obj.ch <- file[0]
+				select {
+				case obj.ch <- file[0]: // send on channel
+				case <-obj.closechan:
+					return // never mind, close early!
+				}
 				continue
-			case <-obj.closechan:
-				return
+			// not needed, closes via ConfigWatch() chan close
+			//case <-obj.closechan:
+			//	return
 			}
 		}
 	}()
@@ -126,7 +134,12 @@ func (obj *ConfigWatcher) ConfigWatch(file string) chan error {
 				close(ch)
 				return
 			}
-			ch <- nil // send event!
+			select {
+			case ch <- nil: // send event!
+			case <-obj.closechan:
+				close(ch)
+				return
+			}
 		}
 	}
 	//close(ch)
```
A copyright-year-only change to another file in this package:

```diff
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
```
Changes to the RecWatcher:

```diff
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -51,10 +51,10 @@ type RecWatcher struct {
 	watcher  *fsnotify.Watcher
 	watches  map[string]struct{}
 	events   chan Event // one channel for events and err...
-	once     sync.Once
+	closed   bool       // is the events channel closed?
+	mutex    sync.Mutex // lock guarding the channel closing
 	wg       sync.WaitGroup
 	exit     chan struct{}
-	closeErr error
 }

 // NewRecWatcher creates an initializes a new recursive watcher.
@@ -87,11 +87,22 @@ func (obj *RecWatcher) Init() error {
 		}
 	}

+	obj.wg.Add(1)
 	go func() {
+		defer obj.wg.Done()
 		if err := obj.Watch(); err != nil {
-			obj.events <- Event{Error: err}
+			// we need this mutex, because if we Init and then Close
+			// immediately, this can send after closed which panics!
+			obj.mutex.Lock()
+			if !obj.closed {
+				select {
+				case obj.events <- Event{Error: err}:
+				case <-obj.exit:
+					// pass
+				}
+			}
+			obj.mutex.Unlock()
 		}
-		obj.Close()
 	}()
 	return nil
 }
@@ -106,26 +117,18 @@ func (obj *RecWatcher) Init() error {

 // Close shuts down the watcher.
 func (obj *RecWatcher) Close() error {
-	obj.once.Do(obj.close) // don't cause the channel to close twice
-	return obj.closeErr
-}
-
-// This close function is the function that actually does the close work. Don't
-// call it more than once!
-func (obj *RecWatcher) close() {
 	var err error
 	close(obj.exit) // send exit signal
 	obj.wg.Wait()
 	if obj.watcher != nil {
 		err = obj.watcher.Close()
 		obj.watcher = nil
-		// TODO: should we send the close error?
-		//if err != nil {
-		//	obj.events <- Event{Error: err}
-		//}
 	}
+	obj.mutex.Lock() // FIXME: I don't think this mutex is needed anymore...
+	obj.closed = true
 	close(obj.events)
-	obj.closeErr = err // set the error
+	obj.mutex.Unlock()
+	return err
 }

 // Events returns a channel of events. These include events for errors.
@@ -134,10 +137,8 @@ func (obj *RecWatcher) Events() chan Event { return obj.events }
 // Watch is the primary listener for this resource and it outputs events.
 func (obj *RecWatcher) Watch() error {
 	if obj.watcher == nil {
-		return fmt.Errorf("Watcher is not initialized!")
+		return fmt.Errorf("the watcher is not initialized")
 	}
-	obj.wg.Add(1)
-	defer obj.wg.Done()

 	patharray := util.PathSplit(obj.safename) // tokenize the path
 	var index = len(patharray)                // starting index
@@ -169,11 +170,11 @@ func (obj *RecWatcher) Watch() error {
 				// no space left on device, out of inotify watches
 				// TODO: consider letting the user fall back to
 				// polling if they hit this error very often...
-				return fmt.Errorf("Out of inotify watches: %v", err)
+				return fmt.Errorf("out of inotify watches: %v", err)
 			} else if os.IsPermission(err) {
-				return fmt.Errorf("Permission denied adding a watch: %v", err)
+				return fmt.Errorf("permission denied adding a watch: %v", err)
 			}
-			return fmt.Errorf("Unknown error: %v", err)
+			return fmt.Errorf("unknown error: %v", err)
 		}

 		select {
@@ -236,6 +237,13 @@ func (obj *RecWatcher) Watch() error {
 				index--
 			}

+			// when the file is moved, remove the watcher and add a new one,
+			// so we stop tracking the old inode.
+			if deltaDepth >= 0 && (event.Op&fsnotify.Rename == fsnotify.Rename) {
+				obj.watcher.Remove(current)
+				obj.watcher.Add(current)
+			}
+
 			// we must be a parent watcher, so descend in
 			if deltaDepth < 0 {
 				// XXX: we can block here due to: https://github.com/fsnotify/fsnotify/issues/123
@@ -272,11 +280,16 @@ func (obj *RecWatcher) Watch() error {
 			if send {
 				send = false
 				// only invalid state on certain types of events
-				obj.events <- Event{Error: nil, Body: &event}
+				select {
+				// exit even when we're blocked on event sending
+				case obj.events <- Event{Error: nil, Body: &event}:
+				case <-obj.exit:
+					return fmt.Errorf("pending event not sent")
+				}
 			}

 		case err := <-obj.watcher.Errors:
-			return fmt.Errorf("Unknown watcher error: %v", err)
+			return fmt.Errorf("unknown watcher error: %v", err)

 		case <-obj.exit:
 			return nil
```
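The RecWatcher change above swaps a `sync.Once`-based close for an explicit `closed` flag guarded by a mutex, so that a late error send from the watch goroutine can never hit an already closed channel (which would panic). A generic, self-contained sketch of that guarded-send pattern (illustrative names, not mgmt code):

```go
package main

import (
	"fmt"
	"sync"
)

// guardedSender owns a channel that a producer may still try to write to
// after shutdown has started; the mutex plus closed flag make the
// "send versus close" race safe, because sending on a closed channel panics.
type guardedSender struct {
	mu     sync.Mutex
	closed bool
	events chan string
}

func (g *guardedSender) trySend(msg string) bool {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.closed {
		return false // drop the message instead of panicking
	}
	g.events <- msg
	return true
}

func (g *guardedSender) close() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.closed = true
	close(g.events)
}

func main() {
	g := &guardedSender{events: make(chan string, 1)} // buffered so trySend won't block under the lock
	g.trySend("hello")
	fmt.Println(<-g.events)
	g.close()
	fmt.Println(g.trySend("too late")) // false, but no panic
}
```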
@@ -1,5 +1,5 @@
|
|||||||
// Mgmt
|
// Mgmt
|
||||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
//
|
//
|
||||||
// This program is free software: you can redistribute it and/or modify
|
// This program is free software: you can redistribute it and/or modify
|
||||||
@@ -63,6 +63,7 @@ import (
|
|||||||
|
|
||||||
cv "github.com/purpleidea/mgmt/converger"
|
cv "github.com/purpleidea/mgmt/converger"
|
||||||
"github.com/purpleidea/mgmt/util"
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
"github.com/purpleidea/mgmt/util/semaphore"
|
||||||
"github.com/purpleidea/mgmt/yamlgraph"
|
"github.com/purpleidea/mgmt/yamlgraph"
|
||||||
|
|
||||||
multierr "github.com/hashicorp/go-multierror"
|
multierr "github.com/hashicorp/go-multierror"
|
||||||
@@ -156,11 +157,11 @@ func (obj *SSH) Sftp() error {
|
|||||||
var err error
|
var err error
|
||||||
|
|
||||||
if obj.client == nil {
|
if obj.client == nil {
|
||||||
return fmt.Errorf("Not dialed!")
|
return fmt.Errorf("not dialed")
|
||||||
}
|
}
|
||||||
// this check is needed because the golang path.Base function is weird!
|
// this check is needed because the golang path.Base function is weird!
|
||||||
if strings.HasSuffix(obj.file, "/") {
|
if strings.HasSuffix(obj.file, "/") {
|
||||||
return fmt.Errorf("File must not be a directory.")
|
return fmt.Errorf("file must not be a directory")
|
||||||
}
|
}
|
||||||
|
|
||||||
// we run local operations first so that remote clean up is easier...
|
// we run local operations first so that remote clean up is easier...
|
||||||
@@ -254,7 +255,7 @@ func (obj *SSH) Sftp() error {
|
|||||||
// make file executable; don't cache this in case it didn't ever happen
|
// make file executable; don't cache this in case it didn't ever happen
|
||||||
// TODO: do we want the group or other bits set?
|
// TODO: do we want the group or other bits set?
|
||||||
if err := obj.sftp.Chmod(obj.execpath, 0770); err != nil {
|
if err := obj.sftp.Chmod(obj.execpath, 0770); err != nil {
|
||||||
return fmt.Errorf("Can't set file mode bits!")
|
return fmt.Errorf("can't set file mode bits")
|
||||||
}
|
}
|
||||||
|
|
||||||
// copy graph file
|
// copy graph file
|
||||||
@@ -273,7 +274,7 @@ func (obj *SSH) Sftp() error {
|
|||||||
// SftpGraphCopy is a helper function used for re-copying the graph definition.
|
// SftpGraphCopy is a helper function used for re-copying the graph definition.
|
||||||
func (obj *SSH) SftpGraphCopy() (int64, error) {
|
func (obj *SSH) SftpGraphCopy() (int64, error) {
|
||||||
if obj.filepath == "" {
|
if obj.filepath == "" {
|
||||||
return -1, fmt.Errorf("Sftp session isn't ready yet!")
|
return -1, fmt.Errorf("sftp session isn't ready yet")
|
||||||
}
|
}
|
||||||
return obj.SftpCopy(obj.file, obj.filepath)
|
return obj.SftpCopy(obj.file, obj.filepath)
|
||||||
}
|
}
|
||||||
@@ -281,7 +282,7 @@ func (obj *SSH) SftpGraphCopy() (int64, error) {
|
|||||||
// SftpCopy is a simple helper function that runs a local -> remote sftp copy.
|
// SftpCopy is a simple helper function that runs a local -> remote sftp copy.
|
||||||
func (obj *SSH) SftpCopy(src, dst string) (int64, error) {
|
func (obj *SSH) SftpCopy(src, dst string) (int64, error) {
|
||||||
if obj.sftp == nil {
|
if obj.sftp == nil {
|
||||||
return -1, fmt.Errorf("Sftp session is not active!")
|
return -1, fmt.Errorf("sftp session is not active")
|
||||||
}
|
}
|
||||||
var err error
|
var err error
|
||||||
// TODO: add a check to make sure we don't run two copies of this
|
// TODO: add a check to make sure we don't run two copies of this
|
||||||
@@ -313,7 +314,7 @@ func (obj *SSH) SftpCopy(src, dst string) (int64, error) {
|
|||||||
return n, fmt.Errorf("Can't copy to remote path: %v", err)
|
return n, fmt.Errorf("Can't copy to remote path: %v", err)
|
||||||
}
|
}
|
||||||
if n <= 0 {
|
if n <= 0 {
|
||||||
return n, fmt.Errorf("Zero bytes copied!")
|
return n, fmt.Errorf("zero bytes copied")
|
||||||
}
|
}
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
@@ -391,10 +392,10 @@ func (obj *SSH) Tunnel() error {
|
|||||||
var err error
|
var err error
|
||||||
|
|
||||||
if len(obj.clientURLs) < 1 {
|
if len(obj.clientURLs) < 1 {
|
||||||
return fmt.Errorf("Need at least one client URL to tunnel!")
|
return fmt.Errorf("need at least one client URL to tunnel")
|
||||||
}
|
}
|
||||||
if len(obj.remoteURLs) < 1 {
|
if len(obj.remoteURLs) < 1 {
|
||||||
return fmt.Errorf("Need at least one remote URL to tunnel!")
|
return fmt.Errorf("need at least one remote URL to tunnel")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: do something less arbitrary about which one we pick?
|
// TODO: do something less arbitrary about which one we pick?
|
||||||
@@ -477,10 +478,10 @@ func (obj *SSH) TunnelClose() error {
|
|||||||
// Exec runs the binary on the remote server.
|
// Exec runs the binary on the remote server.
|
||||||
func (obj *SSH) Exec() error {
|
func (obj *SSH) Exec() error {
|
||||||
if obj.execpath == "" {
|
if obj.execpath == "" {
|
||||||
return fmt.Errorf("Must have a binary path to execute!")
|
return fmt.Errorf("must have a binary path to execute")
|
||||||
}
|
}
|
||||||
if obj.filepath == "" {
|
if obj.filepath == "" {
|
||||||
return fmt.Errorf("Must have a graph definition to run!")
|
return fmt.Errorf("must have a graph definition to run")
|
||||||
}
|
}
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
@@ -698,16 +699,16 @@ type Remotes struct {
|
|||||||
converger cv.Converger
|
converger cv.Converger
|
||||||
convergerCb func(func(map[string]bool) error) (func(), error)
|
convergerCb func(func(map[string]bool) error) (func(), error)
|
||||||
|
|
||||||
wg sync.WaitGroup // keep track of each running SSH connection
|
wg sync.WaitGroup // keep track of each running SSH connection
|
||||||
lock sync.Mutex // mutex for access to sshmap
|
lock sync.Mutex // mutex for access to sshmap
|
||||||
sshmap map[string]*SSH // map to each SSH struct with the remote as the key
|
sshmap map[string]*SSH // map to each SSH struct with the remote as the key
|
||||||
exiting bool // flag to let us know if we're exiting
|
exiting bool // flag to let us know if we're exiting
|
||||||
exitChan chan struct{} // closes when we should exit
|
exitChan chan struct{} // closes when we should exit
|
||||||
semaphore Semaphore // counting semaphore to limit concurrent connections
|
semaphore *semaphore.Semaphore // counting semaphore to limit concurrent connections
|
||||||
hostnames []string // list of hostnames we've seen so far
|
hostnames []string // list of hostnames we've seen so far
|
||||||
cuid cv.ConvergerUID // convergerUID for the remote itself
|
cuid cv.UID // convergerUID for the remote itself
|
||||||
cuids map[string]cv.ConvergerUID // map to each SSH struct with the remote as the key
|
cuids map[string]cv.UID // map to each SSH struct with the remote as the key
|
||||||
callbackCancelFunc func() // stored callback function cancel function
|
callbackCancelFunc func() // stored callback function cancel function
|
||||||
|
|
||||||
flags Flags // constant runtime values
|
flags Flags // constant runtime values
|
||||||
}
|
}
|
||||||
@@ -730,9 +731,9 @@ func NewRemotes(clientURLs, remoteURLs []string, noop bool, remotes []string, fi
|
|||||||
convergerCb: convergerCb,
|
convergerCb: convergerCb,
|
||||||
sshmap: make(map[string]*SSH),
|
sshmap: make(map[string]*SSH),
|
||||||
exitChan: make(chan struct{}),
|
exitChan: make(chan struct{}),
|
||||||
semaphore: NewSemaphore(int(cConns)),
|
semaphore: semaphore.NewSemaphore(int(cConns)),
|
||||||
hostnames: make([]string, len(remotes)),
|
hostnames: make([]string, len(remotes)),
|
||||||
-cuids: make(map[string]cv.ConvergerUID),
+cuids: make(map[string]cv.UID),
flags: flags,
}
}
@@ -772,7 +773,7 @@ func (obj *Remotes) NewSSH(file string) (*SSH, error) {
}
host = x[0]
if host == "" {
-return nil, fmt.Errorf("Empty hostname!")
+return nil, fmt.Errorf("empty hostname")
}

user := defaultUser // default
@@ -795,7 +796,7 @@ func (obj *Remotes) NewSSH(file string) (*SSH, error) {
}

if len(auth) == 0 {
-return nil, fmt.Errorf("No authentication methods available!")
+return nil, fmt.Errorf("no authentication methods available")
}

//hostname := config.Hostname // TODO: optionally specify local hostname somehow
@@ -804,7 +805,7 @@ func (obj *Remotes) NewSSH(file string) (*SSH, error) {
hostname = host // default to above
}
if util.StrInList(hostname, obj.hostnames) {
-return nil, fmt.Errorf("Remote: Hostname `%s` already exists!", hostname)
+return nil, fmt.Errorf("Remote: Hostname `%s` already exists", hostname)
}
obj.hostnames = append(obj.hostnames, hostname)

@@ -830,7 +831,7 @@ func (obj *Remotes) NewSSH(file string) (*SSH, error) {
// sshKeyAuth is a helper function to get the ssh key auth struct needed
func (obj *Remotes) sshKeyAuth() (ssh.AuthMethod, error) {
if obj.sshPrivIdRsa == "" {
-return nil, fmt.Errorf("Empty path specified!")
+return nil, fmt.Errorf("empty path specified")
}
p := ""
// TODO: this doesn't match strings of the form: ~james/.ssh/id_rsa
@@ -843,7 +844,7 @@ func (obj *Remotes) sshKeyAuth() (ssh.AuthMethod, error) {
p = path.Join(usr.HomeDir, obj.sshPrivIdRsa[len("~/"):])
}
if p == "" {
-return nil, fmt.Errorf("Empty path specified!")
+return nil, fmt.Errorf("empty path specified")
}
// A public key may be used to authenticate against the server by using
// an unencrypted PEM-encoded private key file. If you have an encrypted
@@ -892,7 +893,7 @@ func (obj *Remotes) passwordCallback(user, host string) func() (string, error) {
case e := <-failchan:
return "", e
case <-util.TimeAfterOrBlock(timeout):
-return "", fmt.Errorf("Interactive timeout reached!")
+return "", fmt.Errorf("interactive timeout reached")
}
}
return cb
@@ -1078,29 +1079,6 @@ func cleanURL(s string) string {
return u.Host
}

-// Semaphore is a counting semaphore.
-type Semaphore chan struct{}
-
-// NewSemaphore creates a new semaphore.
-func NewSemaphore(size int) Semaphore {
-return make(Semaphore, size)
-}
-
-// P acquires n resources.
-func (s Semaphore) P(n int) {
-e := struct{}{}
-for i := 0; i < n; i++ {
-s <- e // acquire one
-}
-}
-
-// V releases n resources.
-func (s Semaphore) V(n int) {
-for i := 0; i < n; i++ {
-<-s // release one
-}
-}
-
// combinedWriter mimics what the ssh.CombinedOutput command does.
type combinedWriter struct {
b bytes.Buffer
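For reference, the Semaphore removed above is just a buffered channel used as a counting semaphore. A minimal standalone sketch of the same pattern (the capacity and printed messages are only illustrative, this is not the mgmt code itself):

package main

import "fmt"

// Semaphore is a counting semaphore backed by a buffered channel.
type Semaphore chan struct{}

// NewSemaphore creates a semaphore with the given capacity.
func NewSemaphore(size int) Semaphore { return make(Semaphore, size) }

// P acquires n resources, blocking until they are available.
func (s Semaphore) P(n int) {
	for i := 0; i < n; i++ {
		s <- struct{}{} // acquire one
	}
}

// V releases n resources.
func (s Semaphore) V(n int) {
	for i := 0; i < n; i++ {
		<-s // release one
	}
}

func main() {
	sem := NewSemaphore(2) // at most two holders at once
	sem.P(1)
	fmt.Println("acquired")
	sem.V(1)
	fmt.Println("released")
}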
resources/augeas.go (new file, 305 lines)
@@ -0,0 +1,305 @@
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// +build !noaugeas

package resources

import (
"encoding/gob"
"fmt"
"log"
"os"
"strings"

"github.com/purpleidea/mgmt/recwatch"

errwrap "github.com/pkg/errors"
// FIXME: we vendor go/augeas because master requires augeas 1.6.0
// and libaugeas-dev-1.6.0 is not yet available in a PPA.
"honnef.co/go/augeas"
)

const (
// NS is a namespace for augeas operations
NS = "Xmgmt"
)

func init() {
gob.Register(&AugeasRes{})
}

// AugeasRes is a resource that enables you to use the augeas resource.
// Currently only allows you to change simple files (e.g sshd_config).
type AugeasRes struct {
BaseRes `yaml:",inline"`

// File is the path to the file targeted by this resource.
File string `yaml:"file"`

// Lens is the lens used by this resource. If specified, mgmt
// will lower the augeas overhead by only loading that lens.
Lens string `yaml:"lens"`

// Sets is a list of changes that will be applied to the file, in the form of
// ["path", "value"]. mgmt will run augeas.Get() before augeas.Set(), to
// prevent changing the file when it is not needed.
Sets []AugeasSet `yaml:"sets"`

recWatcher *recwatch.RecWatcher // used to watch the changed files
}

// AugeasSet represents a key/value pair of settings to be applied.
type AugeasSet struct {
Path string `yaml:"path"` // The relative path to the value to be changed.
Value string `yaml:"value"` // The value to be set on the given Path.
}

// Default returns some sensible defaults for this resource.
func (obj *AugeasRes) Default() Res {
return &AugeasRes{
BaseRes: BaseRes{
MetaParams: DefaultMetaParams, // force a default
},
}
}

// Validate if the params passed in are valid data.
func (obj *AugeasRes) Validate() error {
if !strings.HasPrefix(obj.File, "/") {
return fmt.Errorf("the File param should start with a slash")
}
if obj.Lens != "" && !strings.HasSuffix(obj.Lens, ".lns") {
return fmt.Errorf("the Lens param should have a .lns suffix")
}
if (obj.Lens == "") != (obj.File == "") {
return fmt.Errorf("the File and Lens params must be specified together")
}
return obj.BaseRes.Validate()
}

// Init initiates the resource.
func (obj *AugeasRes) Init() error {
obj.BaseRes.kind = "augeas"
return obj.BaseRes.Init() // call base init, b/c we're overriding
}

// Watch is the primary listener for this resource and it outputs events.
// Taken from the File resource.
// FIXME: DRY - This is taken from the file resource
func (obj *AugeasRes) Watch() error {
var err error
obj.recWatcher, err = recwatch.NewRecWatcher(obj.File, false)
if err != nil {
return err
}
defer obj.recWatcher.Close()

// notify engine that we're running
if err := obj.Running(); err != nil {
return err // bubble up a NACK...
}

var send = false // send event?
var exit *error

for {
if obj.debug {
log.Printf("%s[%s]: Watching: %s", obj.Kind(), obj.GetName(), obj.File) // attempting to watch...
}

select {
case event, ok := <-obj.recWatcher.Events():
if !ok { // channel shutdown
return nil
}
if err := event.Error; err != nil {
return errwrap.Wrapf(err, "Unknown %s[%s] watcher error", obj.Kind(), obj.GetName())
}
if obj.debug { // don't access event.Body if event.Error isn't nil
log.Printf("%s[%s]: Event(%s): %v", obj.Kind(), obj.GetName(), event.Body.Name, event.Body.Op)
}
send = true
obj.StateOK(false) // dirty

case event := <-obj.Events():
if exit, send = obj.ReadEvent(event); exit != nil {
return *exit // exit
}
//obj.StateOK(false) // dirty // these events don't invalidate state
}

// do all our event sending all together to avoid duplicate msgs
if send {
send = false
obj.Event()
}
}
}

// checkApplySet runs CheckApply for one element of the AugeasRes.Set
func (obj *AugeasRes) checkApplySet(apply bool, ag *augeas.Augeas, set AugeasSet) (bool, error) {
fullpath := fmt.Sprintf("/files/%v/%v", obj.File, set.Path)

// We do not check for errors because errors are also thrown when
// the path does not exist.
if getValue, _ := ag.Get(fullpath); set.Value == getValue {
// The value is what we expect, return directly
return true, nil
}

if !apply {
// If noop, we can return here directly. We return with
// nil even if err is not nil because it does not mean
// there is an error.
return false, nil
}

if err := ag.Set(fullpath, set.Value); err != nil {
return false, errwrap.Wrapf(err, "augeas: error while setting value")
}

return false, nil
}

// CheckApply method for Augeas resource.
func (obj *AugeasRes) CheckApply(apply bool) (bool, error) {
log.Printf("%s[%s]: CheckApply: %s", obj.Kind(), obj.GetName(), obj.File)
// By default we do not set any option to augeas, we use the defaults.
opts := augeas.None
if obj.Lens != "" {
// if the lens is specified, we can speed up augeas by not
// loading everything. Without this option, augeas will try to
// read all the files it knows in the complete filesystem.
// e.g. to change /etc/ssh/sshd_config, it would load /etc/hosts, /etc/ntpd.conf, etc...
opts = augeas.NoModlAutoload
}

// Initiate augeas
ag, err := augeas.New("/", "", opts)
if err != nil {
return false, errwrap.Wrapf(err, "augeas: error while initializing")
}
defer ag.Close()

if obj.Lens != "" {
// If the lens is set, load the lens for the file we want to edit.
// We pick Xmgmt, as this name will not collide with any other lens name.
// We do not pick Mgmt as in the future there might be an Mgmt lens.
// https://github.com/hercules-team/augeas/wiki/Loading-specific-files
if err = ag.Set(fmt.Sprintf("/augeas/load/%s/lens", NS), obj.Lens); err != nil {
return false, errwrap.Wrapf(err, "augeas: error while initializing lens")
}
if err = ag.Set(fmt.Sprintf("/augeas/load/%s/incl", NS), obj.File); err != nil {
return false, errwrap.Wrapf(err, "augeas: error while initializing incl")
}
if err = ag.Load(); err != nil {
return false, errwrap.Wrapf(err, "augeas: error while loading")
}
}

checkOK := true
for _, set := range obj.Sets {
if setCheckOK, err := obj.checkApplySet(apply, &ag, set); err != nil {
return false, errwrap.Wrapf(err, "augeas: error during CheckApply of one Set")
} else if !setCheckOK {
checkOK = false
}
}

// If the state is correct or we can't apply, return early.
if checkOK || !apply {
return checkOK, nil
}

log.Printf("%s[%s]: changes needed, saving", obj.Kind(), obj.GetName())
if err = ag.Save(); err != nil {
return false, errwrap.Wrapf(err, "augeas: error while saving augeas values")
}

// FIXME: Workaround for https://github.com/dominikh/go-augeas/issues/13
// To be fixed upstream.
if obj.File != "" {
if _, err := os.Stat(obj.File); os.IsNotExist(err) {
return false, errwrap.Wrapf(err, "augeas: error: file does not exist")
}
}

return false, nil
}

// AugeasUID is the UID struct for AugeasRes.
type AugeasUID struct {
BaseUID
name string
}

// AutoEdges returns the AutoEdge interface. In this case no autoedges are used.
func (obj *AugeasRes) AutoEdges() AutoEdge {
return nil
}

// UIDs includes all params to make a unique identification of this object.
func (obj *AugeasRes) UIDs() []ResUID {
x := &AugeasUID{
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
name: obj.Name,
}
return []ResUID{x}
}

// GroupCmp returns whether two resources can be grouped together or not.
func (obj *AugeasRes) GroupCmp(r Res) bool {
return false // Augeas commands can not be grouped together.
}

// Compare two resources and return if they are equivalent.
func (obj *AugeasRes) Compare(res Res) bool {
switch res.(type) {
// we can only compare AugeasRes to others of the same resource
case *AugeasRes:
res := res.(*AugeasRes)
if !obj.BaseRes.Compare(res) { // call base Compare
return false
}
if obj.Name != res.Name {
return false
}
default:
return false
}
return true
}

// UnmarshalYAML is the custom unmarshal handler for this struct.
// It is primarily useful for setting the defaults.
func (obj *AugeasRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
type rawRes AugeasRes // indirection to avoid infinite recursion

def := obj.Default() // get the default
res, ok := def.(*AugeasRes) // put in the right format
if !ok {
return fmt.Errorf("could not convert to AugeasRes")
}
raw := rawRes(*res) // convert; the defaults go here

if err := unmarshal(&raw); err != nil {
return err
}

*obj = AugeasRes(raw) // restore from indirection with type conversion!
return nil
}
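A rough standalone sketch of the lens-scoped load and get-before-set check that the new resource performs, driven directly through honnef.co/go/augeas; the lens name, file path and value below are only examples, not values taken from the diff:

package main

import (
	"fmt"
	"log"

	"honnef.co/go/augeas"
)

func main() {
	// Skip the full module autoload and only bring in one lens for one
	// file, mirroring the Xmgmt namespace trick used by the resource.
	ag, err := augeas.New("/", "", augeas.NoModlAutoload)
	if err != nil {
		log.Fatal(err)
	}
	defer ag.Close()

	if err := ag.Set("/augeas/load/Xmgmt/lens", "Sshd.lns"); err != nil {
		log.Fatal(err)
	}
	if err := ag.Set("/augeas/load/Xmgmt/incl", "/etc/ssh/sshd_config"); err != nil {
		log.Fatal(err)
	}
	if err := ag.Load(); err != nil {
		log.Fatal(err)
	}

	path := "/files/etc/ssh/sshd_config/PermitRootLogin"
	// Get before Set, so an already-correct value never dirties the file.
	if val, _ := ag.Get(path); val != "no" {
		if err := ag.Set(path, "no"); err != nil {
			log.Fatal(err)
		}
		if err := ag.Save(); err != nil {
			log.Fatal(err)
		}
		fmt.Println("updated")
	}
}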
resources/augeas_disabled.go (new file, 24 lines)
@@ -0,0 +1,24 @@
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// +build noaugeas

package resources

// AugeasRes represents the fields of the Augeas resource. Since this file is
// only invoked with the tag "noaugeas", we do not need any fields here.
type AugeasRes struct {
}
@@ -1,5 +1,5 @@
// Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -25,9 +25,8 @@ import (
"log"
"os/exec"
"strings"
-"time"
+"syscall"

-"github.com/purpleidea/mgmt/event"
"github.com/purpleidea/mgmt/util"

errwrap "github.com/pkg/errors"
@@ -40,7 +39,6 @@ func init() {
// ExecRes is an exec resource for running commands.
type ExecRes struct {
BaseRes `yaml:",inline"`
-State string `yaml:"state"` // state: exists/present?, absent, (undefined?)
Cmd string `yaml:"cmd"` // the command to run
Shell string `yaml:"shell"` // the (optional) shell to use to run the cmd
Timeout int `yaml:"timeout"` // the cmd timeout in seconds
@@ -48,49 +46,32 @@ type ExecRes struct {
WatchShell string `yaml:"watchshell"` // the (optional) shell to use to run the watch cmd
IfCmd string `yaml:"ifcmd"` // the if command to run
IfShell string `yaml:"ifshell"` // the (optional) shell to use to run the if cmd
-PollInt int `yaml:"pollint"` // the poll interval for the ifcmd
}

-// NewExecRes is a constructor for this resource. It also calls Init() for you.
+// Default returns some sensible defaults for this resource.
-func NewExecRes(name, cmd, shell string, timeout int, watchcmd, watchshell, ifcmd, ifshell string, pollint int, state string) (*ExecRes, error) {
+func (obj *ExecRes) Default() Res {
-obj := &ExecRes{
+return &ExecRes{
BaseRes: BaseRes{
-Name: name,
+MetaParams: DefaultMetaParams, // force a default
},
-Cmd: cmd,
-Shell: shell,
-Timeout: timeout,
-WatchCmd: watchcmd,
-WatchShell: watchshell,
-IfCmd: ifcmd,
-IfShell: ifshell,
-PollInt: pollint,
-State: state,
}
-return obj, obj.Init()
+}
+
+// Validate if the params passed in are valid data.
+func (obj *ExecRes) Validate() error {
+if obj.Cmd == "" { // this is the only thing that is really required
+return fmt.Errorf("command can't be empty")
+}
+
+return obj.BaseRes.Validate()
}

// Init runs some startup code for this resource.
func (obj *ExecRes) Init() error {
-obj.BaseRes.kind = "Exec"
+obj.BaseRes.kind = "exec"
return obj.BaseRes.Init() // call base init, b/c we're overriding
}

-// Validate if the params passed in are valid data.
-// FIXME: where should this get called ?
-func (obj *ExecRes) Validate() error {
-if obj.Cmd == "" { // this is the only thing that is really required
-return fmt.Errorf("Command can't be empty!")
-}
-
-// if we have a watch command, then we don't poll with the if command!
-if obj.WatchCmd != "" && obj.PollInt > 0 {
-return fmt.Errorf("Don't poll when we have a watch command.")
-}
-
-return nil
-}
-
// BufioChanScanner wraps the scanner output in a channel.
func (obj *ExecRes) BufioChanScanner(scanner *bufio.Scanner) (chan string, chan error) {
ch, errch := make(chan string), make(chan error)
@@ -110,26 +91,9 @@ func (obj *ExecRes) BufioChanScanner(scanner *bufio.Scanner) (chan string, chan
}

// Watch is the primary listener for this resource and it outputs events.
-func (obj *ExecRes) Watch(processChan chan event.Event) error {
+func (obj *ExecRes) Watch() error {
-if obj.IsWatching() {
-return nil
-}
-obj.SetWatching(true)
-defer obj.SetWatching(false)
-cuid := obj.converger.Register()
-defer cuid.Unregister()
-
-var startup bool
-Startup := func(block bool) <-chan time.Time {
-if block {
-return nil // blocks forever
-//return make(chan time.Time) // blocks forever
-}
-return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
-}
-
var send = false // send event?
-var exit = false
+var exit *error
bufioch, errch := make(chan string), make(chan error)

if obj.WatchCmd != "" {
@@ -149,71 +113,65 @@ func (obj *ExecRes) Watch(processChan chan event.Event) error {
}
cmd := exec.Command(cmdName, cmdArgs...)
//cmd.Dir = "" // look for program in pwd ?
+// ignore signals sent to parent process (we're in our own group)
+cmd.SysProcAttr = &syscall.SysProcAttr{
+Setpgid: true,
+Pgid: 0,
+}
+
cmdReader, err := cmd.StdoutPipe()
if err != nil {
-return errwrap.Wrapf(err, "Error creating StdoutPipe for Cmd")
+return errwrap.Wrapf(err, "error creating StdoutPipe for Cmd")
}
scanner := bufio.NewScanner(cmdReader)

-defer cmd.Wait() // XXX: is this necessary?
+defer cmd.Wait() // wait for the command to exit before return!
defer func() {
// FIXME: without wrapping this in this func it panic's
-// when running examples/graph8d.yaml
+// when running certain graphs... why?
-cmd.Process.Kill() // TODO: is this necessary?
+cmd.Process.Kill() // shutdown the Watch command on exit
}()
if err := cmd.Start(); err != nil {
-return errwrap.Wrapf(err, "Error starting Cmd")
+return errwrap.Wrapf(err, "error starting Cmd")
}

bufioch, errch = obj.BufioChanScanner(scanner)
}

+// notify engine that we're running
+if err := obj.Running(); err != nil {
+return err // bubble up a NACK...
+}
+
for {
-obj.SetState(ResStateWatching) // reset
select {
case text := <-bufioch:
-cuid.SetConverged(false)
// each time we get a line of output, we loop!
log.Printf("%s[%s]: Watch output: %s", obj.Kind(), obj.GetName(), text)
if text != "" {
send = true
+obj.StateOK(false) // something made state dirty
}

case err := <-errch:
-cuid.SetConverged(false)
if err == nil { // EOF
// FIXME: add an "if watch command ends/crashes"
// restart or generate error option
-return fmt.Errorf("Reached EOF")
+return fmt.Errorf("reached EOF")
}
// error reading input?
-return errwrap.Wrapf(err, "Unknown error")
+return errwrap.Wrapf(err, "unknown error")

case event := <-obj.Events():
-cuid.SetConverged(false)
+if exit, send = obj.ReadEvent(event); exit != nil {
-if exit, send = obj.ReadEvent(&event); exit {
+return *exit // exit
-return nil // exit
}

-case <-cuid.ConvergedTimer():
-cuid.SetConverged(true) // converged!
-continue
-
-case <-Startup(startup):
-cuid.SetConverged(false)
-send = true
}

// do all our event sending all together to avoid duplicate msgs
if send {
-startup = true // startup finished
send = false
-// it is okay to invalidate the clean state on poke too
+obj.Event()
-obj.StateOK(false) // something made state dirty
-if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
-return err // we exit or bubble up a NACK...
-}
}
}
}
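The SysProcAttr lines added above detach the watch command into its own process group, so a signal delivered to mgmt's process group (for example Ctrl-C in a terminal) does not also kill the child. A minimal Unix-only sketch of the same technique, using a placeholder sleep command:

package main

import (
	"log"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("sleep", "5") // stand-in long-running command
	// Put the child into its own process group so signals sent to the
	// parent's group are not forwarded to it by the kernel.
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Setpgid: true,
		Pgid:    0,
	}
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	if err := cmd.Wait(); err != nil {
		log.Fatal(err)
	}
}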
@@ -221,24 +179,12 @@ func (obj *ExecRes) Watch(processChan chan event.Event) error {
// CheckApply checks the resource state and applies the resource if the bool
// input is true. It returns error info and if the state check passed or not.
// TODO: expand the IfCmd to be a list of commands
-func (obj *ExecRes) CheckApply(apply bool) (checkOK bool, err error) {
+func (obj *ExecRes) CheckApply(apply bool) (bool, error) {
+// If we receive a refresh signal, then the engine skips the IsStateOK()
+// check and this will run. It is still guarded by the IfCmd, but it can
+// have a chance to execute, and all without the check of obj.Refresh()!
+
-// if there is a watch command, but no if command, run based on state
+if obj.IfCmd != "" { // if there is no onlyif check, we should just run
-if obj.WatchCmd != "" && obj.IfCmd == "" {
-if obj.IsStateOK() { // FIXME: this is done by engine now...
-return true, nil
-}
-
-// if there is no watcher, but there is an onlyif check, run it to see
-} else if obj.IfCmd != "" { // && obj.WatchCmd == ""
-// there is a watcher, but there is also an if command
-//} else if obj.IfCmd != "" && obj.WatchCmd != "" {
-
-if obj.PollInt > 0 { // && obj.WatchCmd == ""
-// XXX: have the Watch() command output onlyif poll events...
-// XXX: we can optimize by saving those results for returning here
-// return XXX
-}
-
var cmdName string
var cmdArgs []string
@@ -254,18 +200,17 @@ func (obj *ExecRes) CheckApply(apply bool) (checkOK bool, err error) {
cmdName = obj.IfShell // usually bash, or sh
cmdArgs = []string{"-c", obj.IfCmd}
}
-err = exec.Command(cmdName, cmdArgs...).Run()
+cmd := exec.Command(cmdName, cmdArgs...)
-if err != nil {
+// ignore signals sent to parent process (we're in our own group)
+cmd.SysProcAttr = &syscall.SysProcAttr{
+Setpgid: true,
+Pgid: 0,
+}
+if err := cmd.Run(); err != nil {
// TODO: check exit value
return true, nil // don't run
}

-// if there is no watcher and no onlyif check, assume we should run
-} else { // if obj.WatchCmd == "" && obj.IfCmd == "" {
-// just run if state is dirty
-if obj.IsStateOK() { // FIXME: this is done by engine now...
-return true, nil
-}
}

// state is not okay, no work done, exit, but without error
@@ -292,11 +237,17 @@ func (obj *ExecRes) CheckApply(apply bool) (checkOK bool, err error) {
}
cmd := exec.Command(cmdName, cmdArgs...)
//cmd.Dir = "" // look for program in pwd ?
+// ignore signals sent to parent process (we're in our own group)
+cmd.SysProcAttr = &syscall.SysProcAttr{
+Setpgid: true,
+Pgid: 0,
+}
+
var out bytes.Buffer
cmd.Stdout = &out

if err := cmd.Start(); err != nil {
-return false, errwrap.Wrapf(err, "Error starting Cmd")
+return false, errwrap.Wrapf(err, "error starting cmd")
}

timeout := obj.Timeout
@@ -306,16 +257,30 @@ func (obj *ExecRes) CheckApply(apply bool) (checkOK bool, err error) {
done := make(chan error)
go func() { done <- cmd.Wait() }()

+var err error // error returned by cmd
select {
-case err := <-done:
+case e := <-done:
-if err != nil {
+err = e // store
-e := errwrap.Wrapf(err, "Error waiting for Cmd")
-return false, e
-}
-
case <-util.TimeAfterOrBlock(timeout):
-//cmd.Process.Kill() // TODO: is this necessary?
+cmd.Process.Kill() // TODO: check error?
-return false, fmt.Errorf("Timeout waiting for Cmd!")
+return false, fmt.Errorf("timeout for cmd")
+}
+
+// process the err result from cmd, we process non-zero exits here too!
+exitErr, ok := err.(*exec.ExitError) // embeds an os.ProcessState
+if err != nil && ok {
+pStateSys := exitErr.Sys() // (*os.ProcessState) Sys
+wStatus, ok := pStateSys.(syscall.WaitStatus)
+if !ok {
+e := errwrap.Wrapf(err, "error running cmd")
+return false, e
+}
+return false, fmt.Errorf("cmd error, exit status: %d", wStatus.ExitStatus())
+
+} else if err != nil {
+e := errwrap.Wrapf(err, "general cmd error")
+return false, e
}

// TODO: if we printed the stdout while the command is running, this
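The new exit handling above recovers the numeric status of a failed command by unwrapping exec.ExitError and its syscall.WaitStatus. A tiny standalone sketch of that extraction; the false command is just a stand-in that always exits non-zero:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

func main() {
	err := exec.Command("false").Run() // exits with status 1
	if exitErr, ok := err.(*exec.ExitError); ok {
		// ExitError embeds os.ProcessState; on Unix, Sys() is a
		// syscall.WaitStatus carrying the numeric exit status.
		if wStatus, ok := exitErr.Sys().(syscall.WaitStatus); ok {
			fmt.Printf("exit status: %d\n", wStatus.ExitStatus())
			return
		}
	}
	fmt.Println("err:", err)
}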
@@ -328,7 +293,6 @@ func (obj *ExecRes) CheckApply(apply bool) (checkOK bool, err error) {
log.Printf("%s[%s]: Command output is:", obj.Kind(), obj.GetName())
log.Printf(out.String())
}
-// XXX: return based on exit value!!

// The state tracking is for exec resources that can't "detect" their
// state, and assume it's invalid when the Watch() function triggers.
@@ -346,40 +310,6 @@ type ExecUID struct {
// TODO: add more elements here
}

-// IFF aka if and only if they are equivalent, return true. If not, false.
-func (obj *ExecUID) IFF(uid ResUID) bool {
-res, ok := uid.(*ExecUID)
-if !ok {
-return false
-}
-if obj.Cmd != res.Cmd {
-return false
-}
-// TODO: add more checks here
-//if obj.Shell != res.Shell {
-// return false
-//}
-//if obj.Timeout != res.Timeout {
-// return false
-//}
-//if obj.WatchCmd != res.WatchCmd {
-// return false
-//}
-//if obj.WatchShell != res.WatchShell {
-// return false
-//}
-if obj.IfCmd != res.IfCmd {
-return false
-}
-//if obj.PollInt != res.PollInt {
-// return false
-//}
-//if obj.State != res.State {
-// return false
-//}
-return true
-}
-
// AutoEdges returns the AutoEdge interface. In this case no autoedges are used.
func (obj *ExecRes) AutoEdges() AutoEdge {
// TODO: parse as many exec params to look for auto edges, for example
@@ -387,9 +317,9 @@ func (obj *ExecRes) AutoEdges() AutoEdge {
return nil
}

-// GetUIDs includes all params to make a unique identification of this object.
+// UIDs includes all params to make a unique identification of this object.
// Most resources only return one, although some resources can return multiple.
-func (obj *ExecRes) GetUIDs() []ResUID {
+func (obj *ExecRes) UIDs() []ResUID {
x := &ExecUID{
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
Cmd: obj.Cmd,
@@ -438,10 +368,7 @@ func (obj *ExecRes) Compare(res Res) bool {
if obj.IfCmd != res.IfCmd {
return false
}
-if obj.PollInt != res.PollInt {
+if obj.IfShell != res.IfShell {
-return false
-}
-if obj.State != res.State {
return false
}
default:
@@ -449,3 +376,23 @@ func (obj *ExecRes) Compare(res Res) bool {
}
return true
}
+
+// UnmarshalYAML is the custom unmarshal handler for this struct.
+// It is primarily useful for setting the defaults.
+func (obj *ExecRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
+type rawRes ExecRes // indirection to avoid infinite recursion
+
+def := obj.Default() // get the default
+res, ok := def.(*ExecRes) // put in the right format
+if !ok {
+return fmt.Errorf("could not convert to ExecRes")
+}
+raw := rawRes(*res) // convert; the defaults go here
+
+if err := unmarshal(&raw); err != nil {
+return err
+}
+
+*obj = ExecRes(raw) // restore from indirection with type conversion!
+return nil
+}
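The UnmarshalYAML additions use a type-alias indirection so the defaults can be filled in before parsing without the custom unmarshaler recursing into itself. A self-contained sketch of the same pattern, assuming gopkg.in/yaml.v2, which matches the unmarshal signature used here; the widget type, its fields and the default value are purely illustrative:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// widget is a stand-in type; the field names are only for illustration.
type widget struct {
	Name    string `yaml:"name"`
	Timeout int    `yaml:"timeout"`
}

// UnmarshalYAML applies defaults first, then overlays the parsed values.
func (obj *widget) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type rawWidget widget         // indirection to avoid infinite recursion
	raw := rawWidget{Timeout: 30} // the defaults go here
	if err := unmarshal(&raw); err != nil {
		return err
	}
	*obj = widget(raw) // restore from indirection with type conversion
	return nil
}

func main() {
	var w widget
	if err := yaml.Unmarshal([]byte("name: hello\n"), &w); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", w) // Timeout keeps its default of 30
}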
@@ -1,5 +1,5 @@
// Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
@@ -27,12 +27,13 @@ import (
"io/ioutil"
"log"
"os"
+"os/user"
"path"
"path/filepath"
+"strconv"
"strings"
-"time"
+"syscall"

-"github.com/purpleidea/mgmt/event"
"github.com/purpleidea/mgmt/recwatch"
"github.com/purpleidea/mgmt/util"

@@ -52,6 +53,9 @@ type FileRes struct {
Content *string `yaml:"content"` // nil to mark as undefined
Source string `yaml:"source"` // file path for source content
State string `yaml:"state"` // state: exists/present?, absent, (undefined?)
+Owner string `yaml:"owner"`
+Group string `yaml:"group"`
+Mode string `yaml:"mode"`
Recurse bool `yaml:"recurse"`
Force bool `yaml:"force"`
path string // computed path
@@ -60,44 +64,105 @@ type FileRes struct {
recWatcher *recwatch.RecWatcher
}

-// NewFileRes is a constructor for this resource. It also calls Init() for you.
+// Default returns some sensible defaults for this resource.
-func NewFileRes(name, path, dirname, basename string, content *string, source, state string, recurse, force bool) (*FileRes, error) {
+func (obj *FileRes) Default() Res {
-obj := &FileRes{
+return &FileRes{
BaseRes: BaseRes{
-Name: name,
+MetaParams: DefaultMetaParams, // force a default
},
-Path: path,
+State: "exists",
-Dirname: dirname,
-Basename: basename,
-Content: content,
-Source: source,
-State: state,
-Recurse: recurse,
-Force: force,
}
-return obj, obj.Init()
+}
+
+// Validate reports any problems with the struct definition.
+func (obj *FileRes) Validate() error {
+if obj.Dirname != "" && !strings.HasSuffix(obj.Dirname, "/") {
+return fmt.Errorf("dirname must end with a slash")
+}
+
+if strings.HasPrefix(obj.Basename, "/") {
+return fmt.Errorf("basename must not start with a slash")
+}
+
+if obj.Content != nil && obj.Source != "" {
+return fmt.Errorf("can't specify both Content and Source")
+}
+
+if obj.isDir && obj.Content != nil { // makes no sense
+return fmt.Errorf("can't specify Content when creating a Dir")
+}
+
+if obj.Mode != "" {
+if _, err := obj.mode(); err != nil {
+return err
+}
+}
+
+if _, err := obj.uid(); obj.Owner != "" && err != nil {
+return err
+}
+
+if _, err := obj.gid(); obj.Group != "" && err != nil {
+return err
+}
+
+// XXX: should this specify that we create an empty directory instead?
+//if obj.Source == "" && obj.isDir {
+// return fmt.Errorf("Can't specify an empty source when creating a Dir.")
+//}
+
+return obj.BaseRes.Validate()
+}
+
+// mode returns the file permission specified on the graph. It doesn't handle
+// the case where the mode is not specified. The caller should check obj.Mode is
+// not empty.
+func (obj *FileRes) mode() (os.FileMode, error) {
+m, err := strconv.ParseInt(obj.Mode, 8, 32)
+if err != nil {
+return os.FileMode(0), errwrap.Wrapf(err, "Mode should be an octal number (%s)", obj.Mode)
+}
+return os.FileMode(m), nil
+}
+
+// uid returns the user id for the owner specified in the yaml file graph.
+// Caller should first check obj.Owner is not empty
+func (obj *FileRes) uid() (int, error) {
+u2, err2 := user.LookupId(obj.Owner)
+if err2 == nil {
+return strconv.Atoi(u2.Uid)
+}
+
+u, err := user.Lookup(obj.Owner)
+if err == nil {
+return strconv.Atoi(u.Uid)
+}
+
+return -1, errwrap.Wrapf(err, "owner lookup error (%s)", obj.Owner)
}

// Init runs some startup code for this resource.
func (obj *FileRes) Init() error {
obj.sha256sum = ""
-if obj.Path == "" { // use the name as the path default if missing
-obj.Path = obj.BaseRes.Name
-}
obj.path = obj.GetPath() // compute once
obj.isDir = strings.HasSuffix(obj.path, "/") // dirs have trailing slashes

-obj.BaseRes.kind = "File"
+obj.BaseRes.kind = "file"
return obj.BaseRes.Init() // call base init, b/c we're overriding
}

// GetPath returns the actual path to use for this resource. It computes this
// after analysis of the Path, Dirname and Basename values. Dirs end with slash.
func (obj *FileRes) GetPath() string {
-d := util.Dirname(obj.Path)
+p := obj.Path
-b := util.Basename(obj.Path)
+if obj.Path == "" { // use the name as the path default if missing
+p = obj.BaseRes.Name
+}
+
+d := util.Dirname(p)
+b := util.Basename(p)
if obj.Dirname == "" && obj.Basename == "" {
-return obj.Path
+return p
}
if obj.Dirname == "" {
return d + obj.Basename
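The mode() helper added above treats the Mode string as an octal number via strconv.ParseInt(..., 8, 32) and converts the result to an os.FileMode. A short sketch of that conversion with an example value ("0644" is only illustrative):

package main

import (
	"fmt"
	"log"
	"os"
	"strconv"
)

func main() {
	// Parse a "0644"-style string in base 8, then convert to os.FileMode,
	// the same conversion the mode() helper performs.
	m, err := strconv.ParseInt("0644", 8, 32)
	if err != nil {
		log.Fatal(err)
	}
	mode := os.FileMode(m)
	fmt.Println(mode) // prints -rw-r--r--
}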
@@ -109,80 +174,40 @@ func (obj *FileRes) GetPath() string {
return obj.Dirname + obj.Basename
}

-// Validate reports any problems with the struct definition.
-func (obj *FileRes) Validate() error {
-if obj.Dirname != "" && !strings.HasSuffix(obj.Dirname, "/") {
-return fmt.Errorf("Dirname must end with a slash.")
-}
-
-if strings.HasPrefix(obj.Basename, "/") {
-return fmt.Errorf("Basename must not start with a slash.")
-}
-
-if obj.Content != nil && obj.Source != "" {
-return fmt.Errorf("Can't specify both Content and Source.")
-}
-
-if obj.isDir && obj.Content != nil { // makes no sense
-return fmt.Errorf("Can't specify Content when creating a Dir.")
-}
-
-// XXX: should this specify that we create an empty directory instead?
-//if obj.Source == "" && obj.isDir {
-// return fmt.Errorf("Can't specify an empty source when creating a Dir.")
-//}
-
-return nil
-}
-
// Watch is the primary listener for this resource and it outputs events.
// This one is a file watcher for files and directories.
// Modify with caution, it is probably important to write some test cases first!
// If the Watch returns an error, it means that something has gone wrong, and it
// must be restarted. On a clean exit it returns nil.
// FIXME: Also watch the source directory when using obj.Source !!!
-func (obj *FileRes) Watch(processChan chan event.Event) error {
+func (obj *FileRes) Watch() error {
-if obj.IsWatching() {
-return nil // TODO: should this be an error?
-}
-obj.SetWatching(true)
-defer obj.SetWatching(false)
-cuid := obj.converger.Register()
-defer cuid.Unregister()
-
-var startup bool
-Startup := func(block bool) <-chan time.Time {
-if block {
-return nil // blocks forever
-//return make(chan time.Time) // blocks forever
-}
-return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
-}
-
var err error
-obj.recWatcher, err = recwatch.NewRecWatcher(obj.Path, obj.Recurse)
+obj.recWatcher, err = recwatch.NewRecWatcher(obj.path, obj.Recurse)
if err != nil {
return err
}
defer obj.recWatcher.Close()

+// notify engine that we're running
+if err := obj.Running(); err != nil {
+return err // bubble up a NACK...
+}
+
var send = false // send event?
-var exit = false
+var exit *error

for {
if obj.debug {
-log.Printf("%s[%s]: Watching: %s", obj.Kind(), obj.GetName(), obj.Path) // attempting to watch...
+log.Printf("%s[%s]: Watching: %s", obj.Kind(), obj.GetName(), obj.path) // attempting to watch...
}

-obj.SetState(ResStateWatching) // reset
select {
case event, ok := <-obj.recWatcher.Events():
if !ok { // channel shutdown
return nil
}
-cuid.SetConverged(false)
if err := event.Error; err != nil {
-return errwrap.Wrapf(err, "Unknown %s[%s] watcher error", obj.Kind(), obj.GetName())
+return errwrap.Wrapf(err, "unknown %s[%s] watcher error", obj.Kind(), obj.GetName())
}
if obj.debug { // don't access event.Body if event.Error isn't nil
log.Printf("%s[%s]: Event(%s): %v", obj.Kind(), obj.GetName(), event.Body.Name, event.Body.Op)
@@ -191,29 +216,16 @@ func (obj *FileRes) Watch(processChan chan event.Event) error {
obj.StateOK(false) // dirty

case event := <-obj.Events():
-cuid.SetConverged(false)
+if exit, send = obj.ReadEvent(event); exit != nil {
-if exit, send = obj.ReadEvent(&event); exit {
+return *exit // exit
-return nil // exit
}
//obj.StateOK(false) // dirty // these events don't invalidate state

-case <-cuid.ConvergedTimer():
-cuid.SetConverged(true) // converged!
-continue
-
-case <-Startup(startup):
-cuid.SetConverged(false)
-send = true
-obj.StateOK(false) // dirty
}

// do all our event sending all together to avoid duplicate msgs
if send {
-startup = true // startup finished
send = false
-if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
+obj.Event()
-return err // we exit or bubble up a NACK...
-}
}
}
}
@@ -239,7 +251,7 @@ type FileInfo struct {
// ReadDir reads a directory path, and returns a list of enhanced FileInfo's.
func ReadDir(path string) ([]FileInfo, error) {
if !strings.HasSuffix(path, "/") { // dirs have trailing slashes
-return nil, fmt.Errorf("Path must be a directory.")
+return nil, fmt.Errorf("path must be a directory")
}
output := []FileInfo{} // my file info
fileInfos, err := ioutil.ReadDir(path)
@@ -253,7 +265,7 @@ func ReadDir(path string) ([]FileInfo, error) {
abs := path + smartPath(fi)
rel, err := filepath.Rel(path, abs) // NOTE: calls Clean()
if err != nil { // shouldn't happen
-return nil, errwrap.Wrapf(err, "ReadDir: Unhandled error")
+return nil, errwrap.Wrapf(err, "unhandled error in ReadDir")
}
if fi.IsDir() {
rel += "/" // add a trailing slash for dirs
@@ -295,7 +307,7 @@ func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sh
srcFile, isFile := src.(*os.File)
_, isBytes := src.(*bytes.Reader) // supports seeking!
if !isFile && !isBytes {
-return "", false, fmt.Errorf("Can't open src as either file or buffer!")
+return "", false, fmt.Errorf("can't open src as either file or buffer")
}

var srcStat os.FileInfo
@@ -307,7 +319,7 @@ func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sh
}
// TODO: deal with symlinks
if !srcStat.Mode().IsRegular() { // can't copy non-regular files or dirs
-return "", false, fmt.Errorf("Non-regular src file: %s (%q)", srcStat.Name(), srcStat.Mode())
+return "", false, fmt.Errorf("non-regular src file: %s (%q)", srcStat.Name(), srcStat.Mode())
}
}

@@ -331,12 +343,12 @@ func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sh
return "", false, nil
}
if !obj.Force {
-return "", false, fmt.Errorf("Can't force dir into file: %s", dst)
+return "", false, fmt.Errorf("can't force dir into file: %s", dst)
}

cleanDst := path.Clean(dst)
if cleanDst == "" || cleanDst == "/" {
-return "", false, fmt.Errorf("Don't want to remove root!") // safety
+return "", false, fmt.Errorf("don't want to remove root") // safety
}
// FIXME: respect obj.Recurse here...
// there is a dir here, where we want a file...
@@ -348,7 +360,7 @@ func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sh

} else if err == nil {
if !dstStat.Mode().IsRegular() {
-return "", false, fmt.Errorf("Non-regular dst file: %s (%q)", dstStat.Name(), dstStat.Mode())
+return "", false, fmt.Errorf("non-regular dst file: %s (%q)", dstStat.Name(), dstStat.Mode())
}
if isFile && os.SameFile(srcStat, dstStat) { // same inode, we're done!
return "", true, nil
@@ -417,6 +429,54 @@ func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sh
return sha256sum, false, dstFile.Sync()
}

+// dirCheckApply is the CheckApply operation for an empty directory.
+func (obj *FileRes) dirCheckApply(apply bool) (bool, error) {
+// check if the path exists and is a directory
+st, err := os.Stat(obj.path)
+if err != nil && !os.IsNotExist(err) {
+return false, errwrap.Wrapf(err, "error checking file resource existence")
+}
+
+if err == nil && st.IsDir() {
+return true, nil // already a directory, nothing to do
+}
+if err == nil && !st.IsDir() && !obj.Force {
+return false, fmt.Errorf("can't force file into dir: %s", obj.path)
+}
+
+if !apply {
+return false, nil
+}
+
+// the path exists and is not a directory
+// delete the file if force is given
+if err == nil && !st.IsDir() {
+log.Printf("dirCheckApply: Removing (force): %s", obj.path)
+if err := os.Remove(obj.path); err != nil {
+return false, err
+}
+}
+
+// create the empty directory
+var mode os.FileMode
+if obj.Mode != "" {
+mode, err = obj.mode()
+if err != nil {
+return false, err
+}
+} else {
+mode = os.ModePerm
+}
+
+if obj.Force {
+// FIXME: respect obj.Recurse here...
+// TODO: add recurse limit here
+return false, os.MkdirAll(obj.path, mode)
+}
+
+return false, os.Mkdir(obj.path, mode)
+}
+
// syncCheckApply is the CheckApply operation for a source and destination dir.
// It is recursive and can create directories directly, and files via the usual
// fileCheckApply method. It returns checkOK and error as is normally expected.
@@ -425,7 +485,7 @@ func (obj *FileRes) syncCheckApply(apply bool, src, dst string) (bool, error) {
 		log.Printf("syncCheckApply: %s -> %s", src, dst)
 	}
 	if src == "" || dst == "" {
-		return false, fmt.Errorf("The src and dst must not be empty!")
+		return false, fmt.Errorf("the src and dst must not be empty")
 	}
 
 	var checkOK = true
@@ -435,7 +495,7 @@ func (obj *FileRes) syncCheckApply(apply bool, src, dst string) (bool, error) {
 	dstIsDir := strings.HasSuffix(dst, "/")
 
 	if srcIsDir != dstIsDir {
-		return false, fmt.Errorf("The src and dst must be both either files or directories.")
+		return false, fmt.Errorf("the src and dst must be both either files or directories")
 	}
 
 	if !srcIsDir && !dstIsDir {
@@ -488,10 +548,10 @@ func (obj *FileRes) syncCheckApply(apply bool, src, dst string) (bool, error) {
 			if _, ok := smartDst[relPathFile]; ok {
 				absCleanDst := path.Clean(absDst)
 				if !obj.Force {
-					return false, fmt.Errorf("Can't force file into dir: %s", absCleanDst)
+					return false, fmt.Errorf("can't force file into dir: %s", absCleanDst)
 				}
 				if absCleanDst == "" || absCleanDst == "/" {
-					return false, fmt.Errorf("Don't want to remove root!") // safety
+					return false, fmt.Errorf("don't want to remove root") // safety
 				}
 				log.Printf("syncCheckApply: Removing (force): %s", absCleanDst)
 				if err := os.Remove(absCleanDst); err != nil {
@@ -536,7 +596,7 @@ func (obj *FileRes) syncCheckApply(apply bool, src, dst string) (bool, error) {
 		absDst := fileInfo.AbsPath // absolute path (should get removed)
 		absCleanDst := path.Clean(absDst)
 		if absCleanDst == "" || absCleanDst == "/" {
-			return false, fmt.Errorf("Don't want to remove root!") // safety
+			return false, fmt.Errorf("don't want to remove root") // safety
 		}
 
 		// FIXME: respect obj.Recurse here...
@@ -594,7 +654,7 @@ func (obj *FileRes) contentCheckApply(apply bool) (checkOK bool, _ error) {
 
 		// apply portion
 		if obj.path == "" || obj.path == "/" {
-			return false, fmt.Errorf("Don't want to remove root!") // safety
+			return false, fmt.Errorf("don't want to remove root") // safety
 		}
 		log.Printf("contentCheckApply: Removing: %s", obj.path)
 		// FIXME: respect obj.Recurse here...
@@ -603,16 +663,16 @@ func (obj *FileRes) contentCheckApply(apply bool) (checkOK bool, _ error) {
 		return false, err // either nil or not
 	}
 
+	if obj.isDir && obj.Source == "" {
+		return obj.dirCheckApply(apply)
+	}
+
 	// content is not defined, leave it alone...
 	if obj.Content == nil {
 		return true, nil
 	}
 
 	if obj.Source == "" { // do the obj.Content checks first...
-		if obj.isDir { // TODO: should we create an empty dir this way?
-			log.Fatal("XXX: Not implemented!") // XXX
-		}
-
 		bufferSrc := bytes.NewReader([]byte(*obj.Content))
 		sha256sum, checkOK, err := obj.fileCheckApply(apply, bufferSrc, obj.path, obj.sha256sum)
 		if sha256sum != "" { // empty values mean errored or didn't hash
@@ -635,6 +695,113 @@ func (obj *FileRes) contentCheckApply(apply bool) (checkOK bool, _ error) {
 	return checkOK, nil
 }
 
+// chmodCheckApply performs a CheckApply for the file permissions.
+func (obj *FileRes) chmodCheckApply(apply bool) (checkOK bool, _ error) {
+	log.Printf("%s[%s]: chmodCheckApply(%t)", obj.Kind(), obj.GetName(), apply)
+
+	if obj.State == "absent" {
+		// File is absent
+		return true, nil
+	}
+
+	if obj.Mode == "" {
+		// No mode specified, everything is ok
+		return true, nil
+	}
+
+	mode, err := obj.mode()
+
+	// If the file does not exist and we are in
+	// noop mode, do not throw an error.
+	if os.IsNotExist(err) && !apply {
+		return false, nil
+	}
+
+	if err != nil {
+		return false, err
+	}
+
+	st, err := os.Stat(obj.path)
+	if err != nil {
+		return false, err
+	}
+
+	// Nothing to do
+	if st.Mode() == mode {
+		return true, nil
+	}
+
+	// Not clean but don't apply
+	if !apply {
+		return false, nil
+	}
+
+	err = os.Chmod(obj.path, mode)
+	return false, err
+}
+
+// chownCheckApply performs a CheckApply for the file ownership.
+func (obj *FileRes) chownCheckApply(apply bool) (checkOK bool, _ error) {
+	var expectedUID, expectedGID int
+	log.Printf("%s[%s]: chownCheckApply(%t)", obj.Kind(), obj.GetName(), apply)
+
+	if obj.State == "absent" {
+		// File is absent or no owner specified
+		return true, nil
+	}
+
+	st, err := os.Stat(obj.path)
+
+	// If the file does not exist and we are in
+	// noop mode, do not throw an error.
+	if os.IsNotExist(err) && !apply {
+		return false, nil
+	}
+
+	if err != nil {
+		return false, err
+	}
+
+	stUnix, ok := st.Sys().(*syscall.Stat_t)
+	if !ok {
+		// Not unix
+		panic("No support for your platform")
+	}
+
+	if obj.Owner != "" {
+		expectedUID, err = obj.uid()
+		if err != nil {
+			return false, err
+		}
+	} else {
+		// Nothing specified, no changes to be made, expect same as actual
+		expectedUID = int(stUnix.Uid)
+	}
+
+	if obj.Group != "" {
+		expectedGID, err = obj.gid()
+		if err != nil {
+			return false, err
+		}
+	} else {
+		// Nothing specified, no changes to be made, expect same as actual
+		expectedGID = int(stUnix.Gid)
+	}
+
+	// Nothing to do
+	if int(stUnix.Uid) == expectedUID && int(stUnix.Gid) == expectedGID {
+		return true, nil
+	}
+
+	// Not clean, but don't apply
+	if !apply {
+		return false, nil
+	}
+
+	err = os.Chown(obj.path, expectedUID, expectedGID)
+	return false, err
+}
+
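The chmod hunk above compares the on-disk mode from os.Stat against the mode parsed from the resource's string field. A minimal sketch of that parse-and-compare step, assuming an octal string such as "0644"; parseMode and ensureMode are hypothetical helper names, not the resource's actual obj.mode() implementation:

	package main

	import (
		"log"
		"os"
		"strconv"
	)

	// parseMode converts an octal string like "0644" into an os.FileMode.
	func parseMode(s string) (os.FileMode, error) {
		m, err := strconv.ParseUint(s, 8, 32)
		if err != nil {
			return 0, err
		}
		return os.FileMode(m), nil
	}

	// ensureMode returns true if the permission bits already match, and
	// chmods the path into shape otherwise.
	func ensureMode(path, mode string) (bool, error) {
		want, err := parseMode(mode)
		if err != nil {
			return false, err
		}
		st, err := os.Stat(path)
		if err != nil {
			return false, err
		}
		if st.Mode().Perm() == want.Perm() {
			return true, nil // nothing to do
		}
		return false, os.Chmod(path, want)
	}

	func main() {
		if _, err := ensureMode("/tmp/example.txt", "0644"); err != nil {
			log.Fatal(err)
		}
	}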
 // CheckApply checks the resource state and applies the resource if the bool
 // input is true. It returns error info and if the state check passed or not.
 func (obj *FileRes) CheckApply(apply bool) (checkOK bool, _ error) {
@@ -659,19 +826,17 @@ func (obj *FileRes) CheckApply(apply bool) (checkOK bool, _ error) {
 		checkOK = false
 	}
 
-	// TODO
-	//if c, err := obj.chmodCheckApply(apply); err != nil {
-	//	return false, err
-	//} else if !c {
-	//	checkOK = false
-	//}
+	if c, err := obj.chmodCheckApply(apply); err != nil {
+		return false, err
+	} else if !c {
+		checkOK = false
+	}
 
-	// TODO
-	//if c, err := obj.chownCheckApply(apply); err != nil {
-	//	return false, err
-	//} else if !c {
-	//	checkOK = false
-	//}
+	if c, err := obj.chownCheckApply(apply); err != nil {
+		return false, err
+	} else if !c {
+		checkOK = false
+	}
 
 	return checkOK, nil // w00t
 }
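The CheckApply body above composes several sub-checks: any error aborts, any sub-check that had to change something flips checkOK to false, and only a fully untouched run reports true. A small illustrative sketch of that composition, with doStep1 and doStep2 as hypothetical stand-ins for the per-aspect checks:

	package main

	import "fmt"

	func doStep1(apply bool) (bool, error) { return true, nil }  // already clean
	func doStep2(apply bool) (bool, error) { return false, nil } // had to change something

	// checkApply reports true only if every step was already in the desired state.
	func checkApply(apply bool) (bool, error) {
		checkOK := true
		if c, err := doStep1(apply); err != nil {
			return false, err
		} else if !c {
			checkOK = false
		}
		if c, err := doStep2(apply); err != nil {
			return false, err
		} else if !c {
			checkOK = false
		}
		return checkOK, nil
	}

	func main() {
		c, err := checkApply(true)
		fmt.Println(c, err) // false <nil>, because step 2 applied a change
	}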
@@ -754,9 +919,9 @@ func (obj *FileRes) AutoEdges() AutoEdge {
 	}
 }
 
-// GetUIDs includes all params to make a unique identification of this object.
+// UIDs includes all params to make a unique identification of this object.
 // Most resources only return one, although some resources can return multiple.
-func (obj *FileRes) GetUIDs() []ResUID {
+func (obj *FileRes) UIDs() []ResUID {
 	x := &FileUID{
 		BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
 		path:    obj.path,
@@ -787,7 +952,7 @@ func (obj *FileRes) Compare(res Res) bool {
 		if obj.Name != res.Name {
 			return false
 		}
-		if obj.path != res.Path {
+		if obj.path != res.path {
 			return false
 		}
 		if (obj.Content == nil) != (res.Content == nil) { // xor
@@ -821,3 +986,23 @@ func (obj *FileRes) CollectPattern(pattern string) {
 	// XXX: currently the pattern for files can only override the Dirname variable :P
 	obj.Dirname = pattern // XXX: simplistic for now
 }
+
+// UnmarshalYAML is the custom unmarshal handler for this struct.
+// It is primarily useful for setting the defaults.
+func (obj *FileRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type rawRes FileRes // indirection to avoid infinite recursion
+
+	def := obj.Default()      // get the default
+	res, ok := def.(*FileRes) // put in the right format
+	if !ok {
+		return fmt.Errorf("could not convert to FileRes")
+	}
+	raw := rawRes(*res) // convert; the defaults go here
+
+	if err := unmarshal(&raw); err != nil {
+		return err
+	}
+
+	*obj = FileRes(raw) // restore from indirection with type conversion!
+	return nil
+}
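The UnmarshalYAML pattern above (an alias type to break the recursion, defaults copied in before unmarshalling) works for any struct. A self-contained sketch of the same trick on a toy config type, assuming the gopkg.in/yaml.v2 package; Config and its fields are illustrative only:

	package main

	import (
		"fmt"

		yaml "gopkg.in/yaml.v2"
	)

	type Config struct {
		Path  string `yaml:"path"`
		Force bool   `yaml:"force"`
	}

	// UnmarshalYAML fills in defaults first, then lets the YAML override them.
	func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
		type rawConfig Config          // indirection to avoid infinite recursion
		raw := rawConfig{Force: true}  // the defaults go here
		if err := unmarshal(&raw); err != nil {
			return err
		}
		*c = Config(raw) // restore from indirection with type conversion
		return nil
	}

	func main() {
		var c Config
		if err := yaml.Unmarshal([]byte("path: /tmp/x\n"), &c); err != nil {
			panic(err)
		}
		fmt.Println(c.Path, c.Force) // /tmp/x true, the default survived
	}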
43  resources/file_attrs.go  Normal file
@@ -0,0 +1,43 @@
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// +build go1.7

package resources

import (
	"os/user"
	"strconv"

	errwrap "github.com/pkg/errors"
)

// gid returns the group id for the group specified in the yaml file graph.
// Caller should first check obj.Group is not empty
func (obj *FileRes) gid() (int, error) {
	g2, err2 := user.LookupGroupId(obj.Group)
	if err2 == nil {
		return strconv.Atoi(g2.Gid)
	}

	g, err := user.LookupGroup(obj.Group)
	if err == nil {
		return strconv.Atoi(g.Gid)
	}

	return -1, errwrap.Wrapf(err, "Group lookup error (%s)", obj.Group)
}
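The helper above tries its argument first as a numeric gid and then as a group name. A small standalone sketch of that fallback, using only the standard os/user package from Go 1.7+; lookupGroup is an illustrative name:

	package main

	import (
		"fmt"
		"os/user"
		"strconv"
	)

	// lookupGroup accepts either a numeric id ("0") or a name ("wheel")
	// and returns the numeric gid.
	func lookupGroup(group string) (int, error) {
		if g, err := user.LookupGroupId(group); err == nil {
			return strconv.Atoi(g.Gid) // the input was already a gid
		}
		g, err := user.LookupGroup(group)
		if err != nil {
			return -1, fmt.Errorf("group lookup error (%s): %v", group, err)
		}
		return strconv.Atoi(g.Gid)
	}

	func main() {
		gid, err := lookupGroup("root")
		fmt.Println(gid, err)
	}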
43
resources/file_attrs_go1.6.go
Normal file
43
resources/file_attrs_go1.6.go
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// +build !go1.7
|
||||||
|
|
||||||
|
package resources
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
group "github.com/hnakamur/group"
|
||||||
|
errwrap "github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// gid returns the group id for the group specified in the yaml file graph.
|
||||||
|
// Caller should first check obj.Group is not empty
|
||||||
|
func (obj *FileRes) gid() (int, error) {
|
||||||
|
g2, err2 := group.LookupId(obj.Group)
|
||||||
|
if err2 == nil {
|
||||||
|
return strconv.Atoi(g2.Gid)
|
||||||
|
}
|
||||||
|
|
||||||
|
g, err := group.Lookup(obj.Group)
|
||||||
|
if err == nil {
|
||||||
|
return strconv.Atoi(g.Gid)
|
||||||
|
}
|
||||||
|
|
||||||
|
return -1, errwrap.Wrapf(err, "Group lookup error (%s)", obj.Group)
|
||||||
|
}
|
||||||
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -22,9 +22,7 @@ import (
 	"errors"
 	"fmt"
 	"log"
-	"time"
 
-	"github.com/purpleidea/mgmt/event"
 	"github.com/purpleidea/mgmt/util"
 
 	"github.com/godbus/dbus"
@@ -70,22 +68,26 @@ type HostnameRes struct {
 	conn *dbus.Conn
 }
 
-// NewHostnameRes is a constructor for this resource. It also calls Init() for you.
-func NewHostnameRes(name, staticHostname, transientHostname, prettyHostname string) (*HostnameRes, error) {
-	obj := &HostnameRes{
+// Default returns some sensible defaults for this resource.
+func (obj *HostnameRes) Default() Res {
+	return &HostnameRes{
 		BaseRes: BaseRes{
-			Name: name,
+			MetaParams: DefaultMetaParams, // force a default
 		},
-		PrettyHostname:    prettyHostname,
-		StaticHostname:    staticHostname,
-		TransientHostname: transientHostname,
 	}
-	return obj, obj.Init()
+}
+
+// Validate if the params passed in are valid data.
+func (obj *HostnameRes) Validate() error {
+	if obj.PrettyHostname == "" && obj.StaticHostname == "" && obj.TransientHostname == "" {
+		return ErrResourceInsufficientParameters
+	}
+	return obj.BaseRes.Validate()
 }
 
 // Init runs some startup code for this resource.
 func (obj *HostnameRes) Init() error {
-	obj.BaseRes.kind = "Hostname"
+	obj.BaseRes.kind = "hostname"
 	if obj.PrettyHostname == "" {
 		obj.PrettyHostname = obj.Hostname
 	}
@@ -98,34 +100,8 @@ func (obj *HostnameRes) Init() error {
|
|||||||
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate if the params passed in are valid data.
|
|
||||||
// FIXME: where should this get called ?
|
|
||||||
func (obj *HostnameRes) Validate() error {
|
|
||||||
if obj.PrettyHostname == "" && obj.StaticHostname == "" && obj.TransientHostname == "" {
|
|
||||||
return ErrResourceInsufficientParameters
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
func (obj *HostnameRes) Watch(processChan chan event.Event) error {
|
func (obj *HostnameRes) Watch() error {
|
||||||
if obj.IsWatching() {
|
|
||||||
return nil // TODO: should this be an error?
|
|
||||||
}
|
|
||||||
obj.SetWatching(true)
|
|
||||||
defer obj.SetWatching(false)
|
|
||||||
cuid := obj.converger.Register()
|
|
||||||
defer cuid.Unregister()
|
|
||||||
|
|
||||||
var startup bool
|
|
||||||
Startup := func(block bool) <-chan time.Time {
|
|
||||||
if block {
|
|
||||||
return nil // blocks forever
|
|
||||||
//return make(chan time.Time) // blocks forever
|
|
||||||
}
|
|
||||||
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
|
||||||
}
|
|
||||||
|
|
||||||
// if we share the bus with others, we will get each others messages!!
|
// if we share the bus with others, we will get each others messages!!
|
||||||
bus, err := util.SystemBusPrivateUsable() // don't share the bus connection!
|
bus, err := util.SystemBusPrivateUsable() // don't share the bus connection!
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -142,42 +118,32 @@ func (obj *HostnameRes) Watch(processChan chan event.Event) error {
|
|||||||
signals := make(chan *dbus.Signal, 10) // closed by dbus package
|
signals := make(chan *dbus.Signal, 10) // closed by dbus package
|
||||||
bus.Signal(signals)
|
bus.Signal(signals)
|
||||||
|
|
||||||
|
// notify engine that we're running
|
||||||
|
if err := obj.Running(); err != nil {
|
||||||
|
return err // bubble up a NACK...
|
||||||
|
}
|
||||||
|
|
||||||
var send = false // send event?
|
var send = false // send event?
|
||||||
|
|
||||||
for {
|
for {
|
||||||
obj.SetState(ResStateWatching) // reset
|
|
||||||
select {
|
select {
|
||||||
case <-signals:
|
case <-signals:
|
||||||
cuid.SetConverged(false)
|
|
||||||
send = true
|
send = true
|
||||||
obj.StateOK(false) // dirty
|
obj.StateOK(false) // dirty
|
||||||
|
|
||||||
case event := <-obj.Events():
|
case event := <-obj.Events():
|
||||||
cuid.SetConverged(false)
|
|
||||||
// we avoid sending events on unpause
|
// we avoid sending events on unpause
|
||||||
if exit, _ := obj.ReadEvent(&event); exit {
|
if exit, _ := obj.ReadEvent(event); exit != nil {
|
||||||
return nil // exit
|
return *exit // exit
|
||||||
}
|
}
|
||||||
send = true
|
send = true
|
||||||
obj.StateOK(false) // dirty
|
obj.StateOK(false) // dirty
|
||||||
|
|
||||||
case <-cuid.ConvergedTimer():
|
|
||||||
cuid.SetConverged(true) // converged!
|
|
||||||
continue
|
|
||||||
|
|
||||||
case <-Startup(startup):
|
|
||||||
cuid.SetConverged(false)
|
|
||||||
send = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// do all our event sending all together to avoid duplicate msgs
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
if send {
|
if send {
|
||||||
startup = true // startup finished
|
|
||||||
send = false
|
send = false
|
||||||
|
obj.Event()
|
||||||
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
|
||||||
return err // we exit or bubble up a NACK...
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -266,9 +232,9 @@ func (obj *HostnameRes) AutoEdges() AutoEdge {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUIDs includes all params to make a unique identification of this object.
|
// UIDs includes all params to make a unique identification of this object.
|
||||||
// Most resources only return one, although some resources can return multiple.
|
// Most resources only return one, although some resources can return multiple.
|
||||||
func (obj *HostnameRes) GetUIDs() []ResUID {
|
func (obj *HostnameRes) UIDs() []ResUID {
|
||||||
x := &HostnameUID{
|
x := &HostnameUID{
|
||||||
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
name: obj.Name,
|
name: obj.Name,
|
||||||
@@ -309,3 +275,23 @@ func (obj *HostnameRes) Compare(res Res) bool {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML is the custom unmarshal handler for this struct.
|
||||||
|
// It is primarily useful for setting the defaults.
|
||||||
|
func (obj *HostnameRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
type rawRes HostnameRes // indirection to avoid infinite recursion
|
||||||
|
|
||||||
|
def := obj.Default() // get the default
|
||||||
|
res, ok := def.(*HostnameRes) // put in the right format
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("could not convert to HostnameRes")
|
||||||
|
}
|
||||||
|
raw := rawRes(*res) // convert; the defaults go here
|
||||||
|
|
||||||
|
if err := unmarshal(&raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*obj = HostnameRes(raw) // restore from indirection with type conversion!
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
304
resources/kv.go
Normal file
304
resources/kv.go
Normal file
@@ -0,0 +1,304 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package resources
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/gob"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
errwrap "github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
gob.Register(&KVRes{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// KVResSkipCmpStyle represents the different styles of comparison when using SkipLessThan.
|
||||||
|
type KVResSkipCmpStyle int
|
||||||
|
|
||||||
|
// These are the different allowed comparison styles. Most folks will want SkipCmpStyleInt.
|
||||||
|
const (
|
||||||
|
SkipCmpStyleInt KVResSkipCmpStyle = iota
|
||||||
|
SkipCmpStyleString
|
||||||
|
)
|
||||||
|
|
||||||
|
// KVRes is a resource which writes a key/value pair into cluster wide storage.
|
||||||
|
// It will ensure that the key is set to the requested value. The one exception
|
||||||
|
// is that if you use the SkipLessThan parameter, then it will only replace the
|
||||||
|
// stored value with the requested value if it is greater than that stored one.
|
||||||
|
// This allows the KV resource to be used in fast acting, finite state machines
|
||||||
|
// which have monotonically increasing state values that represent progression.
|
||||||
|
// The one exception is that when this resource receives a refresh signal, then
|
||||||
|
// it will set the value to be the exact one if they are not identical already.
|
||||||
|
type KVRes struct {
|
||||||
|
BaseRes `yaml:",inline"`
|
||||||
|
Key string `yaml:"key"` // key to set
|
||||||
|
Value *string `yaml:"value"` // value to set (nil to delete)
|
||||||
|
SkipLessThan bool `yaml:"skiplessthan"` // skip updates as long as stored value is greater
|
||||||
|
SkipCmpStyle KVResSkipCmpStyle `yaml:"skipcmpstyle"` // how to do the less than cmp
|
||||||
|
// TODO: does it make sense to have different backends here? (eg: local)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default returns some sensible defaults for this resource.
|
||||||
|
func (obj *KVRes) Default() Res {
|
||||||
|
return &KVRes{
|
||||||
|
BaseRes: BaseRes{
|
||||||
|
MetaParams: DefaultMetaParams, // force a default
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate if the params passed in are valid data.
|
||||||
|
// FIXME: This will catch most issues unless data is passed in after Init with
|
||||||
|
// the Send/Recv mechanism. Should the engine re-call Validate after Send/Recv?
|
||||||
|
func (obj *KVRes) Validate() error {
|
||||||
|
if obj.Key == "" {
|
||||||
|
return fmt.Errorf("key must not be empty")
|
||||||
|
}
|
||||||
|
if obj.SkipLessThan {
|
||||||
|
if obj.SkipCmpStyle != SkipCmpStyleInt && obj.SkipCmpStyle != SkipCmpStyleString {
|
||||||
|
return fmt.Errorf("the SkipCmpStyle of %v is invalid", obj.SkipCmpStyle)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := obj.Value; obj.SkipCmpStyle == SkipCmpStyleInt && v != nil {
|
||||||
|
if _, err := strconv.Atoi(*v); err != nil {
|
||||||
|
return fmt.Errorf("the set value of %v can't convert to int", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return obj.BaseRes.Validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the resource.
|
||||||
|
func (obj *KVRes) Init() error {
|
||||||
|
obj.BaseRes.kind = "kv"
|
||||||
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
|
func (obj *KVRes) Watch() error {
|
||||||
|
|
||||||
|
// notify engine that we're running
|
||||||
|
if err := obj.Running(); err != nil {
|
||||||
|
return err // bubble up a NACK...
|
||||||
|
}
|
||||||
|
|
||||||
|
ch := obj.Data().World.StrWatch(obj.Key) // get possible events!
|
||||||
|
|
||||||
|
var send = false // send event?
|
||||||
|
var exit *error
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
// NOTE: this part is very similar to the file resource code
|
||||||
|
case err, ok := <-ch:
|
||||||
|
if !ok { // channel shutdown
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return errwrap.Wrapf(err, "unknown %s[%s] watcher error", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
if obj.Data().Debug {
|
||||||
|
log.Printf("%s[%s]: Event!", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
send = true
|
||||||
|
obj.StateOK(false) // dirty
|
||||||
|
|
||||||
|
case event := <-obj.Events():
|
||||||
|
// we avoid sending events on unpause
|
||||||
|
if exit, send = obj.ReadEvent(event); exit != nil {
|
||||||
|
return *exit // exit
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
|
if send {
|
||||||
|
send = false
|
||||||
|
obj.Event()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// lessThanCheck checks for less than validity.
|
||||||
|
func (obj *KVRes) lessThanCheck(value string) (checkOK bool, err error) {
|
||||||
|
|
||||||
|
v := *obj.Value
|
||||||
|
if value == v { // redundant check for safety
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var refresh = obj.Refresh() // do we have a pending reload to apply?
|
||||||
|
if !obj.SkipLessThan || refresh { // update lessthan on refresh
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch obj.SkipCmpStyle {
|
||||||
|
case SkipCmpStyleInt:
|
||||||
|
intValue, err := strconv.Atoi(value)
|
||||||
|
if err != nil {
|
||||||
|
// NOTE: We don't error here since we're going to write
|
||||||
|
// over the value anyways. It could be from an old run!
|
||||||
|
return false, nil // value is bad (old/corrupt), fix it
|
||||||
|
}
|
||||||
|
if vint, err := strconv.Atoi(v); err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "can't convert %v to int", v)
|
||||||
|
} else if vint < intValue {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case SkipCmpStyleString:
|
||||||
|
if v < value { // weird way to cmp, but valid
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("unmatches SkipCmpStyle style %v", obj.SkipCmpStyle)
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
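The comparison above only skips the write when the stored value is already greater than or equal to the requested one, so a monotonically increasing counter is never moved backwards. A standalone sketch of that decision for the integer style; shouldSkipWrite is an illustrative name, not part of the resource:

	package main

	import (
		"fmt"
		"strconv"
	)

	// shouldSkipWrite returns true when the stored value is an integer that is
	// already at least as large as the desired one. A stored value that does
	// not parse is treated as stale and gets overwritten.
	func shouldSkipWrite(stored, desired string) (bool, error) {
		s, err := strconv.Atoi(stored)
		if err != nil {
			return false, nil // old/corrupt value, just fix it
		}
		d, err := strconv.Atoi(desired)
		if err != nil {
			return false, fmt.Errorf("can't convert %v to int: %v", desired, err)
		}
		return s >= d, nil
	}

	func main() {
		skip, _ := shouldSkipWrite("7", "5")
		fmt.Println(skip) // true: the stored counter is already ahead
	}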
|
||||||
|
|
||||||
|
// CheckApply method for Password resource. Does nothing, returns happy!
|
||||||
|
func (obj *KVRes) CheckApply(apply bool) (checkOK bool, err error) {
|
||||||
|
log.Printf("%s[%s]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
|
||||||
|
|
||||||
|
if val, exists := obj.Recv["Value"]; exists && val.Changed {
|
||||||
|
// if we received on Value, and it changed, wooo, nothing to do.
|
||||||
|
log.Printf("CheckApply: `Value` was updated!")
|
||||||
|
}
|
||||||
|
|
||||||
|
hostname := obj.Data().Hostname // me
|
||||||
|
keyMap, err := obj.Data().World.StrGet(obj.Key)
|
||||||
|
if err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "check error during StrGet")
|
||||||
|
}
|
||||||
|
|
||||||
|
if value, ok := keyMap[hostname]; ok && obj.Value != nil {
|
||||||
|
if value == *obj.Value {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if c, err := obj.lessThanCheck(value); err != nil {
|
||||||
|
return false, err
|
||||||
|
} else if c {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if !ok && obj.Value == nil {
|
||||||
|
return true, nil // nothing to delete, we're good!
|
||||||
|
|
||||||
|
} else if ok && obj.Value == nil { // delete
|
||||||
|
err := obj.Data().World.StrDel(obj.Key)
|
||||||
|
return false, errwrap.Wrapf(err, "apply error during StrDel")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !apply {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := obj.Data().World.StrSet(obj.Key, *obj.Value); err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "apply error during StrSet")
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// KVUID is the UID struct for KVRes.
|
||||||
|
type KVUID struct {
|
||||||
|
BaseUID
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoEdges returns the AutoEdge interface. In this case no autoedges are used.
|
||||||
|
func (obj *KVRes) AutoEdges() AutoEdge {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UIDs includes all params to make a unique identification of this object.
|
||||||
|
// Most resources only return one, although some resources can return multiple.
|
||||||
|
func (obj *KVRes) UIDs() []ResUID {
|
||||||
|
x := &KVUID{
|
||||||
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
|
name: obj.Name,
|
||||||
|
}
|
||||||
|
return []ResUID{x}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupCmp returns whether two resources can be grouped together or not.
|
||||||
|
func (obj *KVRes) GroupCmp(r Res) bool {
|
||||||
|
_, ok := r.(*KVRes)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return false // TODO: this is doable!
|
||||||
|
// TODO: it could be useful to group our writes and watches!
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare two resources and return if they are equivalent.
|
||||||
|
func (obj *KVRes) Compare(res Res) bool {
|
||||||
|
switch res.(type) {
|
||||||
|
// we can only compare KVRes to others of the same resource
|
||||||
|
case *KVRes:
|
||||||
|
res := res.(*KVRes)
|
||||||
|
if !obj.BaseRes.Compare(res) { // call base Compare
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Key != res.Key {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if (obj.Value == nil) != (res.Value == nil) { // xor
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Value != nil && res.Value != nil {
|
||||||
|
if *obj.Value != *res.Value { // compare the strings
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if obj.SkipLessThan != res.SkipLessThan {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.SkipCmpStyle != res.SkipCmpStyle {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML is the custom unmarshal handler for this struct.
|
||||||
|
// It is primarily useful for setting the defaults.
|
||||||
|
func (obj *KVRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
type rawRes KVRes // indirection to avoid infinite recursion
|
||||||
|
|
||||||
|
def := obj.Default() // get the default
|
||||||
|
res, ok := def.(*KVRes) // put in the right format
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("could not convert to KVRes")
|
||||||
|
}
|
||||||
|
raw := rawRes(*res) // convert; the defaults go here
|
||||||
|
|
||||||
|
if err := unmarshal(&raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*obj = KVRes(raw) // restore from indirection with type conversion!
|
||||||
|
return nil
|
||||||
|
}
|
||||||
112
resources/msg.go
112
resources/msg.go
@@ -1,5 +1,5 @@
|
|||||||
// Mgmt
|
// Mgmt
|
||||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
//
|
//
|
||||||
// This program is free software: you can redistribute it and/or modify
|
// This program is free software: you can redistribute it and/or modify
|
||||||
@@ -23,9 +23,6 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/purpleidea/mgmt/event"
|
|
||||||
|
|
||||||
"github.com/coreos/go-systemd/journal"
|
"github.com/coreos/go-systemd/journal"
|
||||||
)
|
)
|
||||||
@@ -53,45 +50,33 @@ type MsgUID struct {
|
|||||||
body string
|
body string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMsgRes is a constructor for this resource.
|
// Default returns some sensible defaults for this resource.
|
||||||
func NewMsgRes(name, body, priority string, journal, syslog bool, fields map[string]string) (*MsgRes, error) {
|
func (obj *MsgRes) Default() Res {
|
||||||
message := name
|
return &MsgRes{
|
||||||
if body != "" {
|
|
||||||
message = body
|
|
||||||
}
|
|
||||||
|
|
||||||
obj := &MsgRes{
|
|
||||||
BaseRes: BaseRes{
|
BaseRes: BaseRes{
|
||||||
Name: name,
|
MetaParams: DefaultMetaParams, // force a default
|
||||||
},
|
},
|
||||||
Body: message,
|
|
||||||
Priority: priority,
|
|
||||||
Fields: fields,
|
|
||||||
Journal: journal,
|
|
||||||
Syslog: syslog,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return obj, obj.Init()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init runs some startup code for this resource.
|
// Validate the params that are passed to MsgRes.
|
||||||
func (obj *MsgRes) Init() error {
|
|
||||||
obj.BaseRes.kind = "Msg"
|
|
||||||
return obj.BaseRes.Init() // call base init, b/c we're overrriding
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate the params that are passed to MsgRes
|
|
||||||
func (obj *MsgRes) Validate() error {
|
func (obj *MsgRes) Validate() error {
|
||||||
invalidCharacters := regexp.MustCompile("[^a-zA-Z0-9_]")
|
invalidCharacters := regexp.MustCompile("[^a-zA-Z0-9_]")
|
||||||
for field := range obj.Fields {
|
for field := range obj.Fields {
|
||||||
if invalidCharacters.FindString(field) != "" {
|
if invalidCharacters.FindString(field) != "" {
|
||||||
return fmt.Errorf("Invalid character in field %s.", field)
|
return fmt.Errorf("invalid character in field %s", field)
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(field, "_") {
|
if strings.HasPrefix(field, "_") {
|
||||||
return fmt.Errorf("Fields cannot begin with _.")
|
return fmt.Errorf("fields cannot begin with _")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return obj.BaseRes.Validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init runs some startup code for this resource.
|
||||||
|
func (obj *MsgRes) Init() error {
|
||||||
|
obj.BaseRes.kind = "msg"
|
||||||
|
return obj.BaseRes.Init() // call base init, b/c we're overrriding
|
||||||
}
|
}
|
||||||
|
|
||||||
// isAllStateOK derives a compound state from all internal cache flags that apply to this resource.
|
// isAllStateOK derives a compound state from all internal cache flags that apply to this resource.
|
||||||
@@ -135,54 +120,27 @@ func (obj *MsgRes) journalPriority() journal.Priority {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
func (obj *MsgRes) Watch(processChan chan event.Event) error {
|
func (obj *MsgRes) Watch() error {
|
||||||
if obj.IsWatching() {
|
// notify engine that we're running
|
||||||
return nil
|
if err := obj.Running(); err != nil {
|
||||||
}
|
return err // bubble up a NACK...
|
||||||
obj.SetWatching(true)
|
|
||||||
defer obj.SetWatching(false)
|
|
||||||
cuid := obj.converger.Register()
|
|
||||||
defer cuid.Unregister()
|
|
||||||
|
|
||||||
var startup bool
|
|
||||||
Startup := func(block bool) <-chan time.Time {
|
|
||||||
if block {
|
|
||||||
return nil // blocks forever
|
|
||||||
//return make(chan time.Time) // blocks forever
|
|
||||||
}
|
|
||||||
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var send = false // send event?
|
var send = false // send event?
|
||||||
var exit = false
|
var exit *error
|
||||||
for {
|
for {
|
||||||
obj.SetState(ResStateWatching) // reset
|
|
||||||
select {
|
select {
|
||||||
case event := <-obj.Events():
|
case event := <-obj.Events():
|
||||||
cuid.SetConverged(false)
|
|
||||||
// we avoid sending events on unpause
|
// we avoid sending events on unpause
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
if exit, send = obj.ReadEvent(event); exit != nil {
|
||||||
return nil // exit
|
return *exit // exit
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-cuid.ConvergedTimer():
|
|
||||||
cuid.SetConverged(true) // converged!
|
|
||||||
continue
|
|
||||||
|
|
||||||
case <-Startup(startup):
|
|
||||||
cuid.SetConverged(false)
|
|
||||||
send = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// do all our event sending all together to avoid duplicate msgs
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
if send {
|
if send {
|
||||||
startup = true // startup finished
|
|
||||||
send = false
|
send = false
|
||||||
// only do this on certain types of events
|
obj.Event()
|
||||||
//obj.isStateOK = false // something made state dirty
|
|
||||||
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
|
||||||
return err // we exit or bubble up a NACK...
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -232,9 +190,9 @@ func (obj *MsgRes) CheckApply(apply bool) (bool, error) {
|
|||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUIDs includes all params to make a unique identification of this object.
|
// UIDs includes all params to make a unique identification of this object.
|
||||||
// Most resources only return one, although some resources can return multiple.
|
// Most resources only return one, although some resources can return multiple.
|
||||||
func (obj *MsgRes) GetUIDs() []ResUID {
|
func (obj *MsgRes) UIDs() []ResUID {
|
||||||
x := &MsgUID{
|
x := &MsgUID{
|
||||||
BaseUID: BaseUID{
|
BaseUID: BaseUID{
|
||||||
name: obj.GetName(),
|
name: obj.GetName(),
|
||||||
@@ -277,3 +235,23 @@ func (obj *MsgRes) Compare(res Res) bool {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML is the custom unmarshal handler for this struct.
|
||||||
|
// It is primarily useful for setting the defaults.
|
||||||
|
func (obj *MsgRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
type rawRes MsgRes // indirection to avoid infinite recursion
|
||||||
|
|
||||||
|
def := obj.Default() // get the default
|
||||||
|
res, ok := def.(*MsgRes) // put in the right format
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("could not convert to MsgRes")
|
||||||
|
}
|
||||||
|
raw := rawRes(*res) // convert; the defaults go here
|
||||||
|
|
||||||
|
if err := unmarshal(&raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*obj = MsgRes(raw) // restore from indirection with type conversion!
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
// Mgmt
|
// Mgmt
|
||||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
//
|
//
|
||||||
// This program is free software: you can redistribute it and/or modify
|
// This program is free software: you can redistribute it and/or modify
|
||||||
@@ -19,10 +19,8 @@ package resources
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/purpleidea/mgmt/event"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -35,76 +33,48 @@ type NoopRes struct {
|
|||||||
Comment string `yaml:"comment"` // extra field for example purposes
|
Comment string `yaml:"comment"` // extra field for example purposes
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNoopRes is a constructor for this resource. It also calls Init() for you.
|
// Default returns some sensible defaults for this resource.
|
||||||
func NewNoopRes(name string) (*NoopRes, error) {
|
func (obj *NoopRes) Default() Res {
|
||||||
obj := &NoopRes{
|
return &NoopRes{
|
||||||
BaseRes: BaseRes{
|
BaseRes: BaseRes{
|
||||||
Name: name,
|
MetaParams: DefaultMetaParams, // force a default
|
||||||
},
|
},
|
||||||
Comment: "",
|
|
||||||
}
|
}
|
||||||
return obj, obj.Init()
|
}
|
||||||
|
|
||||||
|
// Validate if the params passed in are valid data.
|
||||||
|
func (obj *NoopRes) Validate() error {
|
||||||
|
return obj.BaseRes.Validate()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init runs some startup code for this resource.
|
// Init runs some startup code for this resource.
|
||||||
func (obj *NoopRes) Init() error {
|
func (obj *NoopRes) Init() error {
|
||||||
obj.BaseRes.kind = "Noop"
|
obj.BaseRes.kind = "noop"
|
||||||
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate if the params passed in are valid data.
|
|
||||||
// FIXME: where should this get called ?
|
|
||||||
func (obj *NoopRes) Validate() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
func (obj *NoopRes) Watch(processChan chan event.Event) error {
|
func (obj *NoopRes) Watch() error {
|
||||||
if obj.IsWatching() {
|
// notify engine that we're running
|
||||||
return nil // TODO: should this be an error?
|
if err := obj.Running(); err != nil {
|
||||||
}
|
return err // bubble up a NACK...
|
||||||
obj.SetWatching(true)
|
|
||||||
defer obj.SetWatching(false)
|
|
||||||
cuid := obj.converger.Register()
|
|
||||||
defer cuid.Unregister()
|
|
||||||
|
|
||||||
var startup bool
|
|
||||||
Startup := func(block bool) <-chan time.Time {
|
|
||||||
if block {
|
|
||||||
return nil // blocks forever
|
|
||||||
//return make(chan time.Time) // blocks forever
|
|
||||||
}
|
|
||||||
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var send = false // send event?
|
var send = false // send event?
|
||||||
var exit = false
|
var exit *error
|
||||||
for {
|
for {
|
||||||
obj.SetState(ResStateWatching) // reset
|
|
||||||
select {
|
select {
|
||||||
case event := <-obj.Events():
|
case event := <-obj.Events():
|
||||||
cuid.SetConverged(false)
|
|
||||||
// we avoid sending events on unpause
|
// we avoid sending events on unpause
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
if exit, send = obj.ReadEvent(event); exit != nil {
|
||||||
return nil // exit
|
return *exit // exit
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-cuid.ConvergedTimer():
|
|
||||||
cuid.SetConverged(true) // converged!
|
|
||||||
continue
|
|
||||||
|
|
||||||
case <-Startup(startup):
|
|
||||||
cuid.SetConverged(false)
|
|
||||||
send = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// do all our event sending all together to avoid duplicate msgs
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
if send {
|
if send {
|
||||||
startup = true // startup finished
|
|
||||||
send = false
|
send = false
|
||||||
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
obj.Event()
|
||||||
return err // we exit or bubble up a NACK...
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -128,9 +98,9 @@ func (obj *NoopRes) AutoEdges() AutoEdge {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUIDs includes all params to make a unique identification of this object.
|
// UIDs includes all params to make a unique identification of this object.
|
||||||
// Most resources only return one, although some resources can return multiple.
|
// Most resources only return one, although some resources can return multiple.
|
||||||
func (obj *NoopRes) GetUIDs() []ResUID {
|
func (obj *NoopRes) UIDs() []ResUID {
|
||||||
x := &NoopUID{
|
x := &NoopUID{
|
||||||
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
name: obj.Name,
|
name: obj.Name,
|
||||||
@@ -169,3 +139,23 @@ func (obj *NoopRes) Compare(res Res) bool {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML is the custom unmarshal handler for this struct.
|
||||||
|
// It is primarily useful for setting the defaults.
|
||||||
|
func (obj *NoopRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
type rawRes NoopRes // indirection to avoid infinite recursion
|
||||||
|
|
||||||
|
def := obj.Default() // get the default
|
||||||
|
res, ok := def.(*NoopRes) // put in the right format
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("could not convert to NoopRes")
|
||||||
|
}
|
||||||
|
raw := rawRes(*res) // convert; the defaults go here
|
||||||
|
|
||||||
|
if err := unmarshal(&raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*obj = NoopRes(raw) // restore from indirection with type conversion!
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
// Mgmt
|
// Mgmt
|
||||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
//
|
//
|
||||||
// This program is free software: you can redistribute it and/or modify
|
// This program is free software: you can redistribute it and/or modify
|
||||||
@@ -22,9 +22,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/purpleidea/mgmt/event"
|
|
||||||
"github.com/purpleidea/mgmt/util"
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
|
||||||
systemdUtil "github.com/coreos/go-systemd/util"
|
systemdUtil "github.com/coreos/go-systemd/util"
|
||||||
@@ -58,7 +56,34 @@ type NspawnRes struct {
|
|||||||
svc *SvcRes
|
svc *SvcRes
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init runs some startup code for this resource
|
// Default returns some sensible defaults for this resource.
|
||||||
|
func (obj *NspawnRes) Default() Res {
|
||||||
|
return &NspawnRes{
|
||||||
|
BaseRes: BaseRes{
|
||||||
|
MetaParams: DefaultMetaParams, // force a default
|
||||||
|
},
|
||||||
|
State: running,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate if the params passed in are valid data.
|
||||||
|
func (obj *NspawnRes) Validate() error {
|
||||||
|
// TODO: validStates should be an enum!
|
||||||
|
validStates := map[string]struct{}{
|
||||||
|
stopped: {},
|
||||||
|
running: {},
|
||||||
|
}
|
||||||
|
if _, exists := validStates[obj.State]; !exists {
|
||||||
|
return fmt.Errorf("Invalid State: %s", obj.State)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := obj.svc.Validate(); err != nil { // composite resource
|
||||||
|
return errwrap.Wrapf(err, "validate failed for embedded svc")
|
||||||
|
}
|
||||||
|
return obj.BaseRes.Validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init runs some startup code for this resource.
|
||||||
func (obj *NspawnRes) Init() error {
|
func (obj *NspawnRes) Init() error {
|
||||||
var serviceName = fmt.Sprintf(nspawnServiceTmpl, obj.GetName())
|
var serviceName = fmt.Sprintf(nspawnServiceTmpl, obj.GetName())
|
||||||
obj.svc = &SvcRes{}
|
obj.svc = &SvcRes{}
|
||||||
@@ -67,61 +92,21 @@ func (obj *NspawnRes) Init() error {
|
|||||||
if err := obj.svc.Init(); err != nil {
|
if err := obj.svc.Init(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
obj.BaseRes.kind = "Nspawn"
|
obj.BaseRes.kind = "nspawn"
|
||||||
return obj.BaseRes.Init()
|
return obj.BaseRes.Init()
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNspawnRes is the constructor for this resource
|
|
||||||
func NewNspawnRes(name string, state string) (*NspawnRes, error) {
|
|
||||||
obj := &NspawnRes{
|
|
||||||
BaseRes: BaseRes{
|
|
||||||
Name: name,
|
|
||||||
},
|
|
||||||
State: state,
|
|
||||||
}
|
|
||||||
return obj, obj.Init()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate params
|
|
||||||
func (obj *NspawnRes) Validate() error {
|
|
||||||
validStates := map[string]struct{}{
|
|
||||||
stopped: {},
|
|
||||||
running: {},
|
|
||||||
}
|
|
||||||
if _, exists := validStates[obj.State]; !exists {
|
|
||||||
return fmt.Errorf("Invalid State: %s", obj.State)
|
|
||||||
}
|
|
||||||
return obj.svc.Validate()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Watch for state changes and sends a message to the bus if there is a change
|
// Watch for state changes and sends a message to the bus if there is a change
|
||||||
func (obj *NspawnRes) Watch(processChan chan event.Event) error {
|
func (obj *NspawnRes) Watch() error {
|
||||||
if obj.IsWatching() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
obj.SetWatching(true)
|
|
||||||
defer obj.SetWatching(false)
|
|
||||||
cuid := obj.converger.Register()
|
|
||||||
defer cuid.Unregister()
|
|
||||||
|
|
||||||
var startup bool
|
|
||||||
Startup := func(block bool) <-chan time.Time {
|
|
||||||
if block {
|
|
||||||
return nil // blocks forever
|
|
||||||
}
|
|
||||||
// 1/2 the resolution of converged timeout
|
|
||||||
return time.After(time.Duration(500) * time.Millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
// this resource depends on systemd ensure that it's running
|
// this resource depends on systemd ensure that it's running
|
||||||
if !systemdUtil.IsRunningSystemd() {
|
if !systemdUtil.IsRunningSystemd() {
|
||||||
return fmt.Errorf("Systemd is not running.")
|
return fmt.Errorf("systemd is not running")
|
||||||
}
|
}
|
||||||
|
|
||||||
// create a private message bus
|
// create a private message bus
|
||||||
bus, err := util.SystemBusPrivateUsable()
|
bus, err := util.SystemBusPrivateUsable()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errwrap.Wrapf(err, "Failed to connect to bus")
|
return errwrap.Wrapf(err, "failed to connect to bus")
|
||||||
}
|
}
|
||||||
|
|
||||||
// add a match rule to match messages going through the message bus
|
// add a match rule to match messages going through the message bus
|
||||||
@@ -135,11 +120,15 @@ func (obj *NspawnRes) Watch(processChan chan event.Event) error {
|
|||||||
buschan := make(chan *dbus.Signal, 10)
|
buschan := make(chan *dbus.Signal, 10)
|
||||||
bus.Signal(buschan)
|
bus.Signal(buschan)
|
||||||
|
|
||||||
|
// notify engine that we're running
|
||||||
|
if err := obj.Running(); err != nil {
|
||||||
|
return err // bubble up a NACK...
|
||||||
|
}
|
||||||
|
|
||||||
var send = false
|
var send = false
|
||||||
var exit = false
|
var exit *error
|
||||||
|
|
||||||
for {
|
for {
|
||||||
obj.SetState(ResStateWatching)
|
|
||||||
select {
|
select {
|
||||||
case event := <-buschan:
|
case event := <-buschan:
|
||||||
// process org.freedesktop.machine1 events for this resource's name
|
// process org.freedesktop.machine1 events for this resource's name
|
||||||
@@ -150,35 +139,22 @@ func (obj *NspawnRes) Watch(processChan chan event.Event) error {
|
|||||||
} else if event.Name == machineRemoved {
|
} else if event.Name == machineRemoved {
|
||||||
log.Printf("%s[%s]: Machine stopped", obj.Kind(), obj.GetName())
|
log.Printf("%s[%s]: Machine stopped", obj.Kind(), obj.GetName())
|
||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("Unknown event: %s", event.Name)
|
return fmt.Errorf("unknown event: %s", event.Name)
|
||||||
}
|
}
|
||||||
send = true
|
send = true
|
||||||
obj.StateOK(false) // dirty
|
obj.StateOK(false) // dirty
|
||||||
}
|
}
|
||||||
|
|
||||||
case event := <-obj.Events():
|
case event := <-obj.Events():
|
||||||
cuid.SetConverged(false)
|
if exit, send = obj.ReadEvent(event); exit != nil {
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
return *exit // exit
|
||||||
return nil // exit
|
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-cuid.ConvergedTimer():
|
|
||||||
cuid.SetConverged(true) // converged!
|
|
||||||
continue
|
|
||||||
|
|
||||||
case <-Startup(startup):
|
|
||||||
cuid.SetConverged(false)
|
|
||||||
send = true
|
|
||||||
obj.StateOK(false) // dirty
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// do all our event sending all together to avoid duplicate msgs
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
if send {
|
if send {
|
||||||
startup = true // startup finished
|
|
||||||
send = false
|
send = false
|
||||||
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
obj.Event()
|
||||||
return err // we exit or bubble up a NACK...
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -189,13 +165,13 @@ func (obj *NspawnRes) Watch(processChan chan event.Event) error {
 func (obj *NspawnRes) CheckApply(apply bool) (checkOK bool, err error) {
 	// this resource depends on systemd ensure that it's running
 	if !systemdUtil.IsRunningSystemd() {
-		return false, errors.New("Systemd is not running.")
+		return false, errors.New("systemd is not running")
 	}

 	// connect to org.freedesktop.machine1.Manager
 	conn, err := machined.New()
 	if err != nil {
-		return false, errwrap.Wrapf(err, "Failed to connect to dbus")
+		return false, errwrap.Wrapf(err, "failed to connect to dbus")
 	}

 	// compare the current state with the desired state and perform the
@@ -213,7 +189,7 @@ func (obj *NspawnRes) CheckApply(apply bool) (checkOK bool, err error) {
 		// error if we need the image ignore if we don't
 		if _, err = conn.GetImage(obj.GetName()); err != nil && obj.State != stopped {
 			return false, fmt.Errorf(
-				"No machine nor image named '%s'",
+				"no machine nor image named '%s'",
 				obj.GetName())
 		}
 	}
@@ -243,7 +219,7 @@ func (obj *NspawnRes) CheckApply(apply bool) (checkOK bool, err error) {
 		log.Printf("%s[%s]: Starting machine", obj.Kind(), obj.GetName())
 		// assume state had to be changed at this point, ignore checkOK
 		if _, err := obj.svc.CheckApply(apply); err != nil {
-			return false, errwrap.Wrapf(err, "Nested svc failed")
+			return false, errwrap.Wrapf(err, "nested svc failed")
 		}
 	}
 	if obj.State == stopped {
@@ -251,7 +227,7 @@ func (obj *NspawnRes) CheckApply(apply bool) (checkOK bool, err error) {
 		// org.freedesktop.machine1.Manager.KillMachine
 		log.Printf("%s[%s]: Stopping machine", obj.Kind(), obj.GetName())
 		if err := conn.TerminateMachine(obj.GetName()); err != nil {
-			return false, errwrap.Wrapf(err, "Failed to stop machine")
+			return false, errwrap.Wrapf(err, "failed to stop machine")
 		}
 	}

@@ -277,14 +253,14 @@ func (obj *NspawnUID) IFF(uid ResUID) bool {
 	return obj.name == res.name
 }

-// GetUIDs includes all params to make a unique identification of this object
+// UIDs includes all params to make a unique identification of this object
 // most resources only return one although some resources can return multiple
-func (obj *NspawnRes) GetUIDs() []ResUID {
+func (obj *NspawnRes) UIDs() []ResUID {
 	x := &NspawnUID{
 		BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
 		name:    obj.Name, // svc name
 	}
-	return append([]ResUID{x}, obj.svc.GetUIDs()...)
+	return append([]ResUID{x}, obj.svc.UIDs()...)
 }

 // GroupCmp returns whether two resources can be grouped together or not
@@ -321,3 +297,23 @@ func (obj *NspawnRes) Compare(res Res) bool {
 func (obj *NspawnRes) AutoEdges() AutoEdge {
 	return nil
 }
+
+// UnmarshalYAML is the custom unmarshal handler for this struct.
+// It is primarily useful for setting the defaults.
+func (obj *NspawnRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type rawRes NspawnRes // indirection to avoid infinite recursion
+
+	def := obj.Default()        // get the default
+	res, ok := def.(*NspawnRes) // put in the right format
+	if !ok {
+		return fmt.Errorf("could not convert to NspawnRes")
+	}
+	raw := rawRes(*res) // convert; the defaults go here
+
+	if err := unmarshal(&raw); err != nil {
+		return err
+	}
+
+	*obj = NspawnRes(raw) // restore from indirection with type conversion!
+	return nil
+}
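The UnmarshalYAML added above (and repeated for the other resources in this compare) pre-loads the struct with Default() before decoding, so omitted YAML fields fall back to sane values. Here is a minimal, self-contained sketch of the same indirection trick; it assumes gopkg.in/yaml.v2 and a made-up exampleRes type, neither of which is part of this change set.

package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

// exampleRes is a hypothetical stand-in for a resource struct.
type exampleRes struct {
	Name   string `yaml:"name"`
	Length uint16 `yaml:"length"`
}

// defaults returns the values we want when a field is omitted from the YAML.
func defaults() exampleRes {
	return exampleRes{Length: 64}
}

// UnmarshalYAML pre-fills the struct with defaults, then lets the decoder
// overwrite only the fields actually present in the document. The named type
// indirection (rawRes) avoids calling UnmarshalYAML recursively.
func (obj *exampleRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type rawRes exampleRes    // indirection to avoid infinite recursion
	raw := rawRes(defaults()) // start from the defaults
	if err := unmarshal(&raw); err != nil {
		return err
	}
	*obj = exampleRes(raw) // restore from indirection with a type conversion
	return nil
}

func main() {
	var r exampleRes
	if err := yaml.Unmarshal([]byte("name: pw1\n"), &r); err != nil {
		log.Fatal(err)
	}
	fmt.Println(r.Name, r.Length) // prints "pw1 64": length fell back to the default
}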
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -833,7 +833,7 @@ func FilterPackageIDs(m map[string]*PkPackageIDActionData, packages []string) ([
 		obj, ok := m[k] // lookup single package
 		// package doesn't exist, this is an error!
 		if !ok || !obj.Found || obj.PackageID == "" {
-			return nil, fmt.Errorf("Can't find package named '%s'.", k)
+			return nil, fmt.Errorf("can't find package named '%s'", k)
 		}
 		result = append(result, obj.PackageID)
 	}
@@ -848,7 +848,7 @@ func FilterState(m map[string]*PkPackageIDActionData, packages []string, state s
 		obj, ok := m[k] // lookup single package
 		// package doesn't exist, this is an error!
 		if !ok || !obj.Found {
-			return nil, fmt.Errorf("Can't find package named '%s'.", k)
+			return nil, fmt.Errorf("can't find package named '%s'", k)
 		}
 		var b bool
 		if state == "installed" {
@@ -865,7 +865,7 @@ func FilterState(m map[string]*PkPackageIDActionData, packages []string, state s
 		result[k] = b // save
 	}
 	if len(pkgs) > 0 {
-		err = fmt.Errorf("Can't filter non-boolean state on: %v!", strings.Join(pkgs, ","))
+		err = fmt.Errorf("can't filter non-boolean state on: %v", strings.Join(pkgs, ","))
 	}
 	return result, err
 }
@@ -877,7 +877,7 @@ func FilterPackageState(m map[string]*PkPackageIDActionData, packages []string,
 		obj, ok := m[k] // lookup single package
 		// package doesn't exist, this is an error!
 		if !ok || !obj.Found {
-			return nil, fmt.Errorf("Can't find package named '%s'.", k)
+			return nil, fmt.Errorf("can't find package named '%s'", k)
 		}
 		b := false
 		if state == "installed" && obj.Installed {
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -27,9 +27,7 @@ import (
 	"os"
 	"path"
 	"strings"
-	"time"

-	"github.com/purpleidea/mgmt/event"
 	"github.com/purpleidea/mgmt/recwatch"

 	errwrap "github.com/pkg/errors"
@@ -57,21 +55,25 @@ type PasswordRes struct {
 	recWatcher *recwatch.RecWatcher
 }

-// NewPasswordRes is a constructor for this resource. It also calls Init() for you.
-func NewPasswordRes(name string, length uint16) (*PasswordRes, error) {
-	obj := &PasswordRes{
+// Default returns some sensible defaults for this resource.
+func (obj *PasswordRes) Default() Res {
+	return &PasswordRes{
 		BaseRes: BaseRes{
-			Name: name,
+			MetaParams: DefaultMetaParams, // force a default
 		},
-		Length: length,
+		Length: 64, // safe default
 	}
-	return obj, obj.Init()
 }

+// Validate if the params passed in are valid data.
+func (obj *PasswordRes) Validate() error {
+	return obj.BaseRes.Validate()
+}
+
 // Init generates a new password for this resource if one was not provided. It
 // will save this into a local file. It will load it back in from previous runs.
 func (obj *PasswordRes) Init() error {
-	obj.BaseRes.kind = "Password" // must be set before using VarDir
+	obj.BaseRes.kind = "password" // must be set before using VarDir

 	dir, err := obj.VarDir("")
 	if err != nil {
@@ -82,12 +84,6 @@ func (obj *PasswordRes) Init() error {
 	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }

-// Validate if the params passed in are valid data.
-// FIXME: where should this get called ?
-func (obj *PasswordRes) Validate() error {
-	return nil
-}
-
 func (obj *PasswordRes) read() (string, error) {
 	file, err := os.Open(obj.path) // open a handle to read the file
 	if err != nil {
@@ -148,11 +144,11 @@ func (obj *PasswordRes) check(value string) error {
 		return nil
 	}
 	if !obj.Saved && length != 0 { // should have no stored password
-		return fmt.Errorf("Expected empty token only!")
+		return fmt.Errorf("expected empty token only")
 	}

 	if length != obj.Length {
-		return fmt.Errorf("String length is not %d", obj.Length)
+		return fmt.Errorf("string length is not %d", obj.Length)
 	}
 Loop:
 	for i := uint16(0); i < length; i++ {
@@ -162,30 +158,13 @@ Loop:
 			}
 		}
 		// we couldn't find that character, so error!
-		return fmt.Errorf("Invalid character `%s`", string(value[i]))
+		return fmt.Errorf("invalid character `%s`", string(value[i]))
 	}
 	return nil
 }

 // Watch is the primary listener for this resource and it outputs events.
-func (obj *PasswordRes) Watch(processChan chan event.Event) error {
-	if obj.IsWatching() {
-		return nil // TODO: should this be an error?
-	}
-	obj.SetWatching(true)
-	defer obj.SetWatching(false)
-	cuid := obj.converger.Register()
-	defer cuid.Unregister()
-
-	var startup bool
-	Startup := func(block bool) <-chan time.Time {
-		if block {
-			return nil // blocks forever
-			//return make(chan time.Time) // blocks forever
-		}
-		return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
-	}
-
+func (obj *PasswordRes) Watch() error {
 	var err error
 	obj.recWatcher, err = recwatch.NewRecWatcher(obj.path, false)
 	if err != nil {
@@ -193,46 +172,37 @@ func (obj *PasswordRes) Watch(processChan chan event.Event) error {
 	}
 	defer obj.recWatcher.Close()

+	// notify engine that we're running
+	if err := obj.Running(); err != nil {
+		return err // bubble up a NACK...
+	}
+
 	var send = false // send event?
-	var exit = false
+	var exit *error
 	for {
-		obj.SetState(ResStateWatching) // reset
 		select {
 		// NOTE: this part is very similar to the file resource code
 		case event, ok := <-obj.recWatcher.Events():
 			if !ok { // channel shutdown
 				return nil
 			}
-			cuid.SetConverged(false)
 			if err := event.Error; err != nil {
-				return errwrap.Wrapf(err, "Unknown %s[%s] watcher error", obj.Kind(), obj.GetName())
+				return errwrap.Wrapf(err, "unknown %s[%s] watcher error", obj.Kind(), obj.GetName())
 			}
 			send = true
 			obj.StateOK(false) // dirty

 		case event := <-obj.Events():
-			cuid.SetConverged(false)
 			// we avoid sending events on unpause
-			if exit, send = obj.ReadEvent(&event); exit {
-				return nil // exit
+			if exit, send = obj.ReadEvent(event); exit != nil {
+				return *exit // exit
 			}

-		case <-cuid.ConvergedTimer():
-			cuid.SetConverged(true) // converged!
-			continue
-
-		case <-Startup(startup):
-			cuid.SetConverged(false)
-			send = true
 		}

 		// do all our event sending all together to avoid duplicate msgs
 		if send {
-			startup = true // startup finished
 			send = false
-			if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
-				return err // we exit or bubble up a NACK...
-			}
+			obj.Event()
 		}
 	}
 }
@@ -329,9 +299,9 @@ func (obj *PasswordRes) AutoEdges() AutoEdge {
 	return nil
 }

-// GetUIDs includes all params to make a unique identification of this object.
+// UIDs includes all params to make a unique identification of this object.
 // Most resources only return one, although some resources can return multiple.
-func (obj *PasswordRes) GetUIDs() []ResUID {
+func (obj *PasswordRes) UIDs() []ResUID {
 	x := &PasswordUID{
 		BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
 		name:    obj.Name,
@@ -379,3 +349,23 @@ func (obj *PasswordRes) Compare(res Res) bool {
 	}
 	return true
 }
+
+// UnmarshalYAML is the custom unmarshal handler for this struct.
+// It is primarily useful for setting the defaults.
+func (obj *PasswordRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type rawRes PasswordRes // indirection to avoid infinite recursion
+
+	def := obj.Default()          // get the default
+	res, ok := def.(*PasswordRes) // put in the right format
+	if !ok {
+		return fmt.Errorf("could not convert to PasswordRes")
+	}
+	raw := rawRes(*res) // convert; the defaults go here
+
+	if err := unmarshal(&raw); err != nil {
+		return err
+	}
+
+	*obj = PasswordRes(raw) // restore from indirection with type conversion!
+	return nil
+}
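Across these hunks the Watch loops switch from a boolean exit flag plus DoSend to a *error returned by ReadEvent, so a shutdown can carry an error (or a nil for a clean stop) back out of Watch. Below is a self-contained toy version of that convention; toyEvent, readEvent and watch are invented stand-ins for illustration, not the real event package.

package main

import (
	"errors"
	"fmt"
	"time"
)

// toyEvent is a stand-in for an engine event; it is not the mgmt event type.
type toyEvent struct {
	exit bool  // ask the loop to shut down?
	err  error // optional error to shut down with
}

// readEvent mirrors the *error convention: nil means "keep running", a non-nil
// pointer means "return *exit from Watch", which may itself be a nil error for
// a clean shutdown.
func readEvent(ev toyEvent) (exit *error, send bool) {
	if ev.exit {
		return &ev.err, false
	}
	return nil, true
}

func watch(events <-chan toyEvent) error {
	var send = false
	var exit *error
	for {
		select {
		case ev := <-events:
			if exit, send = readEvent(ev); exit != nil {
				return *exit // exit, possibly with a nil error
			}
		case <-time.After(time.Second):
			send = true
		}
		if send {
			send = false
			fmt.Println("event!") // stand-in for obj.Event()
		}
	}
}

func main() {
	events := make(chan toyEvent, 1)
	events <- toyEvent{exit: true, err: errors.New("shutdown requested")}
	fmt.Println(watch(events))
}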
137 resources/pkg.go
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -23,9 +23,7 @@ import (
 	"log"
 	"path"
 	"strings"
-	"time"

-	"github.com/purpleidea/mgmt/event"
 	"github.com/purpleidea/mgmt/resources/packagekit"
 	"github.com/purpleidea/mgmt/util"

@@ -47,48 +45,53 @@ type PkgRes struct {
 	fileList []string // FIXME: update if pkg changes
 }

-// NewPkgRes is a constructor for this resource. It also calls Init() for you.
-func NewPkgRes(name, state string, allowuntrusted, allownonfree, allowunsupported bool) (*PkgRes, error) {
-	obj := &PkgRes{
+// Default returns some sensible defaults for this resource.
+func (obj *PkgRes) Default() Res {
+	return &PkgRes{
 		BaseRes: BaseRes{
-			Name: name,
+			MetaParams: DefaultMetaParams, // force a default
 		},
-		State:            state,
-		AllowUntrusted:   allowuntrusted,
-		AllowNonFree:     allownonfree,
-		AllowUnsupported: allowunsupported,
+		State: "installed", // i think this is preferable to "latest"
 	}
-	return obj, obj.Init()
 }

+// Validate checks if the resource data structure was populated correctly.
+func (obj *PkgRes) Validate() error {
+	if obj.State == "" {
+		return fmt.Errorf("state cannot be empty")
+	}
+
+	return obj.BaseRes.Validate()
+}
+
 // Init runs some startup code for this resource.
 func (obj *PkgRes) Init() error {
-	obj.BaseRes.kind = "Pkg"
+	obj.BaseRes.kind = "pkg"
 	if err := obj.BaseRes.Init(); err != nil { // call base init, b/c we're overriding
 		return err
 	}

 	bus := packagekit.NewBus()
 	if bus == nil {
-		return fmt.Errorf("Can't connect to PackageKit bus.")
+		return fmt.Errorf("can't connect to PackageKit bus")
 	}
 	defer bus.Close()

 	result, err := obj.pkgMappingHelper(bus)
 	if err != nil {
-		return errwrap.Wrapf(err, "The pkgMappingHelper failed")
+		return errwrap.Wrapf(err, "the pkgMappingHelper failed")
 	}

 	data, ok := result[obj.Name] // lookup single package (init does just one)
 	// package doesn't exist, this is an error!
 	if !ok || !data.Found {
-		return fmt.Errorf("Can't find package named '%s'.", obj.Name)
+		return fmt.Errorf("can't find package named '%s'", obj.Name)
 	}

 	packageIDs := []string{data.PackageID} // just one for now
 	filesMap, err := bus.GetFilesByPackageID(packageIDs)
 	if err != nil {
-		return errwrap.Wrapf(err, "Can't run GetFilesByPackageID")
+		return errwrap.Wrapf(err, "can't run GetFilesByPackageID")
 	}
 	if files, ok := filesMap[data.PackageID]; ok {
 		obj.fileList = util.DirifyFileList(files, false)
@@ -96,61 +99,37 @@ func (obj *PkgRes) Init() error {
 	return nil
 }

-// Validate checks if the resource data structure was populated correctly.
-func (obj *PkgRes) Validate() error {
-	if obj.State == "" {
-		return fmt.Errorf("State cannot be empty!")
-	}
-
-	return nil
-}
-
 // Watch is the primary listener for this resource and it outputs events.
 // It uses the PackageKit UpdatesChanged signal to watch for changes.
 // TODO: https://github.com/hughsie/PackageKit/issues/109
 // TODO: https://github.com/hughsie/PackageKit/issues/110
-func (obj *PkgRes) Watch(processChan chan event.Event) error {
-	if obj.IsWatching() {
-		return nil
-	}
-	obj.SetWatching(true)
-	defer obj.SetWatching(false)
-	cuid := obj.converger.Register()
-	defer cuid.Unregister()
-
-	var startup bool
-	Startup := func(block bool) <-chan time.Time {
-		if block {
-			return nil // blocks forever
-			//return make(chan time.Time) // blocks forever
-		}
-		return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
-	}
-
+func (obj *PkgRes) Watch() error {
 	bus := packagekit.NewBus()
 	if bus == nil {
-		return fmt.Errorf("Can't connect to PackageKit bus.")
+		return fmt.Errorf("can't connect to PackageKit bus")
 	}
 	defer bus.Close()

 	ch, err := bus.WatchChanges()
 	if err != nil {
-		return errwrap.Wrapf(err, "Error adding signal match")
+		return errwrap.Wrapf(err, "error adding signal match")
+	}
+
+	// notify engine that we're running
+	if err := obj.Running(); err != nil {
+		return err // bubble up a NACK...
 	}

 	var send = false // send event?
-	var exit = false
+	var exit *error

 	for {
 		if obj.debug {
 			log.Printf("%s: Watching...", obj.fmtNames(obj.getNames()))
 		}

-		obj.SetState(ResStateWatching) // reset
 		select {
 		case event := <-ch:
-			cuid.SetConverged(false)

 			// FIXME: ask packagekit for info on what packages changed
 			if obj.debug {
 				log.Printf("%s: Event: %v", obj.fmtNames(obj.getNames()), event.Name)
@@ -166,29 +145,17 @@ func (obj *PkgRes) Watch(processChan chan event.Event) error {
 			obj.StateOK(false) // dirty

 		case event := <-obj.Events():
-			cuid.SetConverged(false)
-			if exit, send = obj.ReadEvent(&event); exit {
-				return nil // exit
+			if exit, send = obj.ReadEvent(event); exit != nil {
+				return *exit // exit
 			}
 			//obj.StateOK(false) // these events don't invalidate state

-		case <-cuid.ConvergedTimer():
-			cuid.SetConverged(true) // converged!
-			continue
-
-		case <-Startup(startup):
-			cuid.SetConverged(false)
-			send = true
-			obj.StateOK(false) // dirty
 		}

 		// do all our event sending all together to avoid duplicate msgs
 		if send {
-			startup = true // startup finished
 			send = false
-			if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
-				return err // we exit or bubble up a NACK...
-			}
+			obj.Event()
 		}
 	}
 }
@@ -222,7 +189,7 @@ func (obj *PkgRes) groupMappingHelper() map[string]string {
 		for _, x := range g {
 			pkg, ok := x.(*PkgRes) // convert from Res
 			if !ok {
-				log.Fatalf("Grouped member %v is not a %s", x, obj.Kind())
+				log.Fatalf("grouped member %v is not a %s", x, obj.Kind())
 			}
 			result[pkg.Name] = pkg.State
 		}
@@ -262,13 +229,13 @@ func (obj *PkgRes) CheckApply(apply bool) (checkOK bool, err error) {

 	bus := packagekit.NewBus()
 	if bus == nil {
-		return false, fmt.Errorf("Can't connect to PackageKit bus.")
+		return false, fmt.Errorf("can't connect to PackageKit bus")
 	}
 	defer bus.Close()

 	result, err := obj.pkgMappingHelper(bus)
 	if err != nil {
-		return false, errwrap.Wrapf(err, "The pkgMappingHelper failed")
+		return false, errwrap.Wrapf(err, "the pkgMappingHelper failed")
 	}

 	packageMap := obj.groupMappingHelper() // map[string]string
@@ -281,7 +248,7 @@ func (obj *PkgRes) CheckApply(apply bool) (checkOK bool, err error) {
 	// eventually we might be able to drop this constraint!
 	states, err := packagekit.FilterState(result, packageList, obj.State)
 	if err != nil {
-		return false, errwrap.Wrapf(err, "The FilterState method failed")
+		return false, errwrap.Wrapf(err, "the FilterState method failed")
 	}
 	data, _ := result[obj.Name] // if above didn't error, we won't either!
 	validState := util.BoolMapTrue(util.BoolMapValues(states))
@@ -373,7 +340,7 @@ type PkgResAutoEdges struct {
 // Next returns the next automatic edge.
 func (obj *PkgResAutoEdges) Next() []ResUID {
 	if obj.testIsNext {
-		log.Fatal("Expecting a call to Test()")
+		log.Fatal("expecting a call to Test()")
 	}
 	obj.testIsNext = true // set after all the errors paths are past

@@ -401,13 +368,13 @@ func (obj *PkgResAutoEdges) Next() []ResUID {
 // Test gets results of the earlier Next() call, & returns if we should continue!
 func (obj *PkgResAutoEdges) Test(input []bool) bool {
 	if !obj.testIsNext {
-		log.Fatal("Expecting a call to Next()")
+		log.Fatal("expecting a call to Next()")
 	}

 	// ack the svcUID's...
 	if x := obj.svcUIDs; len(x) > 0 {
 		if y := len(x); y != len(input) {
-			log.Fatalf("Expecting %d value(s)!", y)
+			log.Fatalf("expecting %d value(s)", y)
 		}
 		obj.svcUIDs = []ResUID{} // empty
 		obj.testIsNext = false
@@ -416,7 +383,7 @@ func (obj *PkgResAutoEdges) Test(input []bool) bool {

 	count := len(obj.fileList)
 	if count != len(input) {
-		log.Fatalf("Expecting %d value(s)!", count)
+		log.Fatalf("expecting %d value(s)", count)
 	}
 	obj.testIsNext = false // set after all the errors paths are past

@@ -479,9 +446,9 @@ func (obj *PkgRes) AutoEdges() AutoEdge {
 	}
 }

-// GetUIDs includes all params to make a unique identification of this object.
+// UIDs includes all params to make a unique identification of this object.
 // Most resources only return one, although some resources can return multiple.
-func (obj *PkgRes) GetUIDs() []ResUID {
+func (obj *PkgRes) UIDs() []ResUID {
 	x := &PkgUID{
 		BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
 		name:    obj.Name,
@@ -561,3 +528,23 @@ func ReturnSvcInFileList(fileList []string) []string {
 	}
 	return result
 }
+
+// UnmarshalYAML is the custom unmarshal handler for this struct.
+// It is primarily useful for setting the defaults.
+func (obj *PkgRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type rawRes PkgRes // indirection to avoid infinite recursion
+
+	def := obj.Default()     // get the default
+	res, ok := def.(*PkgRes) // put in the right format
+	if !ok {
+		return fmt.Errorf("could not convert to PkgRes")
+	}
+	raw := rawRes(*res) // convert; the defaults go here
+
+	if err := unmarshal(&raw); err != nil {
+		return err
+	}
+
+	*obj = PkgRes(raw) // restore from indirection with type conversion!
+	return nil
+}
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -19,19 +19,23 @@
 package resources

 import (
-	"bytes"
-	"encoding/base64"
-	"encoding/gob"
 	"fmt"
 	"log"
+	"math"
 	"os"
 	"path"
+	"sort"
+	"sync"
+	"time"

 	// TODO: should each resource be a sub-package?
 	"github.com/purpleidea/mgmt/converger"
 	"github.com/purpleidea/mgmt/event"
+	"github.com/purpleidea/mgmt/prometheus"
+	"github.com/purpleidea/mgmt/util"

 	errwrap "github.com/pkg/errors"
+	"golang.org/x/time/rate"
 )

 //go:generate stringer -type=ResState -output=resstate_stringer.go
@@ -41,22 +45,37 @@ type ResState int

 // Each ResState should be set properly in the relevant part of the resource.
 const (
 	ResStateNil ResState = iota
-	ResStateWatching
-	ResStateEvent // an event has happened, but we haven't poked yet
-	ResStateCheckApply
-	ResStatePoking
+	ResStateProcess    // we're in process, but we haven't done much yet
+	ResStateCheckApply // we're about to run CheckApply
+	ResStatePoking     // we're done CheckApply, and we're about to poke
 )

 const refreshPathToken = "refresh"

+// World is an interface to the rest of the different graph state. It allows
+// the GAPI to store state and exchange information throughout the cluster. It
+// is the interface each machine uses to communicate with the rest of the world.
+type World interface { // TODO: is there a better name for this interface?
+	ResExport([]Res) error
+	// FIXME: should this method take a "filter" data struct instead of many args?
+	ResCollect(hostnameFilter, kindFilter []string) ([]Res, error)
+
+	StrWatch(namespace string) chan error
+	StrGet(namespace string) (map[string]string, error)
+	StrSet(namespace, value string) error
+	StrDel(namespace string) error
+}
+
 // Data is the set of input values passed into the pgraph for the resources.
 type Data struct {
-	//Hostname string // uuid for the host
+	Hostname string // uuid for the host
 	//Noop bool
 	Converger  converger.Converger
-	Prefix     string // the prefix to be used for the pgraph namespace
-	Debug      bool
+	Prometheus *prometheus.Prometheus
+	World      World
+	Prefix     string // the prefix to be used for the pgraph namespace
+	Debug      bool
 	// NOTE: we can add more fields here if needed for the resources.
 }

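The new World interface above is only declared in this hunk; as a reading aid, here is a hypothetical in-memory implementation of its Str* namespace methods. The trimmed interface copy, the memWorld type and the keyed-by-hostname layout are assumptions made for the sketch (the layout is just one plausible reading of the StrGet signature), not code from this change set.

package main

import (
	"fmt"
	"sync"
)

// strStore copies just the string subset of the World interface so this
// sketch compiles on its own; the real interface lives in the resources package.
type strStore interface {
	StrWatch(namespace string) chan error
	StrGet(namespace string) (map[string]string, error)
	StrSet(namespace, value string) error
	StrDel(namespace string) error
}

// memWorld is a hypothetical in-memory backend, handy for tests.
type memWorld struct {
	mu       sync.Mutex
	hostname string
	data     map[string]map[string]string // namespace -> hostname -> value
	watches  map[string][]chan error
}

func newMemWorld(hostname string) *memWorld {
	return &memWorld{
		hostname: hostname,
		data:     make(map[string]map[string]string),
		watches:  make(map[string][]chan error),
	}
}

func (w *memWorld) StrWatch(ns string) chan error {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan error, 1)
	w.watches[ns] = append(w.watches[ns], ch)
	return ch
}

func (w *memWorld) notify(ns string) { // callers hold w.mu
	for _, ch := range w.watches[ns] {
		select {
		case ch <- nil: // nil means "something changed, go look"
		default: // don't block if the watcher is slow
		}
	}
}

func (w *memWorld) StrGet(ns string) (map[string]string, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	out := make(map[string]string)
	for k, v := range w.data[ns] {
		out[k] = v
	}
	return out, nil
}

func (w *memWorld) StrSet(ns, value string) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.data[ns] == nil {
		w.data[ns] = make(map[string]string)
	}
	w.data[ns][w.hostname] = value
	w.notify(ns)
	return nil
}

func (w *memWorld) StrDel(ns string) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	delete(w.data[ns], w.hostname)
	w.notify(ns)
	return nil
}

func main() {
	var world strStore = newMemWorld("h1")
	ch := world.StrWatch("password")
	_ = world.StrSet("password", "hunter2")
	<-ch // the watch fires after the write
	m, _ := world.StrGet("password")
	fmt.Println(m) // map[h1:hunter2]
}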
@@ -91,8 +110,12 @@ type MetaParams struct {
 	// NOTE: there are separate Watch and CheckApply retry and delay values,
 	// but I've decided to use the same ones for both until there's a proper
 	// reason to want to do something differently for the Watch errors.
 	Retry int16  `yaml:"retry"` // metaparam, number of times to retry on error. -1 for infinite
 	Delay uint64 `yaml:"delay"` // metaparam, number of milliseconds to wait between retries
+	Poll  uint32 `yaml:"poll"`  // metaparam, number of seconds between poll intervals, 0 to watch
+	Limit rate.Limit `yaml:"limit"` // metaparam, number of events per second to allow through
+	Burst int        `yaml:"burst"` // metaparam, number of events to allow in a burst
+	Sema  []string   `yaml:"sema"`  // metaparam, list of semaphore ids (id | id:count)
 }

 // UnmarshalYAML is the custom unmarshal handler for the MetaParams struct. It
@@ -114,8 +137,12 @@ var DefaultMetaParams = MetaParams{
 	AutoEdge:  true,
 	AutoGroup: true,
 	Noop:      false,
 	Retry:     0, // TODO: is this a good default?
 	Delay:     0, // TODO: is this a good default?
+	Poll:      0,        // defaults to watching for events
+	Limit:     rate.Inf, // defaults to no limit
+	Burst:     0,        // no burst needed on an infinite rate // TODO: is this a good default?
+	//Sema: []string{},
 }

 // The Base interface is everything that is common to all resources.
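The new Limit and Burst metaparams map onto golang.org/x/time/rate, and the defaults above (rate.Inf with Burst 0) mean "no limiting". A small runnable check of that behaviour, and of why a finite limit with a zero burst would block forever (which a later hunk guards against in Validate):

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Limit: rate.Inf with Burst: 0 is treated by the limiter as "always allow".
	unlimited := rate.NewLimiter(rate.Inf, 0)
	fmt.Println(unlimited.Allow()) // true

	// A finite rate with zero burst can never admit an event.
	blocked := rate.NewLimiter(5, 0) // 5 events/sec, but no burst capacity
	fmt.Println(blocked.Allow()) // false, and it stays false

	// A finite rate needs at least burst 1 to let single events through.
	limited := rate.NewLimiter(5, 1)
	fmt.Println(limited.Allow()) // true: the first token is available immediately
	fmt.Println(limited.Allow()) // false until roughly 200ms have passed
}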
@@ -126,15 +153,21 @@ type Base interface {
 	SetKind(string)
 	Kind() string
 	Meta() *MetaParams
-	Events() chan event.Event
-	AssociateData(*Data)
-	IsWatching() bool
-	SetWatching(bool)
+	Events() chan *event.Event
+	Data() *Data
+	IsWorking() bool
+	IsQuiescing() bool
+	QuiesceGroup() *sync.WaitGroup
+	WaitGroup() *sync.WaitGroup
+	Setup()
+	Reset()
+	Converger() converger.Converger
+	ConvergerUIDs() (converger.UID, converger.UID, converger.UID)
 	GetState() ResState
 	SetState(ResState)
-	DoSend(chan event.Event, string) (bool, error)
-	SendEvent(event.EventName, bool, bool) bool
-	ReadEvent(*event.Event) (bool, bool) // TODO: optional here?
+	Event() error
+	SendEvent(event.Kind, error) error
+	ReadEvent(*event.Event) (*error, bool)
 	Refresh() bool   // is there a pending refresh to run?
 	SetRefresh(bool) // set the refresh state of this resource
 	SendRecv(Res) (map[string]bool, error) // send->recv data passing function
@@ -147,19 +180,31 @@ type Base interface {
 	GetGroup() []Res // return everyone grouped inside me
 	SetGroup([]Res)
 	VarDir(string) (string, error)
+	Running() error           // notify the engine that Watch started
+	Started() <-chan struct{} // returns when the resource has started
+	Stopped() <-chan struct{} // returns when the resource has stopped
+	Starter(bool)
+	Poll() error // poll alternative to watching :(
+	ProcessChan() chan *event.Event
+	ProcessSync() *sync.WaitGroup
+	ProcessExit()
+	Prometheus() *prometheus.Prometheus
 }

 // Res is the minimum interface you need to implement to define a new resource.
 type Res interface {
 	Base // include everything from the Base interface
+	Default() Res // return a struct with sane defaults as a Res
+	Validate() error
 	Init() error
-	//Validate() error // TODO: this might one day be added
-	GetUIDs() []ResUID // most resources only return one
-	Watch(chan event.Event) error // send on channel to signal process() events
+	Close() error
+	UIDs() []ResUID // most resources only return one
+	Watch() error   // send on channel to signal process() events
 	CheckApply(apply bool) (checkOK bool, err error)
 	AutoEdges() AutoEdge
 	Compare(Res) bool
 	CollectPattern(string) // XXX: temporary until Res collection is more advanced
+	//UnmarshalYAML(unmarshal func(interface{}) error) error // optional
 }

 // BaseRes is the base struct that gets used in every resource.
@@ -168,20 +213,67 @@ type BaseRes struct {
 	MetaParams MetaParams `yaml:"meta"` // struct of all the metaparams
 	Recv       map[string]*Send         // mapping of key to receive on from value

 	kind   string
-	events chan event.Event
+	data   Data
+	state  ResState
+	prefix string // base prefix for this resource
+
+	eventsLock *sync.Mutex // locks around sending and closing of events channel
+	eventsDone bool
+	eventsChan chan *event.Event
+
+	processLock *sync.Mutex
+	processDone bool
+	processChan chan *event.Event
+	processSync *sync.WaitGroup

 	converger converger.Converger // converged tracking
-	prefix    string // base prefix for this resource
-	debug     bool
-	state     ResState
-	watching  bool // is Watch() loop running ?
-	isStateOK bool // whether the state is okay based on events or not
+	cuid      converger.UID
+	wcuid     converger.UID
+	pcuid     converger.UID
+
+	started   chan struct{} // closed when worker is started/running
+	stopped   chan struct{} // closed when worker is stopped/exited
+	isStarted bool          // did the started chan already close?
+	starter   bool          // does this have indegree == 0 ? XXX: usually?
+
+	quiescing    bool // are we quiescing (pause or exit)
+	quiesceGroup *sync.WaitGroup
+	waitGroup    *sync.WaitGroup
+	working      bool // is the Worker() loop running ?
+	debug        bool
+	isStateOK    bool // whether the state is okay based on events or not

 	isGrouped bool  // am i contained within a group?
 	grouped   []Res // list of any grouped resources
 	refresh   bool  // does this resource have a refresh to run?
 	//refreshState StatefulBool // TODO: future stateful bool
 }

+// UnmarshalYAML is the custom unmarshal handler for the BaseRes struct. It is
+// primarily useful for setting the defaults, in particular if meta is absent!
+// FIXME: how come we can't get this to work properly without dropping fields?
+//func (obj *BaseRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//	DefaultBaseRes := BaseRes{
+//		// without specifying a default here, if we don't specify *any*
+//		// meta parameters in the yaml file, then the UnmarshalYAML for
+//		// the MetaParams struct won't run, and we won't get defaults!
+//		MetaParams: DefaultMetaParams, // force a default
+//	}
+//
+//	type rawBaseRes BaseRes // indirection to avoid infinite recursion
+//	raw := rawBaseRes(DefaultBaseRes) // convert; the defaults go here
+//	//raw := rawBaseRes{}
+//
+//	if err := unmarshal(&raw); err != nil {
+//		return err
+//	}
+//
+//	*obj = BaseRes(raw) // restore from indirection with type conversion!
+//	return nil
+//}
+
 // UIDExistsInUIDs wraps the IFF method when used with a list of UID's.
 func UIDExistsInUIDs(uid ResUID, uids []ResUID) bool {
 	for _, u := range uids {
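The started/stopped channels and the waitGroup added to BaseRes above follow a common Go lifecycle pattern: close a channel to broadcast a one-time state change, and pair every startup with a WaitGroup entry. A minimal, self-contained sketch of that pattern, using hypothetical names (lifecycle, running, worker) rather than the real engine types:

package main

import (
	"fmt"
	"sync"
	"time"
)

// lifecycle is a toy model of the started/stopped/waitGroup trio.
type lifecycle struct {
	started   chan struct{} // closed once the worker is running
	stopped   chan struct{} // closed once the worker has exited
	isStarted bool          // guard so we only close started once
	waitGroup *sync.WaitGroup
}

func newLifecycle() *lifecycle {
	return &lifecycle{
		started:   make(chan struct{}),
		stopped:   make(chan struct{}),
		waitGroup: &sync.WaitGroup{},
	}
}

// running plays the role of Running(): announce that the watch loop is up.
func (l *lifecycle) running() {
	if !l.isStarted {
		l.isStarted = true
		close(l.started) // anyone blocked on Started() unblocks now
	}
}

// worker is a stand-in for the per-resource Worker goroutine.
func (l *lifecycle) worker() {
	defer l.waitGroup.Done()
	defer close(l.stopped) // mirrors what Close() does in the hunks below
	l.running()
	time.Sleep(10 * time.Millisecond) // pretend to watch for events
}

func main() {
	l := newLifecycle()
	l.waitGroup.Add(1)
	go l.worker()

	<-l.started // block until the resource reports it is running
	fmt.Println("started")
	<-l.stopped // block until it has shut down
	l.waitGroup.Wait()
	fmt.Println("stopped")
}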
@@ -223,18 +315,80 @@ func (obj *BaseUID) Reversed() bool {
 	return *obj.reversed
 }

+// Validate reports any problems with the struct definition.
+func (obj *BaseRes) Validate() error {
+	isInf := obj.Meta().Limit == rate.Inf || math.IsInf(float64(obj.Meta().Limit), 1)
+	if obj.Meta().Burst == 0 && !isInf { // blocked
+		return fmt.Errorf("Permanently limited (rate != Inf, burst: 0)")
+	}
+	return nil
+}
+
 // Init initializes structures like channels if created without New constructor.
 func (obj *BaseRes) Init() error {
-	if obj.kind == "" {
-		return fmt.Errorf("Resource did not set kind!")
+	if obj.debug {
+		log.Printf("%s[%s]: Init()", obj.Kind(), obj.GetName())
 	}
-	obj.events = make(chan event.Event) // unbuffered chan to avoid stale events
+	if obj.kind == "" {
+		return fmt.Errorf("resource did not set kind")
+	}
+
+	obj.cuid = obj.Converger().Register()
+	obj.wcuid = obj.Converger().Register() // get a cuid for the worker!
+	obj.pcuid = obj.Converger().Register() // get a cuid for the process
+
+	obj.processLock = &sync.Mutex{} // lock around processChan closing and sending
+	obj.processDone = false         // did we close processChan ?
+	obj.processChan = make(chan *event.Event)
+	obj.processSync = &sync.WaitGroup{}
+
+	obj.quiescing = false // no quiesce operation is happening at the moment
+	obj.quiesceGroup = &sync.WaitGroup{}
+
+	obj.waitGroup = &sync.WaitGroup{} // Init and Close must be 1-1 matched!
+	obj.waitGroup.Add(1)
+	obj.working = true // Worker method should now be running...
+
+	// FIXME: force a sane default until UnmarshalYAML on *BaseRes works...
+	if obj.Meta().Burst == 0 && obj.Meta().Limit == 0 { // blocked
+		obj.Meta().Limit = rate.Inf
+	}
+	if math.IsInf(float64(obj.Meta().Limit), 1) { // yaml `.inf` -> rate.Inf
+		obj.Meta().Limit = rate.Inf
+	}
+
 	//dir, err := obj.VarDir("")
 	//if err != nil {
-	//	return errwrap.Wrapf(err, "VarDir failed in Init()")
+	//	return errwrap.Wrapf(err, "the VarDir failed in Init()")
 	//}
 	// TODO: this StatefulBool implementation could be eventually swappable
 	//obj.refreshState = &DiskBool{Path: path.Join(dir, refreshPathToken)}
+
+	if err := obj.Prometheus().AddManagedResource(fmt.Sprintf("%v[%v]", obj.Kind(), obj.GetName()), obj.Kind()); err != nil {
+		return errwrap.Wrapf(err, "could not increase prometheus counter!")
+	}
+
+	return nil
+}
+
+// Close shuts down and performs any cleanup.
+func (obj *BaseRes) Close() error {
+	if obj.debug {
+		log.Printf("%s[%s]: Close()", obj.Kind(), obj.GetName())
+	}
+
+	obj.pcuid.Unregister()
+	obj.wcuid.Unregister()
+	obj.cuid.Unregister()
+
+	obj.working = false // Worker method should now be closing...
+	close(obj.stopped)
+	obj.waitGroup.Done()
+
+	if err := obj.Prometheus().RemoveManagedResource(fmt.Sprintf("%v[%v]", obj.Kind(), obj.GetName()), obj.kind); err != nil {
+		return errwrap.Wrapf(err, "could not decrease prometheus counter!")
+	}
+
 	return nil
 }

@@ -264,25 +418,61 @@ func (obj *BaseRes) Meta() *MetaParams {
 }

 // Events returns the channel of events to listen on.
-func (obj *BaseRes) Events() chan event.Event {
-	return obj.events
+func (obj *BaseRes) Events() chan *event.Event {
+	return obj.eventsChan
 }

-// AssociateData associates some data with the object in question.
-func (obj *BaseRes) AssociateData(data *Data) {
-	obj.converger = data.Converger
-	obj.prefix = data.Prefix
-	obj.debug = data.Debug
+// Data returns an associable handle to some data passed in to the resource.
+func (obj *BaseRes) Data() *Data {
+	return &obj.data
 }

-// IsWatching tells us if the Watch() function is running.
-func (obj *BaseRes) IsWatching() bool {
-	return obj.watching
+// IsWorking tells us if the Worker() function is running. Not thread safe.
+func (obj *BaseRes) IsWorking() bool {
+	return obj.working
 }

-// SetWatching stores the status of if the Watch() function is running.
-func (obj *BaseRes) SetWatching(b bool) {
-	obj.watching = b
+// IsQuiescing returns if there is a quiesce operation in progress. Pause and
+// exit both meet this criteria, and this tells some systems to wind down, such
+// as the event replay mechanism.
+func (obj *BaseRes) IsQuiescing() bool { return obj.quiescing }
+
+// QuiesceGroup returns the sync group associated with the quiesce operations.
+func (obj *BaseRes) QuiesceGroup() *sync.WaitGroup { return obj.quiesceGroup }
+
+// WaitGroup returns a sync.WaitGroup which is open when the resource is done.
+// This is more useful than a closed channel signal, since it can be re-used
+// safely without having to recreate it and worry about stale channel handles.
+func (obj *BaseRes) WaitGroup() *sync.WaitGroup { return obj.waitGroup }
+
+// Setup does some work which must happen before the Worker starts. It happens
+// once per Worker startup. It can happen in parallel with other Setup calls, so
+// add locks around any operation that's not thread-safe.
+func (obj *BaseRes) Setup() {
+	obj.started = make(chan struct{}) // closes when started
+	obj.stopped = make(chan struct{}) // closes when stopped
+
+	obj.eventsLock = &sync.Mutex{}
+	obj.eventsDone = false
+	obj.eventsChan = make(chan *event.Event) // unbuffered chan to avoid stale events
+}
+
+// Reset from Setup. These can get called for different vertices in parallel.
+func (obj *BaseRes) Reset() {
+	return
+}
+
+// Converger returns the converger object used by the system. It can be used to
+// register new convergers if needed.
+func (obj *BaseRes) Converger() converger.Converger {
+	return obj.data.Converger
+}
+
+// ConvergerUIDs returns the ConvergerUIDs for the resource. This is called by
+// the various methods that need one of these ConvergerUIDs. They are registered
+// by the Init method and unregistered on the resource Close.
+func (obj *BaseRes) ConvergerUIDs() (cuid, wcuid, pcuid converger.UID) {
+	return obj.cuid, obj.wcuid, obj.pcuid
 }

 // GetState returns the state of the resource.
@@ -308,6 +498,21 @@ func (obj *BaseRes) StateOK(b bool) {
 	obj.isStateOK = b
 }

+// ProcessChan returns the chan that resources send events to. Internal API!
+func (obj *BaseRes) ProcessChan() chan *event.Event { return obj.processChan }
+
+// ProcessSync returns the WaitGroup that blocks until the innerWorker closes.
+func (obj *BaseRes) ProcessSync() *sync.WaitGroup { return obj.processSync }
+
+// ProcessExit causes the innerWorker to close and waits until it does so.
+func (obj *BaseRes) ProcessExit() {
+	obj.processLock.Lock() // lock to avoid a send when closed!
+	obj.processDone = true
+	close(obj.processChan)
+	obj.processLock.Unlock()
+	obj.processSync.Wait()
+}
+
 // GroupCmp compares two resources and decides if they're suitable for grouping
 // You'll probably want to override this method when implementing a resource...
 func (obj *BaseRes) GroupCmp(res Res) bool {
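ProcessExit above closes processChan under processLock so that a concurrent sender can never hit an already-closed channel. The same pattern in isolation, under assumed names (closer, send, exit) that are not part of this change:

package main

import (
	"fmt"
	"sync"
)

// closer models the processLock/processDone/processChan trio: senders take the
// lock and check the done flag, so a close can never race a send.
type closer struct {
	mu   sync.Mutex
	done bool
	ch   chan int
	wg   sync.WaitGroup
}

// send delivers v unless the channel was already closed; it reports whether
// the value was accepted.
func (c *closer) send(v int) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.done {
		return false // too late; never send on a closed channel
	}
	c.ch <- v
	return true
}

// exit closes the channel exactly once and then waits for the consumer.
func (c *closer) exit() {
	c.mu.Lock()
	c.done = true
	close(c.ch)
	c.mu.Unlock()
	c.wg.Wait()
}

func main() {
	c := &closer{ch: make(chan int)}
	c.wg.Add(1)
	go func() { // consumer, in the role of the innerWorker
		defer c.wg.Done()
		for v := range c.ch {
			fmt.Println("got", v)
		}
	}()
	c.send(1)
	c.exit()
	fmt.Println(c.send(2)) // false: rejected instead of panicking
}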
@@ -317,10 +522,16 @@ func (obj *BaseRes) GroupCmp(res Res) bool {
 // GroupRes groups resource (arg) into self.
 func (obj *BaseRes) GroupRes(res Res) error {
 	if l := len(res.GetGroup()); l > 0 {
-		return fmt.Errorf("Res: %v already contains %d grouped resources!", res, l)
+		return fmt.Errorf("the %v resource already contains %d grouped resources", res, l)
 	}
 	if res.IsGrouped() {
-		return fmt.Errorf("Res: %v is already grouped!", res)
+		return fmt.Errorf("the %v resource is already grouped", res)
+	}
+
+	// merging two resources into one should yield the sum of their semas
+	if semas := res.Meta().Sema; len(semas) > 0 {
+		obj.Meta().Sema = append(obj.Meta().Sema, semas...)
+		obj.Meta().Sema = util.StrRemoveDuplicatesInList(obj.Meta().Sema)
 	}

 	obj.grouped = append(obj.grouped, res)
@@ -371,6 +582,33 @@ func (obj *BaseRes) Compare(res Res) bool {
 	if obj.Meta().Delay != res.Meta().Delay {
 		return false
 	}
+	if obj.Meta().Poll != res.Meta().Poll {
+		return false
+	}
+	if obj.Meta().Limit != res.Meta().Limit {
+		return false
+	}
+	if obj.Meta().Burst != res.Meta().Burst {
+		return false
+	}
+
+	// are the two slices the same?
+	cmpSlices := func(a, b []string) bool {
+		if len(a) != len(b) {
+			return false
+		}
+		sort.Strings(a)
+		sort.Strings(b)
+		for i := range a {
+			if a[i] != b[i] {
+				return false
+			}
+		}
+		return true
+	}
+	if !cmpSlices(obj.Meta().Sema, res.Meta().Sema) {
+		return false
+	}
 	return true
 }
 
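Note that the cmpSlices helper added above sorts its arguments in place, which also reorders the compared Meta().Sema slices. A hedged standalone variant that compares copies instead (this is not part of the patch, just an illustration of the same check):

```go
package main

import (
	"fmt"
	"sort"
)

// equalUnordered reports whether two string slices hold the same elements,
// ignoring order, without modifying the caller's slices.
func equalUnordered(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	ac := append([]string(nil), a...) // copy before sorting
	bc := append([]string(nil), b...)
	sort.Strings(ac)
	sort.Strings(bc)
	for i := range ac {
		if ac[i] != bc[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(equalUnordered([]string{"a:1", "b"}, []string{"b", "a:1"})) // true
	fmt.Println(equalUnordered([]string{"a"}, []string{"b"}))               // false
}
```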
@@ -385,52 +623,72 @@ func (obj *BaseRes) VarDir(extra string) (string, error) {
 // Using extra adds additional dirs onto our namespace. An empty extra
 // adds no additional directories.
 	if obj.prefix == "" {
-		return "", fmt.Errorf("VarDir prefix is empty!")
+		return "", fmt.Errorf("the VarDir prefix is empty")
 	}
 	if obj.Kind() == "" {
-		return "", fmt.Errorf("VarDir kind is empty!")
+		return "", fmt.Errorf("the VarDir kind is empty")
 	}
 	if obj.GetName() == "" {
-		return "", fmt.Errorf("VarDir name is empty!")
+		return "", fmt.Errorf("the VarDir name is empty")
 	}
 
 	// FIXME: is obj.GetName() sufficiently unique to use as a UID here?
 	uid := obj.GetName()
 	p := fmt.Sprintf("%s/", path.Join(obj.prefix, obj.Kind(), uid, extra))
 	if err := os.MkdirAll(p, 0770); err != nil {
-		return "", errwrap.Wrapf(err, "Can't create prefix for %s[%s]", obj.Kind(), obj.GetName())
+		return "", errwrap.Wrapf(err, "can't create prefix for %s[%s]", obj.Kind(), obj.GetName())
 	}
 	return p, nil
 }
 
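For reference, a small self-contained sketch of the <prefix>/<kind>/<name>/<extra>/ layout that VarDir builds; the prefix used here is just os.TempDir() for the example, not a real mgmt default:

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// varDir builds and creates a per-resource working directory.
func varDir(prefix, kind, name, extra string) (string, error) {
	if prefix == "" || kind == "" || name == "" {
		return "", fmt.Errorf("prefix, kind and name must all be set")
	}
	p := fmt.Sprintf("%s/", path.Join(prefix, kind, name, extra))
	if err := os.MkdirAll(p, 0770); err != nil {
		return "", fmt.Errorf("can't create %s: %v", p, err)
	}
	return p, nil
}

func main() {
	d, err := varDir(os.TempDir(), "svc", "nginx", "state")
	fmt.Println(d, err)
}
```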
-// ResToB64 encodes a resource to a base64 encoded string (after serialization)
-func ResToB64(res Res) (string, error) {
-	b := bytes.Buffer{}
-	e := gob.NewEncoder(&b)
-	err := e.Encode(&res) // pass with &
-	if err != nil {
-		return "", fmt.Errorf("Gob failed to encode: %v", err)
-	}
-	return base64.StdEncoding.EncodeToString(b.Bytes()), nil
-}
+// Started returns a channel that closes when the resource has started up.
+func (obj *BaseRes) Started() <-chan struct{} { return obj.started }
+
+// Stopped returns a channel that closes when the worker has finished running.
+func (obj *BaseRes) Stopped() <-chan struct{} { return obj.stopped }
+
+// Starter sets the starter bool. This defines if a vertex has an indegree of 0.
+// If we have an indegree of 0, we'll need to be a poke initiator in the graph.
+func (obj *BaseRes) Starter(b bool) { obj.starter = b }
+
+// Poll is the watch replacement for when we want to poll, which outputs events.
+func (obj *BaseRes) Poll() error {
+	cuid, _, _ := obj.ConvergerUIDs() // get the converger uid used to report status
+
+	// create a time.Ticker for the given interval
+	ticker := time.NewTicker(time.Duration(obj.Meta().Poll) * time.Second)
+	defer ticker.Stop()
+
+	// notify engine that we're running
+	if err := obj.Running(); err != nil {
+		return err // bubble up a NACK...
+	}
+	cuid.SetConverged(false) // quickly stop any converge due to Running()
+
+	var send = false
+	var exit *error
+	for {
+		select {
+		case <-ticker.C: // received the timer event
+			log.Printf("%s[%s]: polling...", obj.Kind(), obj.GetName())
+			send = true
+			obj.StateOK(false) // dirty
+
+		case event := <-obj.Events():
+			cuid.ResetTimer() // important
+			if exit, send = obj.ReadEvent(event); exit != nil {
+				return *exit // exit
+			}
+		}
+
+		if send {
+			send = false
+			obj.Event()
+		}
+	}
 }
 
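A stripped-down model of the new Poll loop above, using only the standard library; the quit channel stands in for the engine's event and exit handling, and the names are illustrative only:

```go
package main

import (
	"fmt"
	"time"
)

// pollLoop ticks at the given interval, marking the state dirty each time,
// until something closes quit.
func pollLoop(interval time.Duration, quit <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("polling: state is dirty, sending event")
		case <-quit:
			return
		}
	}
}

func main() {
	quit := make(chan struct{})
	go pollLoop(50*time.Millisecond, quit)
	time.Sleep(120 * time.Millisecond)
	close(quit)
}
```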
-// B64ToRes decodes a resource from a base64 encoded string (after deserialization)
-func B64ToRes(str string) (Res, error) {
-	var output interface{}
-	bb, err := base64.StdEncoding.DecodeString(str)
-	if err != nil {
-		return nil, fmt.Errorf("Base64 failed to decode: %v", err)
-	}
-	b := bytes.NewBuffer(bb)
-	d := gob.NewDecoder(b)
-	err = d.Decode(&output) // pass with &
-	if err != nil {
-		return nil, fmt.Errorf("Gob failed to decode: %v", err)
-	}
-	res, ok := output.(Res)
-	if !ok {
-		return nil, fmt.Errorf("Output %v is not a Res", res)
-	}
-	return res, nil
-}
+// Prometheus returns the prometheus instance.
+func (obj *BaseRes) Prometheus() *prometheus.Prometheus {
+	return obj.Data().Prometheus
 }
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -22,6 +22,7 @@ import (
 	"encoding/base64"
 	"encoding/gob"
 	"testing"
+	//"github.com/purpleidea/mgmt/event"
 )
 
 func TestMiscEncodeDecode1(t *testing.T) {
@@ -119,55 +120,55 @@ func TestIFF(t *testing.T) {
 }
 
 func TestReadEvent(t *testing.T) {
-	res := FileRes{}
+	//res := FileRes{}
 
-	shouldExit := map[eventName]bool{
-		eventStart:    false,
-		eventPoke:     false,
-		eventBackPoke: false,
-		eventExit:     true,
-	}
-	shouldPoke := map[eventName]bool{
-		eventStart:    true,
-		eventPoke:     true,
-		eventBackPoke: true,
-		eventExit:     false,
-	}
+	//shouldExit := map[event.Kind]bool{
+	//	event.EventStart:    false,
+	//	event.EventPoke:     false,
+	//	event.EventBackPoke: false,
+	//	event.EventExit:     true,
+	//}
+	//shouldPoke := map[event.Kind]bool{
+	//	event.EventStart:    true,
+	//	event.EventPoke:     true,
+	//	event.EventBackPoke: true,
+	//	event.EventExit:     false,
+	//}
 
-	for event := range shouldExit {
-		exit, poke := res.ReadEvent(&Event{Name: event})
-		if exit != shouldExit[event] {
-			t.Errorf("resource.ReadEvent returned wrong exit flag for a %v event (%v, should be %v)",
-				event, exit, shouldExit[event])
-		}
-		if poke != shouldPoke[event] {
-			t.Errorf("resource.ReadEvent returned wrong poke flag for a %v event (%v, should be %v)",
-				event, poke, shouldPoke[event])
-		}
-	}
+	//for ev := range shouldExit {
+	//	exit, poke := res.ReadEvent(&event.Event{Kind: ev})
+	//	if exit != shouldExit[ev] {
+	//		t.Errorf("resource.ReadEvent returned wrong exit flag for a %v event (%v, should be %v)",
+	//			ev, exit, shouldExit[ev])
+	//	}
+	//	if poke != shouldPoke[ev] {
+	//		t.Errorf("resource.ReadEvent returned wrong poke flag for a %v event (%v, should be %v)",
+	//			ev, poke, shouldPoke[ev])
+	//	}
+	//}
 
-	res.Init()
-	res.SetWatching(true)
+	//res.Init()
+	//res.SetWatching(true)
 
 	// test result when a pause event is followed by start
-	go res.SendEvent(eventStart, false, false)
-	exit, poke := res.ReadEvent(&Event{Name: eventPause})
-	if exit {
-		t.Error("resource.ReadEvent returned wrong exit flag for a pause+start event (true, should be false)")
-	}
-	if poke {
-		t.Error("resource.ReadEvent returned wrong poke flag for a pause+start event (true, should be false)")
-	}
+	//go res.SendEvent(event.EventStart, nil)
+	//exit, poke := res.ReadEvent(&event.Event{Kind: event.EventPause})
+	//if exit {
+	//	t.Error("resource.ReadEvent returned wrong exit flag for a pause+start event (true, should be false)")
+	//}
+	//if poke {
+	//	t.Error("resource.ReadEvent returned wrong poke flag for a pause+start event (true, should be false)")
+	//}
 
 	// test result when a pause event is followed by exit
-	go res.SendEvent(eventExit, false, false)
-	exit, poke = res.ReadEvent(&Event{Name: eventPause})
-	if !exit {
-		t.Error("resource.ReadEvent returned wrong exit flag for a pause+start event (false, should be true)")
-	}
-	if poke {
-		t.Error("resource.ReadEvent returned wrong poke flag for a pause+start event (true, should be false)")
-	}
+	//go res.SendEvent(event.EventExit, nil)
+	//exit, poke = res.ReadEvent(&event.Event{Kind: event.EventPause})
+	//if !exit {
+	//	t.Error("resource.ReadEvent returned wrong exit flag for a pause+start event (false, should be true)")
+	//}
+	//if poke {
+	//	t.Error("resource.ReadEvent returned wrong poke flag for a pause+start event (true, should be false)")
+	//}
 
 	// TODO: create a wrapper API around log, so that Fatals can be mocked and tested
 }
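If the disabled TestReadEvent table is ever re-enabled against the new event.Kind API, it would presumably keep this table-driven shape. Here is a neutral, self-contained sketch of that structure with a stand-in classifier instead of a real resource; nothing here is taken from the mgmt test suite:

```go
package main

import "testing"

// classify is a stand-in for ReadEvent: it maps an event name to the expected
// (exit, poke) pair, so the table below mirrors the shape of the disabled test.
func classify(name string) (exit, poke bool) {
	if name == "exit" {
		return true, false
	}
	return false, true
}

func TestClassify(t *testing.T) {
	shouldExit := map[string]bool{"start": false, "poke": false, "backpoke": false, "exit": true}
	shouldPoke := map[string]bool{"start": true, "poke": true, "backpoke": true, "exit": false}
	for name := range shouldExit {
		exit, poke := classify(name)
		if exit != shouldExit[name] {
			t.Errorf("wrong exit flag for %q: %v, want %v", name, exit, shouldExit[name])
		}
		if poke != shouldPoke[name] {
			t.Errorf("wrong poke flag for %q: %v, want %v", name, poke, shouldPoke[name])
		}
	}
}
```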
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -28,84 +28,142 @@ import (
 	errwrap "github.com/pkg/errors"
 )
 
-// SendEvent pushes an event into the message queue for a particular vertex
-func (obj *BaseRes) SendEvent(ev event.EventName, sync bool, activity bool) bool {
-	// TODO: isn't this race-y ?
-	if !obj.IsWatching() { // element has already exited
-		return false // if we don't return, we'll block on the send
-	}
-	if !sync {
-		obj.events <- event.Event{Name: ev, Resp: nil, Msg: "", Activity: activity}
-		return true
-	}
-
-	resp := event.NewResp()
-	obj.events <- event.Event{Name: ev, Resp: resp, Msg: "", Activity: activity}
-	resp.ACKWait() // waits until true (nil) value
-	return true
-}
-
-// DoSend sends off an event, but doesn't block the incoming event queue.
-func (obj *BaseRes) DoSend(processChan chan event.Event, comment string) (exit bool, err error) {
-	resp := event.NewResp()
-	processChan <- event.Event{Name: event.EventNil, Resp: resp, Activity: false, Msg: comment} // trigger process
-	e := resp.Wait()
-	return false, e // XXX: at the moment, we don't use the exit bool.
-}
+// Event sends off an event, but doesn't block the incoming event queue.
+func (obj *BaseRes) Event() error {
+	resp := event.NewResp()
+	obj.processLock.Lock()
+	if obj.processDone {
+		obj.processLock.Unlock()
+		return fmt.Errorf("processChan is already closed")
+	}
+	obj.quiesceGroup.Add(1) // add to processChan queue count
+	obj.processChan <- &event.Event{Kind: event.EventNil, Resp: resp} // trigger process
+	obj.processLock.Unlock()
+	return resp.Wait()
+}
+
+// SendEvent pushes an event into the message queue for a particular vertex.
+func (obj *BaseRes) SendEvent(ev event.Kind, err error) error {
+	if obj.debug {
+		if err == nil {
+			log.Printf("%s[%s]: SendEvent(%+v)", obj.Kind(), obj.GetName(), ev)
+		} else {
+			log.Printf("%s[%s]: SendEvent(%+v): %v", obj.Kind(), obj.GetName(), ev, err)
+		}
+	}
+	resp := event.NewResp()
+	obj.eventsLock.Lock()
+	if obj.eventsDone {
+		obj.eventsLock.Unlock()
+		return fmt.Errorf("eventsChan is already closed")
+	}
+	obj.eventsChan <- &event.Event{Kind: ev, Resp: resp, Err: err}
+	if ev == event.EventExit {
+		obj.eventsDone = true
+		close(obj.eventsChan) // this is where we properly close this channel!
+	}
+	obj.eventsLock.Unlock()
+	resp.ACKWait() // waits until true (nil) value
+	return nil
+}
 
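The new Event and SendEvent methods block on an event.Resp until the receiver acknowledges. A tiny stand-in for that request/acknowledge pattern, assuming nothing about the real event package beyond what the diff shows; all names here are illustrative:

```go
package main

import "fmt"

// resp is a miniature model of event.Resp: the receiver ACKs (or NACKs with an
// error) and the sender blocks in Wait until that happens.
type resp chan error

func newResp() resp { return make(resp) }

func (r resp) ACK()           { r <- nil }
func (r resp) NACK(err error) { r <- err }
func (r resp) Wait() error    { return <-r }

func main() {
	events := make(chan resp)
	go func() { // receiver side
		r := <-events
		r.ACK() // or r.NACK(fmt.Errorf("refused"))
	}()
	r := newResp()
	events <- r
	fmt.Println("sender got:", r.Wait()) // <nil>
}
```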
 // ReadEvent processes events when a select gets one, and handles the pause
 // code too! The return values specify if we should exit and poke respectively.
-func (obj *BaseRes) ReadEvent(ev *event.Event) (exit, send bool) {
-	ev.ACK()
-	var poke bool
-	// ensure that a CheckApply runs by sending with a dirty state...
-	if ev.GetActivity() { // if previous node did work, and we were notified...
-		obj.StateOK(false) // dirty
-		poke = true // poke!
-		// XXX: this should be elsewhere in case Watch isn't used (eg: Polling instead...)
-		// XXX: unless this is used in our "fallback" polling implementation???
-		//obj.SetRefresh(true) // TODO: is this redundant?
-	}
-
-	switch ev.Name {
+func (obj *BaseRes) ReadEvent(ev *event.Event) (exit *error, send bool) {
+	//ev.ACK()
+	err := ev.Error()
+
+	switch ev.Kind {
 	case event.EventStart:
-		send = true || poke
-		return
+		ev.ACK()
+		return nil, true
 
 	case event.EventPoke:
-		send = true || poke
-		return
+		ev.ACK()
+		return nil, true
 
 	case event.EventBackPoke:
-		send = true || poke
-		return // forward poking in response to a back poke!
+		ev.ACK()
+		return nil, true // forward poking in response to a back poke!
 
 	case event.EventExit:
+		obj.quiescing = true
+		obj.quiesceGroup.Wait()
+		obj.quiescing = false // for symmetry
+		ev.ACK()
 		// FIXME: what do we do if we have a pending refresh (poke) and an exit?
-		return true, false
+		return &err, false
 
 	case event.EventPause:
-		// wait for next event to continue
-		select {
-		case e, ok := <-obj.Events():
-			if !ok { // shutdown
-				return true, false
-			}
-			e.ACK()
-			if e.Name == event.EventExit {
-				return true, false
-			} else if e.Name == event.EventStart { // eventContinue
-				return false, false // don't poke on unpause!
-			} else {
-				// if we get a poke event here, it's a bug!
-				log.Fatalf("%s[%s]: Unknown event: %v, while paused!", obj.Kind(), obj.GetName(), e)
-			}
-		}
-
-	default:
-		log.Fatal("Unknown event: ", ev)
-	}
-	return true, false // required to keep the stupid go compiler happy
+		obj.quiescing = true // set the quiesce flag to avoid event replays
+		obj.quiesceGroup.Wait()
+		obj.quiescing = false // reset
+		ev.ACK()
+
+		// wait for next event to continue, but discard any backpoking!
+		for {
+			// Consider a graph (V2->V3). If while paused, we add a
+			// new resource (V1->V2), when we unpause, V3 will run,
+			// and then V2 followed by V1 (reverse topo sort) which
+			// can cause V2 to BackPoke to V1 (since V1 needs to go
+			// first) which can panic if V1 is not running yet! The
+			// solution is to ignore the BackPoke because once that
+			// V1 vertex gets running, it will then send off a poke
+			// to V2 that it did without the need for the BackPoke!
+			select {
+			case e, ok := <-obj.Events():
+				if !ok { // shutdown
+					err := error(nil)
+					return &err, false
+				}
+				//obj.quiescing = true
+				//obj.quiesceGroup.Wait() // unnecessary, but symmetrically correct
+				//obj.quiescing = false
+				e.ACK()
+				err := e.Error()
+				if e.Kind == event.EventExit {
+					return &err, false
+				} else if e.Kind == event.EventStart { // eventContinue
+					return nil, false // don't poke on unpause!
+				} else if e.Kind == event.EventBackPoke {
+					continue // silently discard this event while paused
+				}
+				// if we get a poke event here, it's a bug!
+				err = fmt.Errorf("%s[%s]: unknown event: %v, while paused", obj.Kind(), obj.GetName(), e)
+				panic(err) // TODO: return a special sentinel instead?
+				//return &err, false
+			}
+		}
+	}
+
+	err = fmt.Errorf("unknown event: %v", ev)
+	panic(err) // TODO: return a special sentinel instead?
+	//return &err, false
 }
 
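The paused branch of the new ReadEvent silently drops BackPoke events to avoid the V1/V2/V3 race described in the code comment. A simplified standalone model of that loop; the kinds here are a local enum, not the engine's real event types:

```go
package main

import "fmt"

type kind int

const (
	eventStart kind = iota
	eventBackPoke
	eventExit
)

// waitUnpause mirrors the paused loop: BackPoke is ignored, Start resumes
// without poking, Exit (or a closed channel) ends the run.
func waitUnpause(events <-chan kind) (exit bool) {
	for ev := range events {
		switch ev {
		case eventBackPoke:
			continue // ignore while paused
		case eventStart:
			return false // resume
		case eventExit:
			return true
		}
	}
	return true // channel closed counts as shutdown
}

func main() {
	ch := make(chan kind, 2)
	ch <- eventBackPoke
	ch <- eventStart
	fmt.Println("exit:", waitUnpause(ch)) // exit: false
}
```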
+// Running is called by the Watch method of the resource once it has started up.
+// This signals to the engine to kick off the initial CheckApply resource check.
+func (obj *BaseRes) Running() error {
+	// TODO: If a non-polling resource wants to use the converger, then it
+	// should probably tell Running (via an arg) to not do this. Currently
+	// it's a very unlikely race that could cause an early converge if the
+	// converge timeout is very short ( ~ 1s) and the Watch method doesn't
+	// immediately SetConverged(false) to stop possible early termination.
+	if obj.Meta().Poll == 0 { // if not polling, unblock this...
+		cuid, _, _ := obj.ConvergerUIDs()
+		cuid.SetConverged(true) // a reasonable initial assumption
+	}
+
+	obj.StateOK(false) // assume we're initially dirty
+	if !obj.isStarted { // this avoids a double close when/if watch retries
+		obj.isStarted = true
+		close(obj.started) // send started signal
+	}
+
+	var err error
+	if obj.starter { // vertices of indegree == 0 should send initial pokes
+		err = obj.Event() // trigger a CheckApply
+	}
+	return err // bubble up any possible error (or nil)
+}
 
 // Send points to a value that a resource will send.
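Running closes the started channel at most once, guarded by isStarted, so Watch retries cannot cause a double-close panic. The same idea expressed with sync.Once in a self-contained sketch (the type and names are made up for the example):

```go
package main

import (
	"fmt"
	"sync"
)

type running struct {
	once    sync.Once
	started chan struct{}
}

// markRunning closes started exactly once, no matter how often it is called.
func (r *running) markRunning() {
	r.once.Do(func() { close(r.started) })
}

func main() {
	r := &running{started: make(chan struct{})}
	r.markRunning()
	r.markRunning() // harmless on a watch retry
	<-r.started     // waiters unblock here
	fmt.Println("resource started")
}
```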
@@ -147,7 +205,7 @@ func (obj *BaseRes) SendRecv(res Res) (map[string]bool, error) {
 
 			// i think we probably want the same kind, at least for now...
 			if kind1 != kind2 {
-				e := fmt.Errorf("Kind mismatch between %s[%s]: %s and %s[%s]: %s", v.Res.Kind(), v.Res.GetName(), kind1, obj.Kind(), obj.GetName(), kind2)
+				e := fmt.Errorf("kind mismatch between %s[%s]: %s and %s[%s]: %s", v.Res.Kind(), v.Res.GetName(), kind1, obj.Kind(), obj.GetName(), kind2)
 				err = multierr.Append(err, e) // list of errors
 				continue
 			}
@@ -155,21 +213,21 @@ func (obj *BaseRes) SendRecv(res Res) (map[string]bool, error) {
 			// if the types don't match, we can't use send->recv
 			// TODO: do we want to relax this for string -> *string ?
 			if e := TypeCmp(value1, value2); e != nil {
-				e := errwrap.Wrapf(e, "Type mismatch between %s[%s] and %s[%s]", v.Res.Kind(), v.Res.GetName(), obj.Kind(), obj.GetName())
+				e := errwrap.Wrapf(e, "type mismatch between %s[%s] and %s[%s]", v.Res.Kind(), v.Res.GetName(), obj.Kind(), obj.GetName())
 				err = multierr.Append(err, e) // list of errors
 				continue
 			}
 
 			// if we can't set, then well this is pointless!
 			if !value2.CanSet() {
-				e := fmt.Errorf("Can't set %s[%s].%s", obj.Kind(), obj.GetName(), k)
+				e := fmt.Errorf("can't set %s[%s].%s", obj.Kind(), obj.GetName(), k)
 				err = multierr.Append(err, e) // list of errors
 				continue
 			}
 
 			// if we can't interface, we can't compare...
 			if !value1.CanInterface() || !value2.CanInterface() {
-				e := fmt.Errorf("Can't interface %s[%s].%s", obj.Kind(), obj.GetName(), k)
+				e := fmt.Errorf("can't interface %s[%s].%s", obj.Kind(), obj.GetName(), k)
 				err = multierr.Append(err, e) // list of errors
 				continue
 			}
@@ -191,7 +249,7 @@ func (obj *BaseRes) SendRecv(res Res) (map[string]bool, error) {
 func TypeCmp(a, b reflect.Value) error {
 	ta, tb := a.Type(), b.Type()
 	if ta != tb {
-		return fmt.Errorf("Type mismatch: %s != %s", ta, tb)
+		return fmt.Errorf("type mismatch: %s != %s", ta, tb)
 	}
 	// NOTE: it seems we don't need to recurse into pointers to sub check!
 
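A minimal, runnable illustration of the TypeCmp check used by SendRecv; it only assumes the standard reflect package:

```go
package main

import (
	"fmt"
	"reflect"
)

// typeCmp reports an error unless both values have exactly the same type,
// which is what the send->recv machinery requires before copying a value.
func typeCmp(a, b reflect.Value) error {
	if ta, tb := a.Type(), b.Type(); ta != tb {
		return fmt.Errorf("type mismatch: %s != %s", ta, tb)
	}
	return nil
}

func main() {
	fmt.Println(typeCmp(reflect.ValueOf("x"), reflect.ValueOf("y"))) // <nil>
	fmt.Println(typeCmp(reflect.ValueOf("x"), reflect.ValueOf(42)))  // type mismatch
}
```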
resources/svc.go (154 lines changed)
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -23,9 +23,7 @@ import (
 	"encoding/gob"
 	"fmt"
 	"log"
-	"time"
-
-	"github.com/purpleidea/mgmt/event"
 	"github.com/purpleidea/mgmt/util"
 
 	systemd "github.com/coreos/go-systemd/dbus" // change namespace
@@ -45,69 +43,49 @@ type SvcRes struct {
 	Startup string `yaml:"startup"` // enabled, disabled, undefined
 }
 
-// NewSvcRes is a constructor for this resource. It also calls Init() for you.
-func NewSvcRes(name, state, startup string) (*SvcRes, error) {
-	obj := &SvcRes{
+// Default returns some sensible defaults for this resource.
+func (obj *SvcRes) Default() Res {
+	return &SvcRes{
 		BaseRes: BaseRes{
-			Name: name,
+			MetaParams: DefaultMetaParams, // force a default
 		},
-		State:   state,
-		Startup: startup,
 	}
-	return obj, obj.Init()
-}
-
-// Init runs some startup code for this resource.
-func (obj *SvcRes) Init() error {
-	obj.BaseRes.kind = "Svc"
-	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
 // Validate checks if the resource data structure was populated correctly.
 func (obj *SvcRes) Validate() error {
 	if obj.State != "running" && obj.State != "stopped" && obj.State != "" {
-		return fmt.Errorf("State must be either `running` or `stopped` or undefined.")
+		return fmt.Errorf("state must be either `running` or `stopped` or undefined")
 	}
 	if obj.Startup != "enabled" && obj.Startup != "disabled" && obj.Startup != "" {
-		return fmt.Errorf("Startup must be either `enabled` or `disabled` or undefined.")
+		return fmt.Errorf("startup must be either `enabled` or `disabled` or undefined")
 	}
-	return nil
+	return obj.BaseRes.Validate()
+}
+
+// Init runs some startup code for this resource.
+func (obj *SvcRes) Init() error {
+	obj.BaseRes.kind = "svc"
+	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
 // Watch is the primary listener for this resource and it outputs events.
-func (obj *SvcRes) Watch(processChan chan event.Event) error {
-	if obj.IsWatching() {
-		return nil
-	}
-	obj.SetWatching(true)
-	defer obj.SetWatching(false)
-	cuid := obj.converger.Register()
-	defer cuid.Unregister()
-
-	var startup bool
-	Startup := func(block bool) <-chan time.Time {
-		if block {
-			return nil // blocks forever
-			//return make(chan time.Time) // blocks forever
-		}
-		return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
-	}
-
+func (obj *SvcRes) Watch() error {
 	// obj.Name: svc name
 	if !systemdUtil.IsRunningSystemd() {
-		return fmt.Errorf("Systemd is not running.")
+		return fmt.Errorf("systemd is not running")
 	}
 
 	conn, err := systemd.NewSystemdConnection() // needs root access
 	if err != nil {
-		return errwrap.Wrapf(err, "Failed to connect to systemd")
+		return errwrap.Wrapf(err, "failed to connect to systemd")
 	}
 	defer conn.Close()
 
 	// if we share the bus with others, we will get each others messages!!
 	bus, err := util.SystemBusPrivateUsable() // don't share the bus connection!
 	if err != nil {
-		return errwrap.Wrapf(err, "Failed to connect to bus")
+		return errwrap.Wrapf(err, "failed to connect to bus")
 	}
 
 	// XXX: will this detect new units?
@@ -116,9 +94,14 @@ func (obj *SvcRes) Watch(processChan chan event.Event) error {
 	buschan := make(chan *dbus.Signal, 10)
 	bus.Signal(buschan)
 
+	// notify engine that we're running
+	if err := obj.Running(); err != nil {
+		return err // bubble up a NACK...
+	}
+
 	var svc = fmt.Sprintf("%s.service", obj.Name) // systemd name
 	var send = false // send event?
-	var exit = false
+	var exit *error
 	var invalid = false // does the svc exist or not?
 	var previous bool   // previous invalid value
 	set := conn.NewSubscriptionSet() // no error should be returned
@@ -159,27 +142,15 @@ func (obj *SvcRes) Watch(processChan chan event.Event) error {
 			set.Remove(svc) // no return value should ever occur
 		}
 
-		obj.SetState(ResStateWatching) // reset
 		select {
 		case <-buschan: // XXX: wait for new units event to unstick
-			cuid.SetConverged(false)
 			// loop so that we can see the changed invalid signal
 			log.Printf("Svc[%s]->DaemonReload()", svc)
 
 		case event := <-obj.Events():
-			cuid.SetConverged(false)
-			if exit, send = obj.ReadEvent(&event); exit {
-				return nil // exit
+			if exit, send = obj.ReadEvent(event); exit != nil {
+				return *exit // exit
 			}
-
-		case <-cuid.ConvergedTimer():
-			cuid.SetConverged(true) // converged!
-			continue
-
-		case <-Startup(startup):
-			cuid.SetConverged(false)
-			send = true
-			obj.StateOK(false) // dirty
 		}
 	} else {
 		if !activeSet {
@@ -188,7 +159,6 @@ func (obj *SvcRes) Watch(processChan chan event.Event) error {
 		}
 
 		log.Printf("Watching: %s", svc) // attempting to watch...
-		obj.SetState(ResStateWatching) // reset
 		select {
 		case event := <-subChannel:
 
@@ -204,6 +174,8 @@ func (obj *SvcRes) Watch(processChan chan event.Event) error {
 				log.Printf("Svc[%s]->Stopped", svc)
 			case "reloading":
 				log.Printf("Svc[%s]->Reloading", svc)
+			case "failed":
+				log.Printf("Svc[%s]->Failed", svc)
 			default:
 				log.Fatalf("Unknown svc state: %s", event[svc].ActiveState)
 			}
@@ -215,32 +187,18 @@ func (obj *SvcRes) Watch(processChan chan event.Event) error {
 			obj.StateOK(false) // dirty
 
 		case err := <-subErrors:
-			cuid.SetConverged(false)
-			return errwrap.Wrapf(err, "Unknown %s[%s] error", obj.Kind(), obj.GetName())
+			return errwrap.Wrapf(err, "unknown %s[%s] error", obj.Kind(), obj.GetName())
 
 		case event := <-obj.Events():
-			cuid.SetConverged(false)
-			if exit, send = obj.ReadEvent(&event); exit {
-				return nil // exit
+			if exit, send = obj.ReadEvent(event); exit != nil {
+				return *exit // exit
 			}
-
-		case <-cuid.ConvergedTimer():
-			cuid.SetConverged(true) // converged!
-			continue
-
-		case <-Startup(startup):
-			cuid.SetConverged(false)
-			send = true
-			obj.StateOK(false) // dirty
 		}
 	}
 
 		if send {
-			startup = true // startup finished
 			send = false
-			if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
-				return err // we exit or bubble up a NACK...
-			}
+			obj.Event()
 		}
 	}
 }
@@ -249,12 +207,12 @@ func (obj *SvcRes) Watch(processChan chan event.Event) error {
 // input is true. It returns error info and if the state check passed or not.
 func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
 	if !systemdUtil.IsRunningSystemd() {
-		return false, fmt.Errorf("Systemd is not running.")
+		return false, fmt.Errorf("systemd is not running")
 	}
 
 	conn, err := systemd.NewSystemdConnection() // needs root access
 	if err != nil {
-		return false, errwrap.Wrapf(err, "Failed to connect to systemd")
+		return false, errwrap.Wrapf(err, "failed to connect to systemd")
 	}
 	defer conn.Close()
 
@@ -262,13 +220,13 @@ func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
 
 	loadstate, err := conn.GetUnitProperty(svc, "LoadState")
 	if err != nil {
-		return false, errwrap.Wrapf(err, "Failed to get load state")
+		return false, errwrap.Wrapf(err, "failed to get load state")
 	}
 
 	// NOTE: we have to compare variants with other variants, they are really strings...
 	var notFound = (loadstate.Value == dbus.MakeVariant("not-found"))
 	if notFound {
-		return false, errwrap.Wrapf(err, "Failed to find svc: %s", svc)
+		return false, errwrap.Wrapf(err, "failed to find svc: %s", svc)
 	}
 
 	// XXX: check svc "enabled at boot" or not status...
@@ -276,7 +234,7 @@ func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
 	//conn.GetUnitProperties(svc)
 	activestate, err := conn.GetUnitProperty(svc, "ActiveState")
 	if err != nil {
-		return false, errwrap.Wrapf(err, "Failed to get active state")
+		return false, errwrap.Wrapf(err, "failed to get active state")
 	}
 
 	var running = (activestate.Value == dbus.MakeVariant("active"))
@@ -304,7 +262,7 @@ func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
 	}
 
 	if err != nil {
-		return false, errwrap.Wrapf(err, "Unable to change startup status")
+		return false, errwrap.Wrapf(err, "unable to change startup status")
 	}
 
 	// XXX: do we need to use a buffered channel here?
@@ -313,7 +271,7 @@ func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
 	if obj.State == "running" {
 		_, err = conn.StartUnit(svc, "fail", result)
 		if err != nil {
-			return false, errwrap.Wrapf(err, "Failed to start unit")
+			return false, errwrap.Wrapf(err, "failed to start unit")
 		}
 		if refresh {
 			log.Printf("%s[%s]: Skipping reload, due to pending start", obj.Kind(), obj.GetName())
@@ -322,7 +280,7 @@ func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
 	} else if obj.State == "stopped" {
 		_, err = conn.StopUnit(svc, "fail", result)
 		if err != nil {
-			return false, errwrap.Wrapf(err, "Failed to stop unit")
+			return false, errwrap.Wrapf(err, "failed to stop unit")
 		}
 		if refresh {
 			log.Printf("%s[%s]: Skipping reload, due to pending stop", obj.Kind(), obj.GetName())
@@ -332,10 +290,10 @@ func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
 
 	status := <-result
 	if &status == nil {
-		return false, fmt.Errorf("Systemd service action result is nil")
+		return false, fmt.Errorf("systemd service action result is nil")
 	}
 	if status != "done" {
-		return false, fmt.Errorf("Unknown systemd return string: %v", status)
+		return false, fmt.Errorf("unknown systemd return string: %v", status)
 	}
 
 	if refresh { // we need to reload the service
@@ -377,7 +335,7 @@ type SvcResAutoEdges struct {
 // Next returns the next automatic edge.
 func (obj *SvcResAutoEdges) Next() []ResUID {
 	if obj.found {
-		log.Fatal("Shouldn't be called anymore!")
+		log.Fatal("shouldn't be called anymore!")
 	}
 	if len(obj.data) == 0 { // check length for rare scenarios
 		return nil
@@ -397,7 +355,7 @@ func (obj *SvcResAutoEdges) Test(input []bool) bool {
 		return false
 	}
 	if len(input) != 1 { // in case we get given bad data
-		log.Fatal("Expecting a single value!")
+		log.Fatal("expecting a single value")
 	}
 	if input[0] { // if a match is found, we're done!
 		obj.found = true // no more to find!
@@ -431,9 +389,9 @@ func (obj *SvcRes) AutoEdges() AutoEdge {
 	}
 }
 
-// GetUIDs includes all params to make a unique identification of this object.
+// UIDs includes all params to make a unique identification of this object.
 // Most resources only return one, although some resources can return multiple.
-func (obj *SvcRes) GetUIDs() []ResUID {
+func (obj *SvcRes) UIDs() []ResUID {
 	x := &SvcUID{
 		BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
 		name:    obj.Name, // svc name
@@ -476,3 +434,23 @@ func (obj *SvcRes) Compare(res Res) bool {
 	}
 	return true
 }
+
+// UnmarshalYAML is the custom unmarshal handler for this struct.
+// It is primarily useful for setting the defaults.
+func (obj *SvcRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type rawRes SvcRes // indirection to avoid infinite recursion
+
+	def := obj.Default()     // get the default
+	res, ok := def.(*SvcRes) // put in the right format
+	if !ok {
+		return fmt.Errorf("could not convert to SvcRes")
+	}
+	raw := rawRes(*res) // convert; the defaults go here
+
+	if err := unmarshal(&raw); err != nil {
+		return err
+	}
+
+	*obj = SvcRes(raw) // restore from indirection with type conversion!
+	return nil
+}
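The UnmarshalYAML added above uses a `type rawRes SvcRes` definition to inject defaults without recursing back into the custom unmarshaler. A generic sketch of that trick with gopkg.in/yaml.v2; the config type and its fields are made up for the example and the yaml dependency is assumed to be available:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type config struct {
	State    string `yaml:"state"`
	Interval int    `yaml:"interval"`
}

// UnmarshalYAML decodes into a method-free copy of the type, pre-filled with
// defaults, so the custom unmarshaler can't call itself recursively.
func (obj *config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type rawConfig config          // no methods -> no recursion
	raw := rawConfig{Interval: 30} // defaults go here
	if err := unmarshal(&raw); err != nil {
		return err
	}
	*obj = config(raw)
	return nil
}

func main() {
	var c config
	if err := yaml.Unmarshal([]byte("state: running\n"), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // Interval keeps its default of 30
}
```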
@@ -1,5 +1,5 @@
 // Mgmt
-// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
 // Written by James Shubin <james@shubin.ca> and the project contributors
 //
 // This program is free software: you can redistribute it and/or modify
@@ -19,10 +19,9 @@ package resources
 
 import (
 	"encoding/gob"
+	"fmt"
 	"log"
 	"time"
-
-	"github.com/purpleidea/mgmt/event"
 )
 
 func init() {
@@ -32,7 +31,7 @@ func init() {
 // TimerRes is a timer resource for time based events.
 type TimerRes struct {
 	BaseRes `yaml:",inline"`
-	Interval int `yaml:"interval"` // Interval : Interval between runs
+	Interval uint32 `yaml:"interval"` // Interval : Interval between runs
 
 	ticker *time.Ticker
 }
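With the Interval field changed from int to uint32 above, turning it into a ticker period still needs an explicit conversion to time.Duration. A tiny runnable reminder of that cast (the value 30 is just an example):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var interval uint32 = 30
	d := time.Duration(interval) * time.Second // explicit cast, as in newTicker
	fmt.Println(d)                             // 30s
}
```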
@@ -43,87 +42,59 @@ type TimerUID struct {
 	name string
 }
 
-// NewTimerRes is a constructor for this resource. It also calls Init() for you.
-func NewTimerRes(name string, interval int) (*TimerRes, error) {
-	obj := &TimerRes{
+// Default returns some sensible defaults for this resource.
+func (obj *TimerRes) Default() Res {
+	return &TimerRes{
 		BaseRes: BaseRes{
-			Name: name,
+			MetaParams: DefaultMetaParams, // force a default
 		},
-		Interval: interval,
 	}
-	return obj, obj.Init()
+}
+
+// Validate the params that are passed to TimerRes.
+func (obj *TimerRes) Validate() error {
+	return obj.BaseRes.Validate()
 }
 
 // Init runs some startup code for this resource.
 func (obj *TimerRes) Init() error {
-	obj.BaseRes.kind = "Timer"
+	obj.BaseRes.kind = "timer"
 	return obj.BaseRes.Init() // call base init, b/c we're overrriding
 }
 
-// Validate the params that are passed to TimerRes
-// Currently we are getting only an interval in seconds
-// which gets validated by go compiler
-func (obj *TimerRes) Validate() error {
-	return nil
-}
-
 // newTicker creates a new ticker
 func (obj *TimerRes) newTicker() *time.Ticker {
 	return time.NewTicker(time.Duration(obj.Interval) * time.Second)
 }
 
 // Watch is the primary listener for this resource and it outputs events.
-func (obj *TimerRes) Watch(processChan chan event.Event) error {
-	if obj.IsWatching() {
-		return nil
-	}
-	obj.SetWatching(true)
-	defer obj.SetWatching(false)
-	cuid := obj.converger.Register()
-	defer cuid.Unregister()
-
-	var startup bool
-	Startup := func(block bool) <-chan time.Time {
-		if block {
-			return nil // blocks forever
-			//return make(chan time.Time) // blocks forever
-		}
-		return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
-	}
-
+func (obj *TimerRes) Watch() error {
 	// create a time.Ticker for the given interval
 	obj.ticker = obj.newTicker()
 	defer obj.ticker.Stop()
 
+	// notify engine that we're running
+	if err := obj.Running(); err != nil {
+		return err // bubble up a NACK...
+	}
+
 	var send = false
 
 	for {
-		obj.SetState(ResStateWatching)
 		select {
 		case <-obj.ticker.C: // received the timer event
			send = true
 			log.Printf("%s[%s]: received tick", obj.Kind(), obj.GetName())
 
 		case event := <-obj.Events():
-			cuid.SetConverged(false)
-			if exit, _ := obj.ReadEvent(&event); exit {
-				return nil
+			if exit, _ := obj.ReadEvent(event); exit != nil {
+				return *exit // exit
 			}
-
-		case <-cuid.ConvergedTimer():
-			cuid.SetConverged(true)
-			continue
-
-		case <-Startup(startup):
-			cuid.SetConverged(false)
-			send = true
 		}
 
 		if send {
-			startup = true // startup finished
 			send = false
-			if exit, err := obj.DoSend(processChan, "timer ticked"); exit || err != nil {
-				return err // we exit or bubble up a NACK...
-			}
+			obj.Event()
 		}
 	}
 }
@@ -144,9 +115,9 @@ func (obj *TimerRes) CheckApply(apply bool) (bool, error) {
 	return false, nil
 }
 
-// GetUIDs includes all params to make a unique identification of this object.
+// UIDs includes all params to make a unique identification of this object.
 // Most resources only return one, although some resources can return multiple.
-func (obj *TimerRes) GetUIDs() []ResUID {
+func (obj *TimerRes) UIDs() []ResUID {
 	x := &TimerUID{
 		BaseUID: BaseUID{
 			name: obj.GetName(),
@@ -181,3 +152,23 @@ func (obj *TimerRes) Compare(res Res) bool {
 	}
 	return true
 }
+
+// UnmarshalYAML is the custom unmarshal handler for this struct.
+// It is primarily useful for setting the defaults.
+func (obj *TimerRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type rawRes TimerRes // indirection to avoid infinite recursion
+
+	def := obj.Default()       // get the default
+	res, ok := def.(*TimerRes) // put in the right format
+	if !ok {
+		return fmt.Errorf("could not convert to TimerRes")
+	}
+	raw := rawRes(*res) // convert; the defaults go here
+
+	if err := unmarshal(&raw); err != nil {
+		return err
+	}
+
+	*obj = TimerRes(raw) // restore from indirection with type conversion!
+	return nil
+}
resources/util.go (new file, 59 lines)
@@ -0,0 +1,59 @@
+// Mgmt
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
+// Written by James Shubin <james@shubin.ca> and the project contributors
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package resources
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+	"fmt"
+
+	errwrap "github.com/pkg/errors"
+)
+
+// ResToB64 encodes a resource to a base64 encoded string (after serialization).
+func ResToB64(res Res) (string, error) {
+	b := bytes.Buffer{}
+	e := gob.NewEncoder(&b)
+	err := e.Encode(&res) // pass with &
+	if err != nil {
+		return "", errwrap.Wrapf(err, "gob failed to encode")
+	}
+	return base64.StdEncoding.EncodeToString(b.Bytes()), nil
+}
+
+// B64ToRes decodes a resource from a base64 encoded string (after deserialization).
+func B64ToRes(str string) (Res, error) {
+	var output interface{}
+	bb, err := base64.StdEncoding.DecodeString(str)
+	if err != nil {
+		return nil, errwrap.Wrapf(err, "base64 failed to decode")
+	}
+	b := bytes.NewBuffer(bb)
+	d := gob.NewDecoder(b)
+	err = d.Decode(&output) // pass with &
+	if err != nil {
+		return nil, errwrap.Wrapf(err, "gob failed to decode")
+	}
+	res, ok := output.(Res)
+	if !ok {
+		return nil, fmt.Errorf("Output %v is not a Res", res)
+	}
+	return res, nil
+}
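A hedged usage sketch of the ResToB64/B64ToRes idea moved into util.go above: gob needs the concrete type registered before it can round-trip a value through an interface. This uses a toy type, not a real mgmt resource:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/gob"
	"fmt"
)

// noop is a stand-in for a concrete resource type.
type noop struct{ Name string }

func main() {
	gob.Register(&noop{}) // register the concrete type for interface encoding

	var in interface{} = &noop{Name: "example"}
	b := bytes.Buffer{}
	if err := gob.NewEncoder(&b).Encode(&in); err != nil { // pass with &
		panic(err)
	}
	s := base64.StdEncoding.EncodeToString(b.Bytes())

	bb, _ := base64.StdEncoding.DecodeString(s)
	var out interface{}
	if err := gob.NewDecoder(bytes.NewBuffer(bb)).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out.(*noop)) // &{Name:example}
}
```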
(One file's diff was suppressed because it is too large.)

resources/virt_disabled.go (new file, 24 lines)
@@ -0,0 +1,24 @@
+// Mgmt
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
+// Written by James Shubin <james@shubin.ca> and the project contributors
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+// +build novirt
+
+package resources
+
+// VirtRes represents the fields of the Virt resource. Since this file is
+// only invoked with the tag "novirt", we do not need any fields here.
+type VirtRes struct {
+}
spec.in (6 lines changed)
@@ -13,18 +13,18 @@ Source0: https://dl.fedoraproject.org/pub/alt/purpleidea/__PROGRAM__/SOURCES/__P
 Requires: graphviz
 
 # If go_compiler is not set to 1, there is no virtual provide. Use golang instead.
 BuildRequires: %{?go_compiler:compiler(go-compiler)}%{!?go_compiler:golang}
 BuildRequires: golang-googlecode-tools-stringer
 BuildRequires: git-core
 BuildRequires: mercurial
 
 ExclusiveArch: %{go_arches}
 
 %description
 A next generation config management prototype!
 
 %prep
-%setup
+%setup -q
 
 %build
 # FIXME: in the future, these could be vendor-ed in
tag.sh (4 lines changed)
@@ -7,5 +7,7 @@ echo "Version $t is now tagged!"
 echo "Pushing $t to origin..."
 echo "Press ^C within 3s to abort."
 sleep 3s
-git tag $t
+echo "release: tag $t" | git tag --file=- --sign $t
 git push origin $t
+git diff --stat "$v" "$t"
+if which contrib.sh 2>/dev/null; then contrib.sh "$v"; fi
test.sh (5 lines changed)
@@ -21,13 +21,14 @@ run-test ./test/test-gofmt.sh
 run-test ./test/test-yamlfmt.sh
 run-test ./test/test-bashfmt.sh
 run-test ./test/test-headerfmt.sh
-run-test go test
+run-test ./test/test-commit-message.sh
 run-test ./test/test-govet.sh
+run-test ./test/test-gotest.sh
 
 # do these longer tests only when running on ci
 if env | grep -q -e '^TRAVIS=true$' -e '^JENKINS_URL=' -e '^BUILD_TAG=jenkins'; then
-	run-test go test -race
 	run-test ./test/test-shell.sh
+	run-test ./test/test-gotest.sh --race
 fi
 
 # FIXME: this now fails everywhere :(
test/shell/augeas-1.sh (new executable file, 30 lines)
@@ -0,0 +1,30 @@
+#!/bin/bash -e
+
+if env | grep -q -e '^TRAVIS=true$'; then
+	# inotify doesn't seem to work properly on travis
+	echo "Travis and Jenkins give wonky results here, skipping test!"
+	exit
+fi
+
+mkdir -p "${MGMT_TMPDIR}"
+> "${MGMT_TMPDIR}"sshd_config
+
+# run empty graph, with prometheus support
+timeout --kill-after=20s 15s ./mgmt run --tmp-prefix --yaml=augeas-1.yaml &
+pid=$!
+sleep 5s	# let it converge
+
+grep "X11Forwarding no" "${MGMT_TMPDIR}"sshd_config
+
+sed -i "s/no/yes/" "${MGMT_TMPDIR}"sshd_config
+
+grep "X11Forwarding yes" "${MGMT_TMPDIR}"sshd_config
+
+sleep 3	# Augeas is slow
+
+grep "X11Forwarding no" "${MGMT_TMPDIR}"sshd_config
+
+
+killall -SIGINT mgmt	# send ^C to exit mgmt
+wait $pid	# get exit status
+exit $?
Some files were not shown because too many files have changed in this diff.