Compare commits

112 Commits (SHA1):

70f8d54a31, 4ef25a33fc, f5dd90a8dd, a84defd689, 1cf88d9540, 644a0ee8c8, e9d5dc8fee, 8003202beb,
b46432b5b6, 5e3f03df06, 8ab8e6679a, 786b896018, 40723f8705, 2a0721bddf, ff01e4a5e7, 6794aff77c,
636f2a36b1, eee652cefe, 6d45cd45d1, f5fb135793, 6bf32c978a, 8d3011fb9c, 9260066fa3, 5e45c5805b,
db4de12767, d429795737, 276219a691, 03c1df98f4, 79ba750dd5, 1d0e187838, ad1e48aa2d, 7032eea045,
bdb970203c, fa4f5abc78, 0c7b05b233, 4ca98b5f17, 4e00c78410, 17adb19c0d, 1db936e253, 7194ba7e0e,
59b9b6f091, c1ec8d15f3, 24ba6abc6b, f6c1bba3b6, a606961a22, cafe0e4ec2, e28c1266cf, c1605a4f22,
7aeb55de70, 8ca65f9fda, 94524d1156, a1ed03478b, 402a6379b9, 5d45bcd552, f1fa64c170, 50fc78564c,
3e5863dc8a, 94b447a9c5, 78d769797f, 672baae126, e942d71ed2, f5d24cf86c, f63b1cd56d, 66719b3cda,
a5e9f6a6fc, f821afdf3e, 2c61de83c6, 6da6f75b88, a55807a708, fce86b0d08, d26b503dca, 5363839ac8,
715a4bf393, 8f83ecee65, 2eed4bda42, f4e1e24ca7, 05c540e6cc, 9656390c87, 4b6470d1e1, 56471c2fe4,
9f56e4a582, 12ea860eba, b876c29862, 6bbce039aa, 1584f20220, dcad5abc1c, ab73261fd4, 05b75c0a44,
ba7ef0788e, 3aaa80974e, 995ca32eee, bf5f48b85b, d6e386a555, a0a71f683c, 7adf88b55b, 8a9d47fc4b,
2a0a69c917, aeab8f55bd, 9407050598, b99da63306, f0d6cfaae4, 3120628d8a, 2654384461, eac3b25dc9,
7788f91dd5, d0c9b7170c, d84caa5528, 2ab72bdf94, f6833fde29, fa8a50b525, d80c6bbf1d, 6f3ac4bf2a
1  .dockerignore  (new file)
@@ -0,0 +1 @@
+docker
19  .editorconfig  (new file)
@@ -0,0 +1,19 @@
+; This file is for unifying the coding style for different editors and IDEs.
+; Plugins are available for notepad++, emacs, vim, gedit,
+; textmate, visual studio, and more.
+;
+; See http://editorconfig.org for details.
+
+# Top-most EditorConfig file.
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.go]
+indent_style = tab
+
+[Makefile]
+indent_style = tab
1  .gitignore  (vendored)
@@ -6,4 +6,5 @@ old/
 tmp/
 *_stringer.go
 mgmt
+mgmt.static
 rpmbuild/
3  .gitmodules  (vendored, new file)
@@ -0,0 +1,3 @@
+[submodule "vendor/github.com/coreos/etcd"]
+	path = vendor/github.com/coreos/etcd
+	url = https://github.com/coreos/etcd/
@@ -9,9 +9,11 @@ before_install: 'git fetch --unshallow'
 install: 'make deps'
 script: 'make test'
 matrix:
+  fast_finish: true
   allow_failures:
     - go: tip
     - go: 1.4.3
+    - go: 1.6
 notifications:
   irc:
     channels:
1  AUTHORS
@@ -5,3 +5,4 @@ For a more exhaustive list please run: git log --format='%aN' | sort -u
 This list is sorted alphabetically by first name.
 
 James Shubin
+Paul Morgan
184  DOCUMENTATION.md
@@ -33,6 +33,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 4. [Features - All things mgmt can do](#features)
 * [Autoedges - Automatic resource relationships](#autoedges)
 * [Autogrouping - Automatic resource grouping](#autogrouping)
+* [Automatic clustering - Automatic cluster management](#automatic-clustering)
+* [Remote mode - Remote "agent-less" execution](#remote-agent-less-mode)
 5. [Usage/FAQ - Notes on usage and frequently asked questions](#usage-and-frequently-asked-questions)
 6. [Reference - Detailed reference](#reference)
 * [Graph definition file](#graph-definition-file)
@@ -43,8 +45,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 ##Overview
 
-The `mgmt` tool is a research prototype to demonstrate next generation config
-management techniques. Hopefully it will evolve into a useful, robust tool.
+The `mgmt` tool is a next generation config management prototype. It's not yet
+ready for production, but we hope to get there soon. Get involved today!
 
 ##Project Description
 
@@ -56,8 +58,11 @@ For more information, you may like to read some blog posts from the author:
 
 * [Next generation config mgmt](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
 * [Automatic edges in mgmt](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
+* [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
+* [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
 
-There is also an [introductory video](https://www.youtube.com/watch?v=GVhpPF0j-iE&html5=1) available.
+There is also an [introductory video](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) available.
+Older videos and other material [is available](https://github.com/purpleidea/mgmt/#on-the-web).
 
 ##Setup
 
@@ -81,7 +86,7 @@ automatically ensure that any file resource you declare that matches a
 file installed by your package resource will only be processed after the
 package is installed.
 
-####Controlling autodeges
+####Controlling autoedges
 
 Though autoedges is likely to be very helpful and avoid you having to declare
 all dependencies explicitly, there are cases where this behaviour is
@@ -98,6 +103,11 @@ installation of the `mysql-server` package.
 You can disable autoedges for a resource by setting the `autoedge` key on
 the meta attributes of that resource to `false`.
 
+####Blog post
+
+You can read the introductory blog post about this topic here:
+[https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
+
 ###Autogrouping
 
 Automatic grouping or AutoGroup is the mechanism in mgmt by which it will
@@ -112,6 +122,53 @@ used for other use cases too.
 You can disable autogrouping for a resource by setting the `autogroup` key on
 the meta attributes of that resource to `false`.
 
+####Blog post
+
+You can read the introductory blog post about this topic here:
+[https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
+
+###Automatic clustering
+
+Automatic clustering is a feature by which mgmt automatically builds, scales,
+and manages the embedded etcd cluster which is compiled into mgmt itself. It is
+quite helpful for rapidly bootstrapping clusters and avoiding the extra work to
+setup etcd.
+
+If you prefer to avoid this feature. you can always opt to use an existing etcd
+cluster that is managed separately from mgmt by pointing your mgmt agents at it
+with the `--seeds` variable.
+
+####Blog post
+
+You can read the introductory blog post about this topic here:
+[https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
+
+###Remote ("agent-less") mode
+
+Remote mode is a special mode that lets you kick off mgmt runs on one or more
+remote machines which are only accessible via SSH. In this mode the initiating
+host connects over SSH, copies over the `mgmt` binary, opens an SSH tunnel, and
+runs the remote program while simultaneously passing the etcd traffic back
+through the tunnel so that the initiators etcd cluster can be used to exchange
+resource data.
+
+The interesting benefit of this architecture is that multiple hosts which can't
+connect directly use the initiator to pass the important traffic through to each
+other. Once the cluster has converged all the remote programs can shutdown
+leaving no residual agent.
+
+This mode can also be useful for bootstrapping a new host where you'd like to
+have the service run continuously and as part of an mgmt cluster normally.
+
+In particular, when combined with the `--converged-timeout` parameter, the
+entire set of running mgmt agents will need to all simultaneously converge for
+the group to exit. This is particularly useful for bootstrapping new clusters
+which need to exchange information that is only available at run time.
+
+####Blog post
+
+An introductory blog post about this topic will follow soon.
+
 ##Usage and frequently asked questions
 (Send your questions as a patch to this FAQ! I'll review it, merge it, and
 respond by commit with the answer.)
@@ -130,6 +187,58 @@ chosen, but it was also somewhat arbitrary. If there is available interest,
 good reasoning, *and* patches, then we would consider either switching or
 supporting both, but this is not a high priority at this time.
 
+###Can I use an existing etcd cluster instead of the automatic embedded servers?
+
+Yes, it's possible to use an existing etcd cluster instead of the automatic,
+elastic embedded etcd servers. To do so, simply point to the cluster with the
+`--seeds` variable, the same way you would if you were seeding a new member to
+an existing mgmt cluster.
+
+The downside to this approach is that you won't benefit from the automatic
+elastic nature of the embedded etcd servers, and that you're responsible if you
+accidentally break your etcd cluster, or if you use an unsupported version.
+
+###What does the error message about an inconsistent dataDir mean?
+
+If you get an error message similar to:
+
+```
+Etcd: Connect: CtxError...
+Etcd: CtxError: Reason: CtxDelayErr(5s): No endpoints available yet!
+Etcd: Connect: Endpoints: []
+Etcd: The dataDir (/var/lib/mgmt/etcd) might be inconsistent or corrupt.
+```
+
+This happens when there are a series of fatal connect errors in a row. This can
+happen when you start `mgmt` using a dataDir that doesn't correspond to the
+current cluster view. As a result, the embedded etcd server never finishes
+starting up, and as a result, a default endpoint never gets added. The solution
+is to either reconcile the mistake, and if there is no important data saved, you
+can remove the etcd dataDir. This is typically `/var/lib/mgmt/etcd/member/`.
+
+###Why do resources have both a `Compare` method and an `IFF` (on the UUID) method?
+
+The `Compare()` methods are for determining if two resources are effectively the
+same, which is used to make graph change delta's efficient. This is when we want
+to change from the current running graph to a new graph, but preserve the common
+vertices. Since we want to make this process efficient, we only update the parts
+that are different, and leave everything else alone. This `Compare()` method can
+tell us if two resources are the same.
+
+The `IFF()` method is part of the whole UUID system, which is for discerning if
+a resource meets the requirements another expects for an automatic edge. This is
+because the automatic edge system assumes a unified UUID pattern to test for
+equality. In the future it might be helpful or sane to merge the two similar
+comparison functions although for now they are separate because they are
+actually answer different questions.
+
+###Did you know that there is a band named `MGMT`?
+
+I didn't realize this when naming the project, and it is accidental. After much
+anguishing, I chose the name because it was short and I thought it was
+appropriately descriptive. If you need a less ambiguous search term or phrase,
+you can try using `mgmtconfig` or `mgmt config`.
+
 ###You didn't answer my question, or I have a question!
 
 It's best to ask on [IRC](https://webchat.freenode.net/?channels=#mgmtconfig)
@@ -169,12 +278,79 @@ Exit if the machine has converged for approximately this many seconds.
 Exit when the agent has run for approximately this many seconds. This is not
 generally recommended, but may be useful for users who know what they're doing.
 
+####`--noop`
+Globally force all resources into no-op mode. This also disables the export to
+etcd functionality, but does not disable resource collection, however all
+resources that are collected will have their individual noop settings set.
+
+####`--remote <graph.yaml>`
+Point to a graph file to run on the remote host specified within. This parameter
+can be used multiple times if you'd like to remotely run on multiple hosts in
+parallel.
+
+####`--allow-interactive`
+Allow interactive prompting for SSH passwords if there is no authentication
+method that works.
+
+####`--ssh-priv-id-rsa`
+Specify the path for finding SSH keys. This defaults to `~/.ssh/id_rsa`. To
+never use this method of authentication, set this to the empty string.
+
+####`--cconns`
+The maximum number of concurrent remote ssh connections to run. This defaults
+to `0`, which means unlimited.
+
+####`--no-caching`
+Don't allow remote caching of the remote execution binary. This will require
+the binary to be copied over for every remote execution, but it limits the
+likelihood that there is leftover information from the configuration process.
+
+####`--prefix <path>`
+Specify a path to a custom working directory prefix. This directory will get
+created if it does not exist. This usually defaults to `/var/lib/mgmt/`. This
+can't be combined with the `--tmp-prefix` option. It can be combined with the
+`--allow-tmp-prefix` option.
+
+####`--tmp-prefix`
+If this option is specified, a temporary prefix will be used instead of the
+default prefix. This can't be combined with the `--prefix` option.
+
+####`--allow-tmp-prefix`
+If this option is specified, we will attempt to fall back to a temporary prefix
+if the primary prefix couldn't be created. This is useful for avoiding failures
+in environments where the primary prefix may or may not be available, but you'd
+like to try. The canonical example is when running `mgmt` with `--remote` there
+might be a cached copy of the binary in the primary prefix, but in case there's
+no binary available continue working in a temporary directory to avoid failure.
+
 ##Examples
 For example configurations, please consult the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples) directory in the git
 source repository. It is available from:
 
 [https://github.com/purpleidea/mgmt/tree/master/examples](https://github.com/purpleidea/mgmt/tree/master/examples)
 
+### Systemd:
+See [`misc/mgmt.service`](misc/mgmt.service) for a sample systemd unit file.
+This unit file is part of the RPM.
+
+To specify your custom options for `mgmt` on a systemd distro:
+
+```bash
+sudo mkdir -p /etc/systemd/system/mgmt.service.d/
+
+cat > /etc/systemd/system/mgmt.service.d/env.conf <<EOF
+# Environment variables:
+MGMT_SEEDS=http://127.0.0.1:2379
+MGMT_CONVERGED_TIMEOUT=-1
+MGMT_MAX_RUNTIME=0
+
+# Other CLI options if necessary.
+#OPTS="--max-runtime=0"
+EOF
+
+sudo systemctl daemon-reload
+```
+
 ##Development
 
 This is a project that I started in my free time in 2013. Development is driven
42  Makefile
@@ -16,11 +16,11 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 SHELL = /bin/bash
-.PHONY: all version program path deps run race build clean test format docs rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms copr
+.PHONY: all version program path deps run race generate build clean test gofmt yamlfmt format docs rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms copr
 .SILENT: clean
 
-SVERSION := $(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --dirty --always)
-VERSION := $(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --abbrev=0)
+SVERSION := $(or $(SVERSION),$(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --dirty --always))
+VERSION := $(or $(VERSION),$(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --abbrev=0))
 PROGRAM := $(shell echo $(notdir $(CURDIR)) | cut -f1 -d"-")
 OLDGOLANG := $(shell go version | grep -E 'go1.3|go1.4')
 ifeq ($(VERSION),$(SVERSION))
@@ -28,7 +28,7 @@ ifeq ($(VERSION),$(SVERSION))
 else
 RELEASE = untagged
 endif
-ARCH = $(shell arch)
+ARCH = $(uname -m)
 SPEC = rpmbuild/SPECS/$(PROGRAM).spec
 SOURCE = rpmbuild/SOURCES/$(PROGRAM)-$(VERSION).tar.bz2
 SRPM = rpmbuild/SRPMS/$(PROGRAM)-$(VERSION)-$(RELEASE).src.rpm
@@ -38,7 +38,7 @@ USERNAME := $(shell cat ~/.config/copr 2>/dev/null | grep username | awk -F '='
 SERVER = 'dl.fedoraproject.org'
 REMOTE_PATH = 'pub/alt/$(USERNAME)/$(PROGRAM)'
 
-all: docs
+all: docs $(PROGRAM).static
 
 # show the current version
 version:
@@ -54,34 +54,50 @@ deps:
 	./misc/make-deps.sh
 
 run:
-	find -maxdepth 1 -type f -name '*.go' -not -name '*_test.go' | xargs go run -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)"
+	find . -maxdepth 1 -type f -name '*.go' -not -name '*_test.go' | xargs go run -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)"
 
 # include race flag
 race:
-	find -maxdepth 1 -type f -name '*.go' -not -name '*_test.go' | xargs go run -race -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)"
+	find . -maxdepth 1 -type f -name '*.go' -not -name '*_test.go' | xargs go run -race -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)"
 
+generate:
+	go generate
+
 build: $(PROGRAM)
 
 $(PROGRAM): main.go
 	@echo "Building: $(PROGRAM), version: $(SVERSION)..."
+ifneq ($(OLDGOLANG),)
+	@# avoid equals sign in old golang versions eg in: -X foo=bar
+	time go build -ldflags "-X main.program $(PROGRAM) -X main.version $(SVERSION)" -o $(PROGRAM);
+else
+	time go build -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)" -o $(PROGRAM);
+endif
+
+$(PROGRAM).static: main.go
+	@echo "Building: $(PROGRAM).static, version: $(SVERSION)..."
 	go generate
 ifneq ($(OLDGOLANG),)
 	@# avoid equals sign in old golang versions eg in: -X foo=bar
-	go build -ldflags "-X main.program $(PROGRAM) -X main.version $(SVERSION)" -o $(PROGRAM);
+	go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program $(PROGRAM) -X main.version $(SVERSION)' -o $(PROGRAM).static;
 else
-	go build -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)" -o $(PROGRAM);
+	go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program=$(PROGRAM) -X main.version=$(SVERSION)' -o $(PROGRAM).static;
endif
 
 clean:
 	[ ! -e $(PROGRAM) ] || rm $(PROGRAM)
-	rm -f *_stringer.go # generated by `go generate`
+	#rm -f *_stringer.go # generated by `go generate`
 
 test:
 	./test.sh
 
-format:
-	find -type f -name '*.go' -not -path './old/*' -not -path './tmp/*' -exec gofmt -w {} \;
-	find -type f -name '*.yaml' -not -path './old/*' -not -path './tmp/*' -not -path './omv.yaml' -exec ruby -e "require 'yaml'; x=YAML.load_file('{}').to_yaml.each_line.map(&:rstrip).join(10.chr)+10.chr; File.open('{}', 'w').write x" \;
+gofmt:
+	find . -maxdepth 3 -type f -name '*.go' -not -path './old/*' -not -path './tmp/*' -exec gofmt -w {} \;
+
+yamlfmt:
+	find . -type f -name '*.yaml' -not -path './old/*' -not -path './tmp/*' -not -path './omv.yaml' -exec ruby -e "require 'yaml'; x=YAML.load_file('{}').to_yaml.each_line.map(&:rstrip).join(10.chr)+10.chr; File.open('{}', 'w').write x" \;
+
+format: gofmt yamlfmt
 
 docs: $(PROGRAM)-documentation.pdf
31  README.md
@@ -1,5 +1,6 @@
 # *mgmt*: This is: mgmt!
 
+[](https://goreportcard.com/report/github.com/purpleidea/mgmt)
 [](http://travis-ci.org/purpleidea/mgmt)
 [](DOCUMENTATION.md)
 [](https://webchat.freenode.net/?channels=#mgmtconfig)
@@ -15,9 +16,9 @@ Please join the [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig
 If you have a well phrased question that might benefit others, consider asking it by sending a patch to the documentation [FAQ](https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md#usage-and-frequently-asked-questions) section. I'll merge your question, and a patch with the answer!
 
 ## Quick start:
-* Either get the golang dependencies on your own, or run `make deps` if you're comfortable with how we install them.
+* Clone the repository recursively, eg: `git clone --recursive https://github.com/purpleidea/mgmt/`.
+* Get the remaining golang dependencies on your own, or run `make deps` if you're comfortable with how we install them.
 * Run `make build` to get a freshly built `mgmt` binary.
-* Run `cd $(mktemp --tmpdir -d tmp.XXX) && etcd` to get etcd running. The `mgmt` software will do this automatically for you in the future.
 * Run `time ./mgmt run --file examples/graph0.yaml --converged-timeout=1` to try out a very simple example!
 * To run continuously in the default mode of operation, omit the `--converged-timeout` option.
 * Have fun hacking on our future technology!
@@ -48,6 +49,7 @@ Feel free to read my article on [debugging golang programs](https://ttboj.wordpr
 go get github.com/codegangsta/cli
 go get github.com/coreos/go-systemd/dbus
 go get github.com/coreos/go-systemd/util
+go get github.com/coreos/pkg/capnslog
 
 * stringer (required for building), available as a package on some platforms, otherwise via `go get`
 
@@ -60,14 +62,23 @@ Feel free to read my article on [debugging golang programs](https://ttboj.wordpr
 We'd love to have your patches! Please send them by email, or as a pull request.
 
 ## On the web:
-* Introductory blog post: [https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
-* Introductory recording from DevConf.cz 2016: [https://www.youtube.com/watch?v=GVhpPF0j-iE&html5=1](https://www.youtube.com/watch?v=GVhpPF0j-iE&html5=1)
-* Introductory recording from CfgMgmtCamp.eu 2016: [https://www.youtube.com/watch?v=fNeooSiIRnA&html5=1](https://www.youtube.com/watch?v=fNeooSiIRnA&html5=1)
-* Julian Dunn at CfgMgmtCamp.eu 2016: [https://www.youtube.com/watch?v=kfF9IATUask&t=1949&html5=1](https://www.youtube.com/watch?v=kfF9IATUask&t=1949&html5=1)
-* Walter Heck at CfgMgmtCamp.eu 2016: [http://www.slideshare.net/olindata/configuration-management-time-for-a-4th-generation/3](http://www.slideshare.net/olindata/configuration-management-time-for-a-4th-generation/3)
-* Marco Marongiu on mgmt: [http://syslog.me/2016/02/15/leap-or-die/](http://syslog.me/2016/02/15/leap-or-die/)
-* Felix Frank on puppet to mgmt "transpiling" [https://ffrank.github.io/features/2016/02/18/from-catalog-to-mgmt/](https://ffrank.github.io/features/2016/02/18/from-catalog-to-mgmt/)
-* Blog post on automatic edges and the pkg resource: [https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
+* James Shubin; blog: [Next generation configuration mgmt](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
+* James Shubin; video: [Introductory recording from DevConf.cz 2016](https://www.youtube.com/watch?v=GVhpPF0j-iE&html5=1)
+* James Shubin; video: [Introductory recording from CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=fNeooSiIRnA&html5=1)
+* Julian Dunn; video: [On mgmt at CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=kfF9IATUask&t=1949&html5=1)
+* Walter Heck; slides: [On mgmt at CfgMgmtCamp.eu 2016](http://www.slideshare.net/olindata/configuration-management-time-for-a-4th-generation/3)
+* Marco Marongiu; blog: [On mgmt](http://syslog.me/2016/02/15/leap-or-die/)
+* Felix Frank; blog: [From Catalog To Mgmt (on puppet to mgmt "transpiling")](https://ffrank.github.io/features/2016/02/18/from-catalog-to-mgmt/)
+* James Shubin; blog: [Automatic edges in mgmt (...and the pkg resource)](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
+* James Shubin; blog: [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
+* John Arundel; tweet: [“Puppet’s days are numbered.”](https://twitter.com/bitfield/status/732157519142002688)
+* Felix Frank; blog: [Puppet, Meet Mgmt (on puppet to mgmt internals)](https://ffrank.github.io/features/2016/06/12/puppet,-meet-mgmt/)
+* Felix Frank; blog: [Puppet Powered Mgmt (puppet to mgmt tl;dr)](https://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/)
+* James Shubin; blog: [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
+* James Shubin; video: [Recording from CoreOSFest 2016](https://www.youtube.com/watch?v=KVmDCUA42wc&html5=1)
+* James Shubin; video: [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) ([Slides](https://annex.debconf.org//debconf-share/debconf16/slides/15-next-generation-config-mgmt.pdf))
+* Felix Frank; blog: [Edging It All In (puppet and mgmt edges)](https://ffrank.github.io/features/2016/07/12/edging-it-all-in/)
+* Felix Frank; blog: [Translating All The Things (puppet to mgmt translation warnings)](https://ffrank.github.io/features/2016/08/19/translating-all-the-things/)
 
 ##
 
6  THANKS
@@ -9,10 +9,16 @@ Chris Wright - For encouraging me to continue work on my prototype.
 
 Daniel Riek - For supporting and sheltering this project from bureaucracy.
 
+Diego Ongaro - For good chats, particularly around distributed systems.
+
+Felix Frank - For taking a difficult problem and building an inspiring solution.
+
 Ira Cooper - For having an algorithmic design discussion with me.
 
 Jeff Darcy - For some algorithm recommendations, and NACKing my TopoSort idea!
 
 Red Hat, inc. - For paying my salary, thus financially supporting my hacking.
 
+Samuel Gélineau - For help with programming language theory and design.
+
 And many others...
10  TODO.md
@@ -3,7 +3,8 @@ If you're looking for something to do, look here!
 Let us know if you're working on one of the items.
 
 ## Package resource
-- [ ] base resource [bug](https://github.com/purpleidea/mgmt/issues/11)
+- [ ] getfiles support on debian [bug](https://github.com/hughsie/PackageKit/issues/118)
+- [ ] directory info on fedora [bug](https://github.com/hughsie/PackageKit/issues/117)
 - [ ] dnf blocker [bug](https://github.com/hughsie/PackageKit/issues/110)
 - [ ] install signal blocker [bug](https://github.com/hughsie/PackageKit/issues/109)
 
@@ -21,9 +22,11 @@ Let us know if you're working on one of the items.
 - [ ] reset on recompile
 - [ ] increment algorithm (linear, exponential, etc...)
 
+## Virt (libvirt) resource
+- [ ] base resource [bug](https://github.com/purpleidea/mgmt/issues/25)
+
 ## Etcd improvements
 - [ ] embedded etcd master
-- [ ] capnslog fixes [bug](https://github.com/coreos/etcd/issues/4115)
 
 ## Language improvements
 - [ ] language design
@@ -35,9 +38,6 @@ Let us know if you're working on one of the items.
 
 ## Other
 - [ ] better error/retry handling
-- [ ] resource grouping
-- [ ] automatic dependency adding (eg: packagekit file dependencies)
-- [ ] mgmt systemd service file [bug](https://github.com/purpleidea/mgmt/issues/12) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
 - [ ] deb package target in Makefile
 - [ ] reproducible builds
 - [ ] add your suggestions!
157  config.go
@@ -43,20 +43,25 @@ type edgeConfig struct {
 	To vertexConfig `yaml:"to"`
 }
 
+// GraphConfig is the data structure that describes a single graph to run.
 type GraphConfig struct {
 	Graph string `yaml:"graph"`
 	Resources struct {
 		Noop []*NoopRes `yaml:"noop"`
 		Pkg []*PkgRes `yaml:"pkg"`
 		File []*FileRes `yaml:"file"`
 		Svc []*SvcRes `yaml:"svc"`
 		Exec []*ExecRes `yaml:"exec"`
+		Timer []*TimerRes `yaml:"timer"`
 	} `yaml:"resources"`
 	Collector []collectorResConfig `yaml:"collect"`
 	Edges []edgeConfig `yaml:"edges"`
 	Comment string `yaml:"comment"`
+	Hostname string `yaml:"hostname"` // uuid for the host
+	Remote string `yaml:"remote"`
 }
 
+// Parse parses a data stream into the graph structure.
 func (c *GraphConfig) Parse(data []byte) error {
 	if err := yaml.Unmarshal(data, c); err != nil {
 		return err
@@ -67,16 +72,17 @@ func (c *GraphConfig) Parse(data []byte) error {
 	return nil
 }
 
+// ParseConfigFromFile takes a filename and returns the graph config structure.
 func ParseConfigFromFile(filename string) *GraphConfig {
 	data, err := ioutil.ReadFile(filename)
 	if err != nil {
-		log.Printf("Error: Config: ParseConfigFromFile: File: %v", err)
+		log.Printf("Config: Error: ParseConfigFromFile: File: %v", err)
 		return nil
 	}
 
 	var config GraphConfig
 	if err := config.Parse(data); err != nil {
-		log.Printf("Error: Config: ParseConfigFromFile: Parse: %v", err)
+		log.Printf("Config: Error: ParseConfigFromFile: Parse: %v", err)
 		return nil
 	}
 
@@ -85,7 +91,10 @@ func ParseConfigFromFile(filename string) *GraphConfig {
 
 // NewGraphFromConfig returns a new graph from existing input, such as from the
 // existing graph, and a GraphConfig struct.
-func (g *Graph) NewGraphFromConfig(config *GraphConfig, etcdO *EtcdWObject, hostname string) (*Graph, error) {
+func (g *Graph) NewGraphFromConfig(config *GraphConfig, embdEtcd *EmbdEtcd, noop bool) (*Graph, error) {
+	if config.Hostname == "" {
+		return nil, fmt.Errorf("Config: Error: Hostname can't be empty!")
+	}
 
 	var graph *Graph // new graph to return
 	if g == nil { // FIXME: how can we check for an empty graph?
@@ -101,8 +110,8 @@ func (g *Graph) NewGraphFromConfig(config *GraphConfig, etcdO *EtcdWObject, host
 	// TODO: if defined (somehow)...
 	graph.SetName(config.Graph) // set graph name
 
 	var keep []*Vertex // list of vertex which are the same in new graph
+	var resources []Res // list of resources to export
 	// use reflection to avoid duplicating code... better options welcome!
 	value := reflect.Indirect(reflect.ValueOf(config.Resources))
 	vtype := value.Type()
@@ -118,84 +127,106 @@ func (g *Graph) NewGraphFromConfig(config *GraphConfig, etcdO *EtcdWObject, host
 		}
 		for j := 0; j < slice.Len(); j++ { // loop through resources of same kind
 			x := slice.Index(j).Interface()
-			obj, ok := x.(Res) // convert to Res type
+			res, ok := x.(Res) // convert to Res type
 			if !ok {
-				return nil, fmt.Errorf("Error: Config: Can't convert: %v of type: %T to Res.", x, x)
+				return nil, fmt.Errorf("Config: Error: Can't convert: %v of type: %T to Res.", x, x)
+			}
+			if noop {
+				res.Meta().Noop = noop
 			}
 
 			if _, exists := lookup[kind]; !exists {
 				lookup[kind] = make(map[string]*Vertex)
 			}
 			// XXX: should we export based on a @@ prefix, or a metaparam
 			// like exported => true || exported => (host pattern)||(other pattern?)
-			if !strings.HasPrefix(obj.GetName(), "@@") { // exported resource
+			if !strings.HasPrefix(res.GetName(), "@@") { // not exported resource
 				// XXX: we don't have a way of knowing if any of the
 				// metaparams are undefined, and as a result to set the
 				// defaults that we want! I hate the go yaml parser!!!
-				v := graph.GetVertexMatch(obj)
+				v := graph.GetVertexMatch(res)
 				if v == nil { // no match found
-					obj.Init()
-					v = NewVertex(obj)
+					res.Init()
+					v = NewVertex(res)
 					graph.AddVertex(v) // call standalone in case not part of an edge
 				}
-				lookup[kind][obj.GetName()] = v // used for constructing edges
+				lookup[kind][res.GetName()] = v // used for constructing edges
 				keep = append(keep, v) // append
 
-			} else {
-				// XXX: do this in a different function...
-				// add to etcd storage...
-				obj.SetName(obj.GetName()[2:]) //slice off @@
-				data, err := ResToB64(obj)
-				if err != nil {
-					return nil, fmt.Errorf("Config: Could not encode %v resource: %v, error: %v", kind, obj.GetName(), err)
-				}
-
-				if !etcdO.EtcdPut(hostname, obj.GetName(), kind, data) {
-					return nil, fmt.Errorf("Config: Could not export %v resource: %v", kind, obj.GetName())
-				}
+			} else if !noop { // do not export any resources if noop
+				// store for addition to etcd storage...
+				res.SetName(res.GetName()[2:]) //slice off @@
+				res.setKind(kind) // cheap init
+				resources = append(resources, res)
 			}
 		}
 	}
+	// store in etcd
+	if err := EtcdSetResources(embdEtcd, config.Hostname, resources); err != nil {
+		return nil, fmt.Errorf("Config: Could not export resources: %v", err)
+	}
 
-	// lookup from etcd graph
+	// lookup from etcd
+	var hostnameFilter []string // empty to get from everyone
+	kindFilter := []string{}
+	for _, t := range config.Collector {
+		// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
+		kind := FirstToUpper(t.Kind)
+		kindFilter = append(kindFilter, kind)
+	}
 	// do all the graph look ups in one single step, so that if the etcd
 	// database changes, we don't have a partial state of affairs...
-	nodes, ok := etcdO.EtcdGet()
-	if ok {
+	if len(kindFilter) > 0 { // if kindFilter is empty, don't need to do lookups!
+		var err error
+		resources, err = EtcdGetResources(embdEtcd, hostnameFilter, kindFilter)
+		if err != nil {
+			return nil, fmt.Errorf("Config: Could not collect resources: %v", err)
+		}
+	}
+	for _, res := range resources {
+		matched := false
+		// see if we find a collect pattern that matches
 		for _, t := range config.Collector {
 			// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
 			kind := FirstToUpper(t.Kind)
 
 			// use t.Kind and optionally t.Pattern to collect from etcd storage
 			log.Printf("Collect: %v; Pattern: %v", kind, t.Pattern)
-			for _, str := range etcdO.EtcdGetProcess(nodes, kind) {
-				obj, err := B64ToRes(str)
-				if err != nil {
-					log.Printf("B64ToRes failed to decode: %v", err)
-					log.Printf("Collect: %v: not collected!", kind)
-					continue
-				}
-
-				if t.Pattern != "" { // XXX: simplistic for now
-					obj.CollectPattern(t.Pattern) // obj.Dirname = t.Pattern
-				}
-
-				log.Printf("Collect: %v[%v]: collected!", kind, obj.GetName())
-
-				// XXX: similar to other resource add code:
-				if _, exists := lookup[kind]; !exists {
-					lookup[kind] = make(map[string]*Vertex)
-				}
-				v := graph.GetVertexMatch(obj)
-				if v == nil { // no match found
-					obj.Init() // initialize go channels or things won't work!!!
-					v = NewVertex(obj)
-					graph.AddVertex(v) // call standalone in case not part of an edge
-				}
-				lookup[kind][obj.GetName()] = v // used for constructing edges
-				keep = append(keep, v) // append
-			}
+
+			// XXX: expand to more complex pattern matching here...
+			if res.Kind() != kind {
+				continue
+			}
+
+			if matched {
+				// we've already matched this resource, should we match again?
+				log.Printf("Config: Warning: Matching %v[%v] again!", kind, res.GetName())
+			}
+			matched = true
+
+			// collect resources but add the noop metaparam
+			if noop {
+				res.Meta().Noop = noop
+			}
+
+			if t.Pattern != "" { // XXX: simplistic for now
+				res.CollectPattern(t.Pattern) // res.Dirname = t.Pattern
+			}
+
+			log.Printf("Collect: %v[%v]: collected!", kind, res.GetName())
+
+			// XXX: similar to other resource add code:
+			if _, exists := lookup[kind]; !exists {
+				lookup[kind] = make(map[string]*Vertex)
+			}
+			v := graph.GetVertexMatch(res)
+			if v == nil { // no match found
+				res.Init() // initialize go channels or things won't work!!!
+				v = NewVertex(res)
+				graph.AddVertex(v) // call standalone in case not part of an edge
+			}
+			lookup[kind][res.GetName()] = v // used for constructing edges
+			keep = append(keep, v) // append
+
+			//break // let's see if another resource even matches
 		}
 	}
 
@@ -267,11 +298,11 @@ func (g *Graph) addEdgesByMatchingUUIDS(v *Vertex, uuids []ResUUID) []bool {
 	return result
 }
 
-// add auto edges to graph
+// AutoEdges adds the automatic edges to the graph.
 func (g *Graph) AutoEdges() {
 	log.Println("Compile: Adding AutoEdges...")
 	for _, v := range g.GetVertices() { // for each vertexes autoedges
-		if !v.GetMeta().AutoEdge { // is the metaparam true?
+		if !v.Meta().AutoEdge { // is the metaparam true?
 			continue
 		}
 		autoEdgeObj := v.AutoEdges()
@@ -398,7 +429,7 @@ func (ag *baseGrouper) vertexCmp(v1, v2 *Vertex) error {
 		return fmt.Errorf("The two resources aren't the same kind!")
 	}
 	// someone doesn't want to group!
-	if !v1.GetMeta().AutoGroup || !v2.GetMeta().AutoGroup {
+	if !v1.Meta().AutoGroup || !v2.Meta().AutoGroup {
 		return fmt.Errorf("One of the autogroup flags is false!")
 	}
 	if v1.Res.IsGrouped() { // already grouped!
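The `GraphConfig` struct in the hunk above maps YAML keys (`graph`, `resources`, `collect`, `edges`, `comment`, plus the new `hostname` and `remote` fields) onto the graph compiler, and `Parse`/`NewGraphFromConfig` are the entry points this changeset reworks. Below is a minimal, hypothetical sketch of how those entry points might be driven after this change; the embedded YAML snippet and the wrapper function are illustrative assumptions, not code from the repository.

```go
package main

import "log"

// buildGraphSketch is an illustrative driver (not part of this changeset).
// GraphConfig.Parse() fills the struct from YAML bytes, and the reworked
// NewGraphFromConfig() takes an *EmbdEtcd handle plus a noop flag.
func buildGraphSketch(embdEtcd *EmbdEtcd) (*Graph, error) {
	// A minimal YAML document using keys from the GraphConfig struct tags.
	// The "hostname" key is required by the new empty-hostname check;
	// resource lists (noop, pkg, file, svc, exec, timer) are omitted here.
	data := []byte(`
graph: mygraph
comment: simple example
hostname: h1
`)
	var config GraphConfig
	if err := config.Parse(data); err != nil { // yaml.Unmarshal under the hood
		return nil, err
	}

	var g *Graph // nil previous graph: the method handles this case itself
	graph, err := g.NewGraphFromConfig(&config, embdEtcd, false) // noop=false
	if err != nil {
		return nil, err
	}
	log.Printf("Graph: %v", graph)
	return graph, nil
}
```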
@@ -24,9 +24,74 @@ import (
 	"math"
 	"path"
 	"strings"
+	"sync"
 	"syscall"
 )
 
+// ConfigWatcher returns events on a channel anytime one of its files events.
+type ConfigWatcher struct {
+	ch chan string
+	wg sync.WaitGroup
+	closechan chan struct{}
+}
+
+// NewConfigWatcher creates a new ConfigWatcher struct.
+func NewConfigWatcher() *ConfigWatcher {
+	return &ConfigWatcher{
+		ch: make(chan string),
+		closechan: make(chan struct{}),
+	}
+}
+
+// The Add method adds a new file path to watch for events on.
+func (obj *ConfigWatcher) Add(file ...string) {
+	if len(file) == 0 {
+		return
+	}
+	if len(file) > 1 {
+		for _, f := range file { // add all the files...
+			obj.Add(f) // recurse
+		}
+		return
+	}
+	// otherwise, add the one file passed in...
+	obj.wg.Add(1)
+	go func() {
+		defer obj.wg.Done()
+		ch := ConfigWatch(file[0])
+		for {
+			select {
+			case <-ch:
+				obj.ch <- file[0]
+				continue
+			case <-obj.closechan:
+				return
+			}
+		}
+	}()
+}
+
+// Events returns a channel to listen on for file events. It closes when it is
+// emptied after the Close() method is called. You can test for closure with the
+// f, more := <-obj.Events() pattern.
+func (obj *ConfigWatcher) Events() chan string {
+	return obj.ch
+}
+
+// Close shuts down the ConfigWatcher object. It closes the Events channel after
+// all the currently pending events have been emptied.
+func (obj *ConfigWatcher) Close() {
+	if obj.ch == nil {
+		return
+	}
+	close(obj.closechan)
+	obj.wg.Wait() // wait until everyone is done sending on obj.ch
+	//obj.ch <- "" // send finished message
+	close(obj.ch)
+	obj.ch = nil
+}
+
+// ConfigWatch writes on the channel everytime an event is seen for the path.
 // XXX: it would be great if we could reuse code between this and the file resource
 // XXX: patch this to submit it as part of go-fsnotify if they're interested...
 func ConfigWatch(file string) chan bool {
@@ -51,8 +116,9 @@ func ConfigWatch(file string) chan bool {
 		if current == "" { // the empty string top is the root dir ("/")
 			current = "/"
 		}
-		log.Printf("Watching: %v", current) // attempting to watch...
+		if DEBUG {
+			log.Printf("Watching: %v", current) // attempting to watch...
+		}
 		// initialize in the loop so that we can reset on rm-ed handles
 		err = watcher.Add(current)
 		if err != nil {
@@ -97,7 +163,10 @@ func ConfigWatch(file string) chan bool {
 			// if we have what we wanted, awesome, send an event...
 			if event.Name == safename {
 				//log.Println("Event!")
-				send = true
+				// TODO: filter out some of the events, is Write a sufficient minimum?
+				if event.Op&fsnotify.Write == fsnotify.Write {
+					send = true
+				}
 
 				// file removed, move the watch upwards
 				if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
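The new `ConfigWatcher` type above multiplexes several watched files onto one events channel: `Add` starts a goroutine per file around `ConfigWatch`, `Events` exposes the shared channel, and `Close` waits on the `sync.WaitGroup` before closing it. A rough usage sketch follows; the `done` channel and the file paths are illustrative assumptions, not part of this changeset.

```go
package main

import "log"

// watchConfigsSketch shows the Add/Events/Close lifecycle of the new
// ConfigWatcher type from the hunk above. Paths and the done channel are
// placeholders for illustration only.
func watchConfigsSketch(done <-chan struct{}) {
	cw := NewConfigWatcher()
	cw.Add("/etc/mgmt/graph.yaml", "remote1.yaml") // variadic: one watch goroutine per file

	go func() {
		for {
			f, more := <-cw.Events() // the `f, more := <-obj.Events()` pattern from the doc comment
			if !more {
				return // the Events channel was closed by Close()
			}
			log.Printf("config file changed: %s", f)
			// a real caller would re-read and swap the graph here...
		}
	}()

	<-done     // block until the caller asks us to shut down
	cw.Close() // drains pending events, then closes the Events channel
}
```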
converger.go | 379 | Normal file
@@ -0,0 +1,379 @@
// Mgmt
// Copyright (C) 2013-2016+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"sync"
	"time"
)

// TODO: we could make a new function that masks out the state of certain
// UUID's, but at the moment the new Timer code has obsoleted the need...

// Converger is the general interface for implementing a convergence watcher
type Converger interface { // TODO: need a better name
	Register() ConvergerUUID
	IsConverged(ConvergerUUID) bool         // is the UUID converged ?
	SetConverged(ConvergerUUID, bool) error // set the converged state of the UUID
	Unregister(ConvergerUUID)
	Start()
	Pause()
	Loop(bool)
	ConvergedTimer(ConvergerUUID) <-chan time.Time
	Status() map[uint64]bool
	Timeout() int                // returns the timeout that this was created with
	SetStateFn(func(bool) error) // sets the stateFn
}

// ConvergerUUID is the interface resources can use to notify with if converged
// you'll need to use part of the Converger interface to Register initially too
type ConvergerUUID interface {
	ID() uint64   // get Id
	Name() string // get a friendly name
	SetName(string)
	IsValid() bool // has Id been initialized ?
	InvalidateID() // set Id to nil
	IsConverged() bool
	SetConverged(bool) error
	Unregister()
	ConvergedTimer() <-chan time.Time
	StartTimer() (func() error, error) // cancellable is the same as StopTimer()
	ResetTimer() error                 // resets counter to zero
	StopTimer() error
}

// converger is an implementation of the Converger interface
type converger struct {
	timeout   int              // must be zero (instant) or greater seconds to run
	stateFn   func(bool) error // run on converged state changes with state bool
	converged bool             // did we converge (state changes of this run Fn)
	channel   chan struct{}    // signal here to run an isConverged check
	control   chan bool        // control channel for start/pause
	mutex     sync.RWMutex     // used for controlling access to status and lastid
	lastid    uint64
	status    map[uint64]bool
}

// convergerUUID is an implementation of the ConvergerUUID interface
type convergerUUID struct {
	converger Converger
	id        uint64
	name      string // user defined, friendly name
	mutex     sync.Mutex
	timer     chan struct{}
	running   bool // is the above timer running?
}

// NewConverger builds a new converger struct
func NewConverger(timeout int, stateFn func(bool) error) *converger {
	return &converger{
		timeout: timeout,
		stateFn: stateFn,
		channel: make(chan struct{}),
		control: make(chan bool),
		lastid:  0,
		status:  make(map[uint64]bool),
	}
}

// Register assigns a ConvergerUUID to the caller
func (obj *converger) Register() ConvergerUUID {
	obj.mutex.Lock()
	defer obj.mutex.Unlock()
	obj.lastid++
	obj.status[obj.lastid] = false // initialize as not converged
	return &convergerUUID{
		converger: obj,
		id:        obj.lastid,
		name:      fmt.Sprintf("%d", obj.lastid), // some default
		timer:     nil,
		running:   false,
	}
}

// IsConverged gets the converged status of a uuid
func (obj *converger) IsConverged(uuid ConvergerUUID) bool {
	if !uuid.IsValid() {
		panic(fmt.Sprintf("Id of ConvergerUUID(%s) is nil!", uuid.Name()))
	}
	obj.mutex.RLock()
	isConverged, found := obj.status[uuid.ID()] // lookup
	obj.mutex.RUnlock()
	if !found {
		panic("Id of ConvergerUUID is unregistered!")
	}
	return isConverged
}

// SetConverged updates the converger with the converged state of the UUID
func (obj *converger) SetConverged(uuid ConvergerUUID, isConverged bool) error {
	if !uuid.IsValid() {
		return fmt.Errorf("Id of ConvergerUUID(%s) is nil!", uuid.Name())
	}
	obj.mutex.Lock()
	if _, found := obj.status[uuid.ID()]; !found {
		panic("Id of ConvergerUUID is unregistered!")
	}
	obj.status[uuid.ID()] = isConverged // set
	obj.mutex.Unlock()                  // unlock *before* poke or deadlock!
	if isConverged != obj.converged {   // only poke if it would be helpful
		// run in a go routine so that we never block... just queue up!
		// this allows us to send events, even if we haven't started...
		go func() { obj.channel <- struct{}{} }()
	}
	return nil
}

// isConverged returns true if *every* registered uuid has converged
func (obj *converger) isConverged() bool {
	obj.mutex.RLock() // take a read lock
	defer obj.mutex.RUnlock()
	for _, v := range obj.status {
		if !v { // everyone must be converged for this to be true
			return false
		}
	}
	return true
}

// Unregister dissociates the ConvergedUUID from the converged checking
func (obj *converger) Unregister(uuid ConvergerUUID) {
	if !uuid.IsValid() {
		panic(fmt.Sprintf("Id of ConvergerUUID(%s) is nil!", uuid.Name()))
	}
	obj.mutex.Lock()
	uuid.StopTimer() // ignore any errors
	delete(obj.status, uuid.ID())
	obj.mutex.Unlock()
	uuid.InvalidateID()
}

// Start causes a Converger object to start or resume running
func (obj *converger) Start() {
	obj.control <- true
}

// Pause causes a Converger object to stop running temporarily
func (obj *converger) Pause() { // FIXME: add a sync ACK on pause before return
	obj.control <- false
}

// Loop is the main loop for a Converger object; it usually runs in a goroutine
// TODO: we could eventually have each resource tell us as soon as it converges
// and then keep track of the time delays here, to avoid callers needing select
// NOTE: when we have very short timeouts, if we start before all the resources
// have joined the map, then it might appears as if we converged before we did!
func (obj *converger) Loop(startPaused bool) {
	if obj.control == nil {
		panic("Converger not initialized correctly")
	}
	if startPaused { // start paused without racing
		select {
		case e := <-obj.control:
			if !e {
				panic("Converger expected true!")
			}
		}
	}
	for {
		select {
		case e := <-obj.control: // expecting "false" which means pause!
			if e {
				panic("Converger expected false!")
			}
			// now i'm paused...
			select {
			case e := <-obj.control:
				if !e {
					panic("Converger expected true!")
				}
				// restart
				// kick once to refresh the check...
				go func() { obj.channel <- struct{}{} }()
				continue
			}

		case <-obj.channel:
			if !obj.isConverged() {
				if obj.converged { // we're doing a state change
					if obj.stateFn != nil {
						// call an arbitrary function
						if err := obj.stateFn(false); err != nil {
							// FIXME: what to do on error ?
						}
					}
				}
				obj.converged = false
				continue
			}

			// we have converged!
			if obj.timeout >= 0 { // only run if timeout is valid
				if !obj.converged { // we're doing a state change
					if obj.stateFn != nil {
						// call an arbitrary function
						if err := obj.stateFn(true); err != nil {
							// FIXME: what to do on error ?
						}
					}
				}
			}
			obj.converged = true
			// loop and wait again...
		}
	}
}

// ConvergedTimer adds a timeout to a select call and blocks until then
// TODO: this means we could eventually have per resource converged timeouts
func (obj *converger) ConvergedTimer(uuid ConvergerUUID) <-chan time.Time {
	// be clever: if i'm already converged, this timeout should block which
	// avoids unnecessary new signals being sent! this avoids fast loops if
	// we have a low timeout, or in particular a timeout == 0
	if uuid.IsConverged() {
		// blocks the case statement in select forever!
		return TimeAfterOrBlock(-1)
	}
	return TimeAfterOrBlock(obj.timeout)
}

// Status returns a map of the converged status of each UUID.
func (obj *converger) Status() map[uint64]bool {
	status := make(map[uint64]bool)
	obj.mutex.RLock() // take a read lock
	defer obj.mutex.RUnlock()
	for k, v := range obj.status { // make a copy to avoid the mutex
		status[k] = v
	}
	return status
}

// Timeout returns the timeout in seconds that converger was created with. This
// is useful to avoid passing in the timeout value separately when you're
// already passing in the Converger struct.
func (obj *converger) Timeout() int {
	return obj.timeout
}

// SetStateFn sets the state function to be run on change of converged state.
func (obj *converger) SetStateFn(stateFn func(bool) error) {
	obj.stateFn = stateFn
}

// Id returns the unique id of this UUID object
func (obj *convergerUUID) ID() uint64 {
	return obj.id
}

// Name returns a user defined name for the specific convergerUUID.
func (obj *convergerUUID) Name() string {
	return obj.name
}

// SetName sets a user defined name for the specific convergerUUID.
func (obj *convergerUUID) SetName(name string) {
	obj.name = name
}

// IsValid tells us if the id is valid or has already been destroyed
func (obj *convergerUUID) IsValid() bool {
	return obj.id != 0 // an id of 0 is invalid
}

// InvalidateID marks the id as no longer valid
func (obj *convergerUUID) InvalidateID() {
	obj.id = 0 // an id of 0 is invalid
}

// IsConverged is a helper function to the regular IsConverged method
func (obj *convergerUUID) IsConverged() bool {
	return obj.converger.IsConverged(obj)
}

// SetConverged is a helper function to the regular SetConverged notification
func (obj *convergerUUID) SetConverged(isConverged bool) error {
	return obj.converger.SetConverged(obj, isConverged)
}

// Unregister is a helper function to unregister myself
func (obj *convergerUUID) Unregister() {
	obj.converger.Unregister(obj)
}

// ConvergedTimer is a helper around the regular ConvergedTimer method
func (obj *convergerUUID) ConvergedTimer() <-chan time.Time {
	return obj.converger.ConvergedTimer(obj)
}

// StartTimer runs an invisible timer that automatically converges on timeout.
func (obj *convergerUUID) StartTimer() (func() error, error) {
	obj.mutex.Lock()
	if !obj.running {
		obj.timer = make(chan struct{})
		obj.running = true
	} else {
		obj.mutex.Unlock()
		return obj.StopTimer, fmt.Errorf("Timer already started!")
	}
	obj.mutex.Unlock()
	go func() {
		for {
			select {
			case _, ok := <-obj.timer: // reset signal channel
				if !ok { // channel is closed
					return // false to exit
				}
				obj.SetConverged(false)

			case <-obj.ConvergedTimer():
				obj.SetConverged(true) // converged!
				select {
				case _, ok := <-obj.timer: // reset signal channel
					if !ok { // channel is closed
						return // false to exit
					}
				}
			}
		}
	}()
	return obj.StopTimer, nil
}

// ResetTimer resets the counter to zero if using a StartTimer internally.
func (obj *convergerUUID) ResetTimer() error {
	obj.mutex.Lock()
	defer obj.mutex.Unlock()
	if obj.running {
		obj.timer <- struct{}{} // send the reset message
		return nil
	}
	return fmt.Errorf("Timer hasn't been started!")
}

// StopTimer stops the running timer permanently until a StartTimer is run.
func (obj *convergerUUID) StopTimer() error {
	obj.mutex.Lock()
	defer obj.mutex.Unlock()
	if !obj.running {
		return fmt.Errorf("Timer isn't running!")
	}
	close(obj.timer)
	obj.running = false
	return nil
}
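converger.go above is the new heart of the converged-detection feature. A hypothetical caller of these interfaces (not code from this diff; it mirrors what the exec.go and file.go changes below do) would register a ConvergerUUID, flag activity as unconverged, and let the timer mark it converged again:

	// worker is an illustrative consumer of the Converger interface above.
	func worker(conv Converger, work <-chan struct{}) {
		cuuid := conv.Register()
		defer cuuid.Unregister()
		for {
			select {
			case _, ok := <-work: // some activity happened
				if !ok {
					return
				}
				cuuid.SetConverged(false) // we are busy again
			case <-cuuid.ConvergedTimer():
				cuuid.SetConverged(true) // idle long enough: converged!
			}
		}
	}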
docker/Dockerfile | 22 | Normal file
@@ -0,0 +1,22 @@
FROM golang:1.6.2

MAINTAINER Michał Czeraszkiewicz <contact@czerasz.com>

# Set the reset cache variable
# Read more here: http://czerasz.com/2014/11/13/docker-tip-and-tricks/#use-refreshedat-variable-for-better-cache-control
ENV REFRESHED_AT 2016-05-10

# Update the package list to be able to use required packages
RUN apt-get update

# Change the working directory
WORKDIR /go/src/mgmt

# Copy all the files to the working directory
COPY . /go/src/mgmt

# Install dependencies
RUN make deps

# Build the binary
RUN make build
docker/Dockerfile.development | 31 | Normal file
@@ -0,0 +1,31 @@
FROM golang:1.6.2

MAINTAINER Michał Czeraszkiewicz <contact@czerasz.com>

# Set the reset cache variable
# Read more here: http://czerasz.com/2014/11/13/docker-tip-and-tricks/#use-refreshedat-variable-for-better-cache-control
ENV REFRESHED_AT 2016-05-14

RUN apt-get update

# Setup User to match Host User
# Give the nre user superuser permissions
ARG USER_ID=1000
ARG GROUP_ID=1000
ARG USER_NAME=mgmt
ARG GROUP_NAME=$USER_NAME
RUN groupadd --gid $GROUP_ID $GROUP_NAME && \
    useradd --create-home --home /home/$USER_NAME --uid ${USER_ID} --gid $GROUP_NAME --groups sudo $USER_NAME && \
    echo "$USER_NAME ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers

# Copy all the files to the working directory
COPY . /home/$USER_NAME/mgmt

# Change working directory
WORKDIR /home/$USER_NAME/mgmt

# Install dependencies
RUN make deps

# Change user
USER ${USER_NAME}
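Dockerfile.development defaults USER_ID and GROUP_ID to 1000. If your host user differs, one way to line them up when building by hand is to pass --build-arg (a sketch; the build-development script below does not currently pass these, so this is an assumption about intended usage):

	docker build -t purpleidea/mgmt:development \
		--build-arg USER_ID=$(id -u) \
		--build-arg GROUP_ID=$(id -g) \
		--file=docker/Dockerfile.development .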
docker/scripts/build | 26 | Executable file
@@ -0,0 +1,26 @@
#!/bin/bash

script_directory="$( cd "$( dirname "$0" )" && pwd )"
project_directory=$script_directory/../..

# Specify the Docker image name
image_name='purpleidea/mgmt'

# Build the image which contains the compiled binary
docker build -t $image_name \
	--file=$project_directory/docker/Dockerfile $project_directory

# Remove the container if it already exists
docker rm -f mgmt-export 2> /dev/null

# Start the container in background so we can "copy out" the binary
docker run -d --name=mgmt-export $image_name bash -c 'while true; do sleep 1000; done'

# Remove the current binary
rm $project_directory/mgmt 2> /dev/null

# Get the binary from the container
docker cp mgmt-export:/go/src/mgmt/mgmt $project_directory/mgmt

# Remove the container
docker rm -f mgmt-export 2> /dev/null
docker/scripts/build-development | 14 | Executable file
@@ -0,0 +1,14 @@
#!/bin/bash

# Stop on any error
set -e

script_directory="$( cd "$( dirname "$0" )" && pwd )"
project_directory=$script_directory/../..

# Specify the Docker image name
image_name='purpleidea/mgmt:development'

# Build the image
docker build -t $image_name \
	--file=$project_directory/docker/Dockerfile.development $project_directory
docker/scripts/run-development | 15 | Executable file
@@ -0,0 +1,15 @@
#!/bin/bash

# Stop on any error
set -e

script_directory="$( cd "$( dirname "$0" )" && pwd )"
project_directory=$script_directory/../..

# Specify the Docker image name
image_name='purpleidea/mgmt:development'

# Run container in development mode
docker run --rm --name=mgm_development --user=mgmt \
	-v $project_directory:/home/mgmt/mgmt \
	-it $image_name bash
event.go | 39
@@ -29,8 +29,10 @@ const (
 	eventBackPoke
 )

+// Resp is a channel to be used for boolean responses.
 type Resp chan bool

+// Event is the main struct that stores event information and responses.
 type Event struct {
 	Name eventName
 	Resp Resp // channel to send an ack response on, nil to skip
@@ -39,37 +41,56 @@ type Event struct {
 	Activity bool // did something interesting happen?
 }

-// send a single acknowledgement on the channel if one was requested
+// ACK sends a single acknowledgement on the channel if one was requested.
 func (event *Event) ACK() {
 	if event.Resp != nil { // if they've requested an ACK
-		event.Resp <- true // send ACK
+		event.Resp.ACK()
 	}
 }

+// NACK sends a negative acknowledgement message on the channel if one was requested.
 func (event *Event) NACK() {
-	if event.Resp != nil { // if they've requested an ACK
-		event.Resp <- false // send NACK
+	if event.Resp != nil { // if they've requested a NACK
+		event.Resp.NACK()
 	}
 }

-// Resp is just a helper to return the right type of response channel
+// NewResp is just a helper to return the right type of response channel.
 func NewResp() Resp {
 	resp := make(chan bool)
 	return resp
 }

-// ACKWait waits for a +ive Ack from a Resp channel
+// ACK sends a true value to resp.
+func (resp Resp) ACK() {
+	if resp != nil {
+		resp <- true
+	}
+}
+
+// NACK sends a false value to resp.
+func (resp Resp) NACK() {
+	if resp != nil {
+		resp <- false
+	}
+}
+
+// Wait waits for any response from a Resp channel and returns it.
+func (resp Resp) Wait() bool {
+	return <-resp
+}
+
+// ACKWait waits for a +ive Ack from a Resp channel.
 func (resp Resp) ACKWait() {
 	for {
-		value := <-resp
 		// wait until true value
-		if value {
+		if resp.Wait() {
 			return
 		}
 	}
 }

-// get the activity value
+// GetActivity returns the activity value.
 func (event *Event) GetActivity() bool {
 	return event.Activity
 }
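The Resp helpers above wrap a common request/acknowledge pattern over a plain bool channel. A small illustrative sketch of how a sender and a receiver might use them (hypothetical code, not part of this commit; it assumes the Event, Resp and NewResp definitions shown above, and eventStart is only an example event name):

	// sender side: attach a response channel and block for a positive ack
	resp := NewResp()
	processChan <- Event{Name: eventStart, Resp: resp}
	resp.ACKWait() // returns once the receiver calls ACK()

	// receiver side:
	event := <-processChan
	// ... handle the event ...
	event.ACK() // or event.NACK() to signal a refusal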
|||||||
examples/autogroup2.yaml | 17 | Normal file
@@ -0,0 +1,17 @@
---
graph: mygraph
resources:
  pkg:
  - name: powertop
    meta:
      autogroup: true
    state: installed
  - name: sl
    meta:
      autogroup: true
    state: installed
  - name: cowsay
    meta:
      autogroup: true
    state: installed
edges: []
examples/etcd1d.yaml | 18 | Normal file
@@ -0,0 +1,18 @@
---
graph: mygraph
resources:
  file:
  - name: file1d
    path: "/tmp/mgmtD/f1d"
    content: |
      i am f1
    state: exists
  - name: "@@file2d"
    path: "/tmp/mgmtD/f2d"
    content: |
      i am f2, exported from host D
    state: exists
collect:
- kind: file
  pattern: "/tmp/mgmtD/"
edges: []
examples/exec3.yaml | 59 | Normal file
@@ -0,0 +1,59 @@
---
graph: parallel
resources:
  exec:
  - name: pkg10
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: svc10
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: exec10
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: pkg15
    cmd: sleep 15s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
edges:
- name: e1
  from:
    kind: exec
    name: pkg10
  to:
    kind: exec
    name: svc10
- name: e2
  from:
    kind: exec
    name: svc10
  to:
    kind: exec
    name: exec10
examples/noop1.yaml | 24 | Normal file
@@ -0,0 +1,24 @@
---
graph: mygraph
comment: noop example
resources:
  noop:
  - name: noop1
    meta:
      noop: true
  file:
  - name: file1
    path: "/tmp/mgmt-hello-noop"
    content: |
      hello world from @purpleidea
    state: exists
    meta:
      noop: true
edges:
- name: e1
  from:
    kind: noop
    name: noop1
  to:
    kind: file
    name: file1
examples/remote1.yaml | 23 | Normal file
@@ -0,0 +1,23 @@
---
graph: mygraph
comment: remote noop example
resources:
  noop:
  - name: noop1
    meta:
      noop: true
  file:
  - name: file1
    path: "/tmp/mgmt-remote-hello"
    content: |
      hello world from @purpleidea
    state: exists
edges:
- name: e1
  from:
    kind: noop
    name: noop1
  to:
    kind: file
    name: file1
remote: "ssh://root:password@hostname:22"
examples/timer1.yaml | 25 | Normal file
@@ -0,0 +1,25 @@
---
graph: mygraph
comment: timer example
resources:
  timer:
  - name: timer1
    interval: 30
  exec:
  - name: exec1
    cmd: echo hello world
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
edges:
- name: e1
  from:
    kind: timer
    name: timer1
  to:
    kind: exec
    name: exec1
examples/timer2.yaml | 43 | Normal file
@@ -0,0 +1,43 @@
---
graph: mygraph
comment: example of multiple timers
resources:
  timer:
  - name: timer1
    interval: 30
  - name: timer2
    interval: 60
  exec:
  - name: exec1
    cmd: echo hello world 30
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: exec2
    cmd: echo hello world 60
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
edges:
- name: e1
  from:
    kind: timer
    name: timer1
  to:
    kind: exec
    name: exec1
- name: e2
  from:
    kind: timer
    name: timer2
  to:
    kind: exec
    name: exec2
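These example graphs are meant to be fed to the mgmt binary. A possible invocation for trying one locally (the run subcommand name is an assumption; the flags are the ones handled in the main.go changes later in this diff):

	./mgmt run --file examples/timer1.yaml --converged-timeout=5 --tmp-prefix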
exec.go | 44
@@ -31,6 +31,7 @@ func init() {
 	gob.Register(&ExecRes{})
 }

+// ExecRes is an exec resource for running commands.
 type ExecRes struct {
 	BaseRes `yaml:",inline"`
 	State string `yaml:"state"` // state: exists/present?, absent, (undefined?)
@@ -44,6 +45,7 @@ type ExecRes struct {
 	PollInt int `yaml:"pollint"` // the poll interval for the ifcmd
 }

+// NewExecRes is a constructor for this resource. It also calls Init() for you.
 func NewExecRes(name, cmd, shell string, timeout int, watchcmd, watchshell, ifcmd, ifshell string, pollint int, state string) *ExecRes {
 	obj := &ExecRes{
 		BaseRes: BaseRes{
@@ -63,6 +65,7 @@ func NewExecRes(name, cmd, shell string, timeout int, watchcmd, watchshell, ifcm
 	return obj
 }

+// Init runs some startup code for this resource.
 func (obj *ExecRes) Init() {
 	obj.BaseRes.kind = "Exec"
 	obj.BaseRes.Init() // call base init, b/c we're overriding
@@ -101,18 +104,19 @@ func (obj *ExecRes) BufioChanScanner(scanner *bufio.Scanner) (chan string, chan
 	return ch, errch
 }

-// Exec watcher
+// Watch is the primary listener for this resource and it outputs events.
 func (obj *ExecRes) Watch(processChan chan Event) {
 	if obj.IsWatching() {
 		return
 	}
 	obj.SetWatching(true)
 	defer obj.SetWatching(false)
+	cuuid := obj.converger.Register()
+	defer cuuid.Unregister()

 	var send = false // send event?
 	var exit = false
 	bufioch, errch := make(chan string), make(chan error)
-	//vertex := obj.GetVertex() // stored with SetVertex

 	if obj.WatchCmd != "" {
 		var cmdName string
@@ -124,7 +128,7 @@ func (obj *ExecRes) Watch(processChan chan Event) {
 			cmdName = split[0]
 			//d, _ := os.Getwd() // TODO: how does this ever error ?
 			//cmdName = path.Join(d, cmdName)
-			cmdArgs = split[1:len(split)]
+			cmdArgs = split[1:]
 		} else {
 			cmdName = obj.Shell // usually bash, or sh
 			cmdArgs = []string{"-c", obj.WatchCmd}
@@ -157,7 +161,7 @@ func (obj *ExecRes) Watch(processChan chan Event) {
 		obj.SetState(resStateWatching) // reset
 		select {
 		case text := <-bufioch:
-			obj.SetConvergedState(resConvergedNil)
+			cuuid.SetConverged(false)
 			// each time we get a line of output, we loop!
 			log.Printf("%v[%v]: Watch output: %s", obj.Kind(), obj.GetName(), text)
 			if text != "" {
@@ -165,8 +169,8 @@ func (obj *ExecRes) Watch(processChan chan Event) {
 			}

 		case err := <-errch:
-			obj.SetConvergedState(resConvergedNil) // XXX ?
+			cuuid.SetConverged(false) // XXX ?
 			if err == nil { // EOF
 				// FIXME: add an "if watch command ends/crashes"
 				// restart or generate error option
 				log.Printf("%v[%v]: Reached EOF", obj.Kind(), obj.GetName())
@@ -177,14 +181,13 @@ func (obj *ExecRes) Watch(processChan chan Event) {
 			// XXX: how should we handle errors?

 		case event := <-obj.events:
-			obj.SetConvergedState(resConvergedNil)
+			cuuid.SetConverged(false)
 			if exit, send = obj.ReadEvent(&event); exit {
 				return // exit
 			}

-		case _ = <-TimeAfterOrBlock(obj.ctimeout):
-			obj.SetConvergedState(resConvergedTimeout)
-			obj.converged <- true
+		case <-cuuid.ConvergedTimer():
+			cuuid.SetConverged(true) // converged!
 			continue
 		}

@@ -200,8 +203,10 @@ func (obj *ExecRes) Watch(processChan chan Event) {
 	}
 }

+// CheckApply checks the resource state and applies the resource if the bool
+// input is true. It returns error info and if the state check passed or not.
 // TODO: expand the IfCmd to be a list of commands
-func (obj *ExecRes) CheckApply(apply bool) (stateok bool, err error) {
+func (obj *ExecRes) CheckApply(apply bool) (checkok bool, err error) {
 	log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)

 	// if there is a watch command, but no if command, run based on state
@@ -230,7 +235,7 @@ func (obj *ExecRes) CheckApply(apply bool) (stateok bool, err error) {
 			cmdName = split[0]
 			//d, _ := os.Getwd() // TODO: how does this ever error ?
 			//cmdName = path.Join(d, cmdName)
-			cmdArgs = split[1:len(split)]
+			cmdArgs = split[1:]
 		} else {
 			cmdName = obj.IfShell // usually bash, or sh
 			cmdArgs = []string{"-c", obj.IfCmd}
@@ -266,7 +271,7 @@ func (obj *ExecRes) CheckApply(apply bool) (stateok bool, err error) {
 			cmdName = split[0]
 			//d, _ := os.Getwd() // TODO: how does this ever error ?
 			//cmdName = path.Join(d, cmdName)
-			cmdArgs = split[1:len(split)]
+			cmdArgs = split[1:]
 		} else {
 			cmdName = obj.Shell // usually bash, or sh
 			cmdArgs = []string{"-c", obj.Cmd}
@@ -320,6 +325,7 @@ func (obj *ExecRes) CheckApply(apply bool) (stateok bool, err error) {
 	return false, nil // success
 }

+// ExecUUID is the UUID struct for ExecRes.
 type ExecUUID struct {
 	BaseUUID
 	Cmd string
@@ -362,13 +368,15 @@ func (obj *ExecUUID) IFF(uuid ResUUID) bool {
 	return true
 }

+// The AutoEdges method returns the AutoEdges. In this case none are used.
 func (obj *ExecRes) AutoEdges() AutoEdge {
 	// TODO: parse as many exec params to look for auto edges, for example
 	// the path of the binary in the Cmd variable might be from in a pkg
 	return nil
 }

-// include all params to make a unique identification of this object
+// GetUUIDs includes all params to make a unique identification of this object.
+// Most resources only return one, although some resources can return multiple.
 func (obj *ExecRes) GetUUIDs() []ResUUID {
 	x := &ExecUUID{
 		BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
@@ -379,18 +387,24 @@ func (obj *ExecRes) GetUUIDs() []ResUUID {
 	return []ResUUID{x}
 }

+// GroupCmp returns whether two resources can be grouped together or not.
 func (obj *ExecRes) GroupCmp(r Res) bool {
-	_, ok := r.(*SvcRes)
+	_, ok := r.(*ExecRes)
 	if !ok {
 		return false
 	}
 	return false // not possible atm
 }

+// Compare two resources and return if they are equivalent.
 func (obj *ExecRes) Compare(res Res) bool {
 	switch res.(type) {
 	case *ExecRes:
 		res := res.(*ExecRes)
+		if !obj.BaseRes.Compare(res) { // call base Compare
+			return false
+		}
+
 		if obj.Name != res.Name {
 			return false
 		}
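The Watch and CheckApply hunks above split a command string into fields when no shell is configured, and otherwise hand the whole string to the shell with -c. A standalone sketch of that decision (a hypothetical helper, not this commit's code; it assumes the os/exec and strings packages are imported and that command is non-empty):

	// buildCmd returns an *exec.Cmd either by splitting the command string
	// into fields, or by passing the whole string to a shell such as bash.
	func buildCmd(command, shell string) *exec.Cmd {
		if shell == "" {
			split := strings.Fields(command)
			return exec.Command(split[0], split[1:]...)
		}
		return exec.Command(shell, "-c", command)
	}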
file.go | 51
@@ -36,6 +36,7 @@ func init() {
 	gob.Register(&FileRes{})
 }

+// FileRes is a file and directory resource.
 type FileRes struct {
 	BaseRes `yaml:",inline"`
 	Path string `yaml:"path"` // path variable (should default to name)
@@ -46,6 +47,7 @@ type FileRes struct {
 	sha256sum string
 }

+// NewFileRes is a constructor for this resource. It also calls Init() for you.
 func NewFileRes(name, path, dirname, basename, content, state string) *FileRes {
 	// FIXME if path = nil, path = name ...
 	obj := &FileRes{
@@ -63,11 +65,14 @@ func NewFileRes(name, path, dirname, basename, content, state string) *FileRes {
 	return obj
 }

+// Init runs some startup code for this resource.
 func (obj *FileRes) Init() {
 	obj.BaseRes.kind = "File"
 	obj.BaseRes.Init() // call base init, b/c we're overriding
 }

+// GetPath returns the actual path to use for this resource. It computes this
+// after analysis of the path, dirname and basename values.
 func (obj *FileRes) GetPath() string {
 	d := Dirname(obj.Path)
 	b := Basename(obj.Path)
@@ -99,8 +104,9 @@ func (obj *FileRes) Validate() bool {
 	return true
 }

-// File watcher for files and directories
-// Modify with caution, probably important to write some test cases first!
+// Watch is the primary listener for this resource and it outputs events.
+// This one is a file watcher for files and directories.
+// Modify with caution, it is probably important to write some test cases first!
 // obj.GetPath(): file or directory
 func (obj *FileRes) Watch(processChan chan Event) {
 	if obj.IsWatching() {
@@ -108,11 +114,12 @@ func (obj *FileRes) Watch(processChan chan Event) {
 	}
 	obj.SetWatching(true)
 	defer obj.SetWatching(false)
+	cuuid := obj.converger.Register()
+	defer cuuid.Unregister()

 	//var recursive bool = false
 	//var isdir = (obj.GetPath()[len(obj.GetPath())-1:] == "/") // dirs have trailing slashes
 	//log.Printf("IsDirectory: %v", isdir)
-	//vertex := obj.GetVertex() // stored with SetVertex
 	var safename = path.Clean(obj.GetPath()) // no trailing slash

 	watcher, err := fsnotify.NewWatcher()
@@ -164,7 +171,7 @@ func (obj *FileRes) Watch(processChan chan Event) {
 			if DEBUG {
 				log.Printf("File[%v]: Watch(%v), Event(%v): %v", obj.GetName(), current, event.Name, event.Op)
 			}
-			obj.SetConvergedState(resConvergedNil) // XXX: technically i can detect if the event is erroneous or not first
+			cuuid.SetConverged(false) // XXX: technically i can detect if the event is erroneous or not first
 			// the deeper you go, the bigger the deltaDepth is...
 			// this is the difference between what we're watching,
 			// and the event... doesn't mean we can't watch deeper
@@ -234,21 +241,20 @@ func (obj *FileRes) Watch(processChan chan Event) {
 			}

 		case err := <-watcher.Errors:
-			obj.SetConvergedState(resConvergedNil) // XXX ?
+			cuuid.SetConverged(false) // XXX ?
 			log.Printf("error: %v", err)
 			log.Fatal(err)
 			//obj.events <- fmt.Sprintf("file: %v", "error") // XXX: how should we handle errors?

 		case event := <-obj.events:
-			obj.SetConvergedState(resConvergedNil)
+			cuuid.SetConverged(false)
 			if exit, send = obj.ReadEvent(&event); exit {
 				return // exit
 			}
 			//dirty = false // these events don't invalidate state

-		case _ = <-TimeAfterOrBlock(obj.ctimeout):
-			obj.SetConvergedState(resConvergedTimeout)
-			obj.converged <- true
+		case <-cuuid.ConvergedTimer():
+			cuuid.SetConverged(true) // converged!
 			continue
 		}

@@ -267,6 +273,8 @@ func (obj *FileRes) Watch(processChan chan Event) {
 	}
 }

+// HashSHA256fromContent computes the hash of the file contents and returns it.
+// It also caches the value if it can.
 func (obj *FileRes) HashSHA256fromContent() string {
 	if obj.sha256sum != "" { // return if already computed
 		return obj.sha256sum
@@ -278,6 +286,8 @@ func (obj *FileRes) HashSHA256fromContent() string {
 	return obj.sha256sum
 }

+// FileHashSHA256Check computes the hash of the actual file and compares it to
+// the computed hash of the resources file contents.
 func (obj *FileRes) FileHashSHA256Check() (bool, error) {
 	if PathIsDir(obj.GetPath()) { // assert
 		log.Fatal("This should only be called on a File resource.")
@@ -303,6 +313,8 @@ func (obj *FileRes) FileHashSHA256Check() (bool, error) {
 	return false, nil
 }

+// FileApply writes the resource file contents out to the correct path. This
+// implementation doesn't try to be particularly clever in any way.
 func (obj *FileRes) FileApply() error {
 	if PathIsDir(obj.GetPath()) {
 		log.Fatal("This should only be called on a File resource.")
@@ -328,7 +340,9 @@ func (obj *FileRes) FileApply() error {
 	return nil // success
 }

-func (obj *FileRes) CheckApply(apply bool) (stateok bool, err error) {
+// CheckApply checks the resource state and applies the resource if the bool
+// input is true. It returns error info and if the state check passed or not.
+func (obj *FileRes) CheckApply(apply bool) (checkok bool, err error) {
 	log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)

 	if obj.isStateOK { // cache the state
@@ -381,6 +395,7 @@ func (obj *FileRes) CheckApply(apply bool) (stateok bool, err error) {
 	return false, nil // success
 }

+// FileUUID is the UUID struct for FileRes.
 type FileUUID struct {
 	BaseUUID
 	path string
@@ -396,12 +411,14 @@ func (obj *FileUUID) IFF(uuid ResUUID) bool {
 	return obj.path == res.path
 }

+// FileResAutoEdges holds the state of the auto edge generator.
 type FileResAutoEdges struct {
 	data    []ResUUID
 	pointer int
 	found   bool
 }

+// Next returns the next automatic edge.
 func (obj *FileResAutoEdges) Next() []ResUUID {
 	if obj.found {
 		log.Fatal("Shouldn't be called anymore!")
@@ -414,7 +431,7 @@ func (obj *FileResAutoEdges) Next() []ResUUID {
 	return []ResUUID{value} // we return one, even though api supports N
 }

-// get results of the earlier Next() call, return if we should continue!
+// Test gets results of the earlier Next() call, & returns if we should continue!
 func (obj *FileResAutoEdges) Test(input []bool) bool {
 	// if there aren't any more remaining
 	if len(obj.data) <= obj.pointer {
@@ -433,7 +450,8 @@ func (obj *FileResAutoEdges) Test(input []bool) bool {
 	return true // keep going
 }

-// generate a simple linear sequence of each parent directory from bottom up!
+// AutoEdges generates a simple linear sequence of each parent directory from
+// the bottom up!
 func (obj *FileRes) AutoEdges() AutoEdge {
 	var data []ResUUID // store linear result chain here...
 	values := PathSplitFullReversed(obj.GetPath()) // build it
@@ -456,6 +474,8 @@ func (obj *FileRes) AutoEdges() AutoEdge {
 	}
 }

+// GetUUIDs includes all params to make a unique identification of this object.
+// Most resources only return one, although some resources can return multiple.
 func (obj *FileRes) GetUUIDs() []ResUUID {
 	x := &FileUUID{
 		BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
@@ -464,6 +484,7 @@ func (obj *FileRes) GetUUIDs() []ResUUID {
 	return []ResUUID{x}
 }

+// GroupCmp returns whether two resources can be grouped together or not.
 func (obj *FileRes) GroupCmp(r Res) bool {
 	_, ok := r.(*FileRes)
 	if !ok {
@@ -474,10 +495,15 @@ func (obj *FileRes) GroupCmp(r Res) bool {
 	return false // not possible atm
 }

+// Compare two resources and return if they are equivalent.
 func (obj *FileRes) Compare(res Res) bool {
 	switch res.(type) {
 	case *FileRes:
 		res := res.(*FileRes)
+		if !obj.BaseRes.Compare(res) { // call base Compare
+			return false
+		}
+
 		if obj.Name != res.Name {
 			return false
 		}
@@ -496,6 +522,7 @@ func (obj *FileRes) Compare(res Res) bool {
 	return true
 }

+// CollectPattern applies the pattern for collection resources.
 func (obj *FileRes) CollectPattern(pattern string) {
 	// XXX: currently the pattern for files can only override the Dirname variable :P
 	obj.Dirname = pattern // XXX: simplistic for now
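The HashSHA256fromContent and FileHashSHA256Check methods documented above compare a stored content hash against the file on disk. A standalone sketch of that comparison (hypothetical code, not this commit's implementation; assumes crypto/sha256, fmt and io/ioutil are imported):

	// contentMatches reports whether the file at path already has the
	// wanted content, by comparing sha256 hex digests.
	func contentMatches(path, content string) (bool, error) {
		want := fmt.Sprintf("%x", sha256.Sum256([]byte(content)))
		data, err := ioutil.ReadFile(path)
		if err != nil {
			return false, err // includes the "file missing" case
		}
		got := fmt.Sprintf("%x", sha256.Sum256(data))
		return want == got, nil
	}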
gopath/.gitignore | 2 | vendored | Normal file
@@ -0,0 +1,2 @@
bin/
pkg/

gopath/src | 1 | Symbolic link
@@ -0,0 +1 @@
../vendor
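The gopath/src symlink points back at vendor/, so a plain GOPATH-based toolchain can resolve the vendored packages. One assumed way to use it when building by hand (this workflow is not spelled out in the diff itself):

	# from the project root; gopath/src -> ../vendor resolves imports there
	export GOPATH="$PWD/gopath:$GOPATH"
	go build -o mgmt .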
450
main.go
450
main.go
@@ -18,7 +18,11 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/codegangsta/cli"
|
"fmt"
|
||||||
|
etcdtypes "github.com/coreos/etcd/pkg/types"
|
||||||
|
"github.com/coreos/pkg/capnslog"
|
||||||
|
"github.com/urfave/cli"
|
||||||
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
@@ -31,10 +35,14 @@ import (
|
|||||||
var (
|
var (
|
||||||
program string
|
program string
|
||||||
version string
|
version string
|
||||||
|
prefix = fmt.Sprintf("/var/lib/%s/", program)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// variables controlling verbosity
|
||||||
const (
|
const (
|
||||||
DEBUG = false
|
DEBUG = false // add additional log messages
|
||||||
|
TRACE = false // add execution flow log messages
|
||||||
|
VERBOSE = false // add extra log message output
|
||||||
)
|
)
|
||||||
|
|
||||||
// signal handler
|
// signal handler
|
||||||
@@ -56,13 +64,113 @@ func waitForSignal(exit chan bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func run(c *cli.Context) {
|
// run is the main run target.
|
||||||
|
func run(c *cli.Context) error {
|
||||||
var start = time.Now().UnixNano()
|
var start = time.Now().UnixNano()
|
||||||
var wg sync.WaitGroup
|
|
||||||
exit := make(chan bool) // exit signal
|
|
||||||
converged := make(chan bool) // converged signal
|
|
||||||
log.Printf("This is: %v, version: %v", program, version)
|
log.Printf("This is: %v, version: %v", program, version)
|
||||||
log.Printf("Main: Start: %v", start)
|
log.Printf("Main: Start: %v", start)
|
||||||
|
|
||||||
|
hostname, _ := os.Hostname()
|
||||||
|
// allow passing in the hostname, instead of using --hostname
|
||||||
|
if c.IsSet("file") {
|
||||||
|
if config := ParseConfigFromFile(c.String("file")); config != nil {
|
||||||
|
if h := config.Hostname; h != "" {
|
||||||
|
hostname = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c.IsSet("hostname") { // override by cli
|
||||||
|
if h := c.String("hostname"); h != "" {
|
||||||
|
hostname = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
noop := c.Bool("noop")
|
||||||
|
|
||||||
|
seeds, err := etcdtypes.NewURLs(
|
||||||
|
FlattenListWithSplit(c.StringSlice("seeds"), []string{",", ";", " "}),
|
||||||
|
)
|
||||||
|
if err != nil && len(c.StringSlice("seeds")) > 0 {
|
||||||
|
log.Printf("Main: Error: seeds didn't parse correctly!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
clientURLs, err := etcdtypes.NewURLs(
|
||||||
|
FlattenListWithSplit(c.StringSlice("client-urls"), []string{",", ";", " "}),
|
||||||
|
)
|
||||||
|
if err != nil && len(c.StringSlice("client-urls")) > 0 {
|
||||||
|
log.Printf("Main: Error: clientURLs didn't parse correctly!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
serverURLs, err := etcdtypes.NewURLs(
|
||||||
|
FlattenListWithSplit(c.StringSlice("server-urls"), []string{",", ";", " "}),
|
||||||
|
)
|
||||||
|
if err != nil && len(c.StringSlice("server-urls")) > 0 {
|
||||||
|
log.Printf("Main: Error: serverURLs didn't parse correctly!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
idealClusterSize := uint16(c.Int("ideal-cluster-size"))
|
||||||
|
if idealClusterSize < 1 {
|
||||||
|
log.Printf("Main: Error: idealClusterSize should be at least one!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.IsSet("file") && c.IsSet("puppet") {
|
||||||
|
log.Println("Main: Error: the --file and --puppet parameters cannot be used together!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Bool("no-server") && len(c.StringSlice("remote")) > 0 {
|
||||||
|
// TODO: in this case, we won't be able to tunnel stuff back to
|
||||||
|
// here, so if we're okay with every remote graph running in an
|
||||||
|
// isolated mode, then this is okay. Improve on this if there's
|
||||||
|
// someone who really wants to be able to do this.
|
||||||
|
log.Println("Main: Error: the --no-server and --remote parameters cannot be used together!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
cConns := uint16(c.Int("cconns"))
|
||||||
|
if cConns < 0 {
|
||||||
|
log.Printf("Main: Error: --cconns should be at least zero!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.IsSet("converged-timeout") && cConns > 0 && len(c.StringSlice("remote")) > c.Int("cconns") {
|
||||||
|
log.Printf("Main: Error: combining --converged-timeout with more remotes than available connections will never converge!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
depth := uint16(c.Int("depth"))
|
||||||
|
if depth < 0 { // user should not be using this argument manually
|
||||||
|
log.Printf("Main: Error: negative values for --depth are not permitted!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.IsSet("prefix") && c.Bool("tmp-prefix") {
|
||||||
|
log.Println("Main: Error: combining --prefix and the request for a tmp prefix is illogical!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
if s := c.String("prefix"); c.IsSet("prefix") && s != "" {
|
||||||
|
prefix = s
|
||||||
|
}
|
||||||
|
|
||||||
|
// make sure the working directory prefix exists
|
||||||
|
if c.Bool("tmp-prefix") || os.MkdirAll(prefix, 0770) != nil {
|
||||||
|
if c.Bool("tmp-prefix") || c.Bool("allow-tmp-prefix") {
|
||||||
|
if prefix, err = ioutil.TempDir("", program+"-"); err != nil {
|
||||||
|
log.Printf("Main: Error: Can't create temporary prefix!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
log.Println("Main: Warning: Working prefix directory is temporary!")
|
||||||
|
|
||||||
|
} else {
|
||||||
|
log.Printf("Main: Error: Can't create prefix!")
|
||||||
|
return cli.NewExitError("", 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Printf("Main: Working prefix is: %s", prefix)
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
exit := make(chan bool) // exit signal
|
||||||
var G, fullGraph *Graph
|
var G, fullGraph *Graph
|
||||||
|
|
||||||
// exit after `max-runtime` seconds for no reason at all...
|
// exit after `max-runtime` seconds for no reason at all...
|
||||||
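The prefix logic above boils down to: use the requested prefix if it can be created, otherwise fall back to a throwaway temporary directory, but only when --tmp-prefix or --allow-tmp-prefix permits it. A minimal sketch of that fallback pattern (not mgmt's actual code; the directory and program name below are made up):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

// choosePrefix mirrors the fallback pattern above: try the requested prefix,
// and if it can't be created (or a tmp prefix was explicitly requested), fall
// back to a temporary directory when that is allowed.
func choosePrefix(prefix string, tmpPrefix, allowTmpPrefix bool) (string, error) {
	if !tmpPrefix && os.MkdirAll(prefix, 0770) == nil {
		return prefix, nil // normal case: the prefix exists or was created
	}
	if tmpPrefix || allowTmpPrefix {
		p, err := ioutil.TempDir("", "mgmt-") // hypothetical program name prefix
		if err != nil {
			return "", fmt.Errorf("can't create temporary prefix: %v", err)
		}
		log.Println("warning: working prefix directory is temporary!")
		return p, nil
	}
	return "", fmt.Errorf("can't create prefix: %s", prefix)
}

func main() {
	p, err := choosePrefix("/tmp/mgmt-example/", false, true) // example path
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("working prefix is: %s", p)
}
```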
@@ -73,81 +181,131 @@ func run(c *cli.Context) {
 }()
 }

-// initial etcd peer endpoint
+// setup converger
-seed := c.String("seed")
+converger := NewConverger(
-if seed == "" {
+	c.Int("converged-timeout"),
-	// XXX: start up etcd server, others will join me!
+	nil, // stateFn gets added in by EmbdEtcd
-	seed = "http://127.0.0.1:2379" // thus we use the local server!
+)
+go converger.Loop(true) // main loop for converger, true to start paused

+// embedded etcd
+if len(seeds) == 0 {
+	log.Printf("Main: Seeds: No seeds specified!")
+} else {
+	log.Printf("Main: Seeds(%v): %v", len(seeds), seeds)
 }
-// then, connect to `seed` as a client
+EmbdEtcd := NewEmbdEtcd(
+	hostname,
-// FIXME: validate seed, or wait for it to fail in etcd init?
+	seeds,
+	clientURLs,
-// etcd
+	serverURLs,
-etcdO := &EtcdWObject{
+	c.Bool("no-server"),
-	seed: seed,
+	idealClusterSize,
-	ctimeout: c.Int("converged-timeout"),
+	prefix,
-	converged: converged,
+	converger,
+)
+if EmbdEtcd == nil {
+	// TODO: verify EmbdEtcd is not nil below...
+	log.Printf("Main: Etcd: Creation failed!")
+	exit <- true
+} else if err := EmbdEtcd.Startup(); err != nil { // startup (returns when etcd main loop is running)
+	log.Printf("Main: Etcd: Startup failed: %v", err)
+	exit <- true
+}
+convergerStateFn := func(b bool) error {
+	// exit if we are using the converged-timeout and we are the
+	// root node. otherwise, if we are a child node in a remote
+	// execution hierarchy, we should only notify our converged
+	// state and wait for the parent to trigger the exit.
+	if depth == 0 && c.Int("converged-timeout") >= 0 {
+		if b {
+			log.Printf("Converged for %d seconds, exiting!", c.Int("converged-timeout"))
+			exit <- true // trigger an exit!
+		}
+		return nil
+	}
+	// send our individual state into etcd for others to see
+	return EtcdSetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
+}
+if EmbdEtcd != nil {
+	converger.SetStateFn(convergerStateFn)
 }

-hostname := c.String("hostname")
+exitchan := make(chan Event) // exit event
-if hostname == "" {
-	hostname, _ = os.Hostname() // etcd watch key // XXX: this is not the correct key name this is the set key name... WOOPS
-}
 go func() {
 	startchan := make(chan struct{}) // start signal
 	go func() { startchan <- struct{}{} }()
 	file := c.String("file")
-	configchan := make(chan bool)
+	var configchan chan bool
-	if !c.Bool("no-watch") {
+	var puppetchan <-chan time.Time
+	if !c.Bool("no-watch") && c.IsSet("file") {
 		configchan = ConfigWatch(file)
+	} else if c.IsSet("puppet") {
+		interval := PuppetInterval(c.String("puppet-conf"))
+		puppetchan = time.Tick(time.Duration(interval) * time.Second)
 	}
 	log.Println("Etcd: Starting...")
-	etcdchan := etcdO.EtcdWatch()
+	etcdchan := EtcdWatch(EmbdEtcd)
 	first := true // first loop or not
 	for {
 		log.Println("Main: Waiting...")
 		select {
-		case _ = <-startchan: // kick the loop once at start
+		case <-startchan: // kick the loop once at start
 			// pass
-		case msg := <-etcdchan:
-			switch msg {
+		case b := <-etcdchan:
-			// some types of messages we ignore...
+			if !b { // ignore the message
-			case etcdFoo, etcdBar:
 				continue
-			// while others passthrough and cause a compile!
-			case etcdStart, etcdEvent:
-				// pass
-			default:
-				log.Fatal("Etcd: Unhandled message: ", msg)
 			}
+			// everything else passes through to cause a compile!

+		case <-puppetchan:
+			// nothing, just go on

 		case msg := <-configchan:
 			if c.Bool("no-watch") || !msg {
 				continue // not ready to read config
 			}
-		//case compile_event: XXX
+		// XXX: case compile_event: ...
+			// ...
+		case msg := <-exitchan:
+			msg.ACK()
+			return
 		}

-		config := ParseConfigFromFile(file)
+		var config *GraphConfig
+		if c.IsSet("file") {
+			config = ParseConfigFromFile(file)
+		} else if c.IsSet("puppet") {
+			config = ParseConfigFromPuppet(c.String("puppet"), c.String("puppet-conf"))
+		}
 		if config == nil {
-			log.Printf("Config parse failure")
+			log.Printf("Config: Parse failure")
 			continue
 		}

+		if config.Hostname != "" && config.Hostname != hostname {
+			log.Printf("Config: Hostname changed, ignoring config!")
+			continue
+		}
+		config.Hostname = hostname // set it in case it was ""

 		// run graph vertex LOCK...
 		if !first { // TODO: we can flatten this check out I think
-			G.Pause() // sync
+			converger.Pause() // FIXME: add sync wait?
+			G.Pause() // sync
 		}

 		// build graph from yaml file on events (eg: from etcd)
 		// we need the vertices to be paused to work on them
-		if newFullgraph, err := fullGraph.NewGraphFromConfig(config, etcdO, hostname); err == nil { // keep references to all original elements
+		if newFullgraph, err := fullGraph.NewGraphFromConfig(config, EmbdEtcd, noop); err == nil { // keep references to all original elements
 			fullGraph = newFullgraph
 		} else {
 			log.Printf("Config: Error making new graph from config: %v", err)
 			// unpause!
 			if !first {
 				G.Start(&wg, first) // sync
+				converger.Start() // after G.Start()
 			}
 			continue
 		}
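The convergerStateFn above is just a callback: it receives true once the node has been quiet for the converged-timeout and false when activity resumes; the root node (depth 0) uses it to trigger an exit, while child nodes publish their state into etcd. A toy sketch of that contract using plain channels rather than mgmt's Converger type (every name here is a stand-in, not the real API):

```go
package main

import (
	"fmt"
	"time"
)

// toyConverger is NOT mgmt's implementation; it only shows the shape of the
// stateFn contract: the callback gets true after a quiet period of `timeout`,
// and false whenever new activity is seen.
type toyConverger struct {
	timeout time.Duration
	stateFn func(converged bool) error
	events  chan struct{}
}

func (c *toyConverger) Loop(exit chan struct{}) {
	for {
		select {
		case <-c.events: // activity: not converged, and the timeout restarts
			c.stateFn(false)
		case <-time.After(c.timeout): // quiet for the whole timeout
			c.stateFn(true)
		case <-exit:
			return
		}
	}
}

func main() {
	exit := make(chan struct{})
	c := &toyConverger{
		timeout: time.Second,
		events:  make(chan struct{}),
		stateFn: func(b bool) error {
			fmt.Println("converged:", b)
			if b {
				close(exit) // a root node (depth 0) would trigger shutdown here
			}
			return nil
		},
	}
	go c.Loop(exit)
	c.events <- struct{}{} // simulate one event, then go quiet
	<-exit
}
```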
@@ -165,50 +323,77 @@ func run(c *cli.Context) {
 } else {
 	log.Printf("Graphviz: Successfully generated graph!")
 }
-G.SetVertex()
+G.AssociateData(converger)
-G.SetConvergedCallback(c.Int("converged-timeout"), converged)
 // G.Start(...) needs to be synchronous or wait,
 // because if half of the nodes are started and
 // some are not ready yet and the EtcdWatch
 // loops, we'll cause G.Pause(...) before we
 // even got going, thus causing nil pointer errors
 G.Start(&wg, first) // sync
+converger.Start() // after G.Start()
 first = false
 }
 }()

-if i := c.Int("converged-timeout"); i >= 0 {
+configWatcher := NewConfigWatcher()
-	go func() {
+events := configWatcher.Events()
-	ConvergedLoop:
+if !c.Bool("no-watch") {
-		for {
+	configWatcher.Add(c.StringSlice("remote")...) // add all the files...
-			<-converged // when anyone says they have converged
+} else {
+	events = nil // signal that no-watch is true
-			if etcdO.GetConvergedState() != etcdConvergedTimeout {
-				continue
-			}
-			for v := range G.GetVerticesChan() {
-				if v.Res.GetConvergedState() != resConvergedTimeout {
-					continue ConvergedLoop
-				}
-			}
-
-			// if all have converged, exit
-			log.Printf("Converged for %d seconds, exiting!", i)
-			exit <- true
-			for {
-				<-converged
-			} // unblock/drain
-			//return
-		}
-	}()
 }

+// initialize the add watcher, which calls the f callback on map changes
+convergerCb := func(f func(map[string]bool) error) (func(), error) {
+	return EtcdAddHostnameConvergedWatcher(EmbdEtcd, f)
+}

+// build remotes struct for remote ssh
+remotes := NewRemotes(
+	EmbdEtcd.LocalhostClientURLs().StringSlice(),
+	[]string{DefaultClientURL},
+	noop,
+	c.StringSlice("remote"), // list of files
+	events, // watch for file changes
+	cConns,
+	c.Bool("allow-interactive"),
+	c.String("ssh-priv-id-rsa"),
+	!c.Bool("no-caching"),
+	depth,
+	prefix,
+	converger,
+	convergerCb,
+)

+// TODO: is there any benefit to running the remotes above in the loop?
+// wait for etcd to be running before we remote in, which we do above!
+go remotes.Run()

+if !c.IsSet("file") && !c.IsSet("puppet") {
+	converger.Start() // better start this for empty graphs
+}
 log.Println("Main: Running...")

 waitForSignal(exit) // pass in exit channel to watch

+log.Println("Destroy...")

+configWatcher.Close() // stop sending file changes to remotes
+remotes.Exit() // tell all the remote connections to shutdown; waits!

 G.Exit() // tell all the children to exit

+// tell inner main loop to exit
+resp := NewResp()
+go func() { exitchan <- Event{eventExit, resp, "", false} }()

+// cleanup etcd main loop last so it can process everything first
+if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd
+	log.Printf("Etcd exited poorly with: %v", err)
+}

+resp.ACKWait() // let inner main loop finish cleanly just in case

 if DEBUG {
 	log.Printf("Graph: %v", G)
 }
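The teardown above hands a resp to the inner main loop through exitchan and then blocks on resp.ACKWait() until the loop has drained. A self-contained stand-in for that acknowledge-and-wait pattern (these are not the real Event/Resp types, just an illustration of the idea):

```go
package main

import (
	"fmt"
	"sync"
)

// resp is a tiny stand-in for the resp.ACKWait() idea used in the teardown
// above: the shutting-down side hands a resp to the inner loop, asks it to
// exit, and then blocks until the loop acknowledges it finished cleanly.
type resp struct{ wg sync.WaitGroup }

func newResp() *resp {
	r := &resp{}
	r.wg.Add(1)
	return r
}

func (r *resp) ACK()     { r.wg.Done() }
func (r *resp) ACKWait() { r.wg.Wait() }

func main() {
	exitchan := make(chan *resp)

	go func() { // the "inner main loop"
		r := <-exitchan
		fmt.Println("inner loop: cleaning up...")
		r.ACK() // tell the requester we're done
	}()

	r := newResp()
	exitchan <- r // ask the loop to exit
	r.ACKWait()   // block until it has acknowledged
	fmt.Println("shutdown complete")
}
```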
@@ -217,13 +402,26 @@ func run(c *cli.Context) {

 // TODO: wait for each vertex to exit...
 log.Println("Goodbye!")
+return nil
 }

 func main() {
-//if DEBUG {
+var flags int
-log.SetFlags(log.LstdFlags | log.Lshortfile)
+if DEBUG || true { // TODO: remove || true
-//}
+	flags = log.LstdFlags | log.Lshortfile
-log.SetFlags(log.Flags() - log.Ldate) // remove the date for now
+}
+flags = (flags - log.Ldate) // remove the date for now
+log.SetFlags(flags)

+// un-hijack from capnslog...
+log.SetOutput(os.Stderr)
+if VERBOSE {
+	capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stderr, "(etcd) ", flags))
+} else {
+	capnslog.SetFormatter(capnslog.NewNilFormatter())
+}

+// test for sanity
 if program == "" || version == "" {
 	log.Fatal("Program was not compiled correctly. Please see Makefile.")
 }
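For reference, the flag arithmetic in main() above (standard flags plus short file names, minus the date) can also be written with a bit-clear. A minimal standalone example using only the standard log package:

```go
package main

import (
	"log"
	"os"
)

func main() {
	// Same flags as the diff above: standard flags plus short file names...
	flags := log.LstdFlags | log.Lshortfile
	flags &^= log.Ldate // ...then drop the date; equivalent to the subtraction above
	log.SetFlags(flags)
	log.SetOutput(os.Stderr)
	log.Println("logging configured") // prints time, file:line and the message
}
```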
@@ -241,9 +439,10 @@ func main() {
 Action: run,
 Flags: []cli.Flag{
 	cli.StringFlag{
 		Name:  "file, f",
 		Value: "",
 		Usage: "graph definition to run",
+		EnvVar: "MGMT_FILE",
 	},
 	cli.BoolFlag{
 		Name: "no-watch",
@@ -271,20 +470,105 @@ func main() {
 		Usage: "hostname to use",
 	},
 	// if empty, it will startup a new server
+	cli.StringSliceFlag{
+		Name:   "seeds, s",
+		Value:  &cli.StringSlice{}, // empty slice
+		Usage:  "default etc client endpoint",
+		EnvVar: "MGMT_SEEDS",
+	},
+	// port 2379 and 4001 are common
+	cli.StringSliceFlag{
+		Name:   "client-urls",
+		Value:  &cli.StringSlice{},
+		Usage:  "list of URLs to listen on for client traffic",
+		EnvVar: "MGMT_CLIENT_URLS",
+	},
+	// port 2380 and 7001 are common
+	cli.StringSliceFlag{
+		Name:   "server-urls, peer-urls",
+		Value:  &cli.StringSlice{},
+		Usage:  "list of URLs to listen on for server (peer) traffic",
+		EnvVar: "MGMT_SERVER_URLS",
+	},
+	cli.BoolFlag{
+		Name:  "no-server",
+		Usage: "do not let other servers peer with me",
+	},
+	cli.IntFlag{
+		Name:   "ideal-cluster-size",
+		Value:  defaultIdealClusterSize,
+		Usage:  "ideal number of server peers in cluster, only read by initial server",
+		EnvVar: "MGMT_IDEAL_CLUSTER_SIZE",
+	},
+	cli.IntFlag{
+		Name:   "converged-timeout, t",
+		Value:  -1,
+		Usage:  "exit after approximately this many seconds in a converged state",
+		EnvVar: "MGMT_CONVERGED_TIMEOUT",
+	},
+	cli.IntFlag{
+		Name:   "max-runtime",
+		Value:  0,
+		Usage:  "exit after a maximum of approximately this many seconds",
+		EnvVar: "MGMT_MAX_RUNTIME",
+	},
+	cli.BoolFlag{
+		Name:  "noop",
+		Usage: "globally force all resources into no-op mode",
+	},
 	cli.StringFlag{
-		Name:  "seed, s",
+		Name:  "puppet, p",
 		Value: "",
-		Usage: "default etc peer endpoint",
+		Usage: "load graph from puppet, optionally takes a manifest or path to manifest file",
+	},
+	cli.StringFlag{
+		Name:  "puppet-conf",
+		Value: "",
+		Usage: "supply the path to an alternate puppet.conf file to use",
+	},
+	cli.StringSliceFlag{
+		Name:  "remote",
+		Value: &cli.StringSlice{},
+		Usage: "list of remote graph definitions to run",
+	},
+	cli.BoolFlag{
+		Name:  "allow-interactive",
+		Usage: "allow interactive prompting, such as for remote passwords",
+	},
+	cli.StringFlag{
+		Name:   "ssh-priv-id-rsa",
+		Value:  "~/.ssh/id_rsa",
+		Usage:  "default path to ssh key file, set empty to never touch",
+		EnvVar: "MGMT_SSH_PRIV_ID_RSA",
 	},
 	cli.IntFlag{
-		Name:  "converged-timeout, t",
+		Name:   "cconns",
-		Value: -1,
+		Value:  0,
-		Usage: "exit after approximately this many seconds in a converged state",
+		Usage:  "number of maximum concurrent remote ssh connections to run, 0 for unlimited",
+		EnvVar: "MGMT_CCONNS",
+	},
+	cli.BoolFlag{
+		Name:  "no-caching",
+		Usage: "don't allow remote caching of remote execution binary",
 	},
 	cli.IntFlag{
-		Name:  "max-runtime",
+		Name:   "depth",
-		Value: 0,
+		Hidden: true, // internal use only
-		Usage: "exit after a maximum of approximately this many seconds",
+		Value:  0,
+		Usage:  "specify depth in remote hierarchy",
+	},
+	cli.StringFlag{
+		Name:   "prefix",
+		Usage:  "specify a path to the working prefix directory",
+		EnvVar: "MGMT_PREFIX",
+	},
+	cli.BoolFlag{
+		Name:  "tmp-prefix",
+		Usage: "request a pseudo-random, temporary prefix to be used",
+	},
+	cli.BoolFlag{
+		Name:  "allow-tmp-prefix",
+		Usage: "allow creation of a new temporary prefix if main prefix is unavailable",
 	},
 },
 },
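The flag declarations above follow the cli library's v1 style, where each flag can carry a Value, a Usage string and an EnvVar fallback. A minimal, self-contained example in that style; the import path, app name and flag subset are assumptions for illustration and not mgmt's full definition:

```go
package main

import (
	"log"
	"os"

	"github.com/urfave/cli" // formerly codegangsta/cli; exact path assumed here
)

func main() {
	app := cli.NewApp()
	app.Name = "example" // illustrative only
	app.Commands = []cli.Command{
		{
			Name: "run",
			Action: func(c *cli.Context) error {
				log.Printf("file=%s remotes=%v cconns=%d",
					c.String("file"), c.StringSlice("remote"), c.Int("cconns"))
				return nil
			},
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:   "file, f",
					Value:  "",
					Usage:  "graph definition to run",
					EnvVar: "MGMT_FILE", // flag falls back to this env var
				},
				cli.StringSliceFlag{
					Name:  "remote",
					Value: &cli.StringSlice{},
					Usage: "list of remote graph definitions to run",
				},
				cli.IntFlag{
					Name:   "cconns",
					Value:  0,
					Usage:  "max concurrent remote ssh connections, 0 for unlimited",
					EnvVar: "MGMT_CCONNS",
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```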
122	misc.go
@@ -25,12 +25,15 @@ import (
 	"time"
 )

-// returns the string with the first character capitalized
+// FirstToUpper returns the string with the first character capitalized.
 func FirstToUpper(str string) string {
+	if str == "" {
+		return ""
+	}
 	return strings.ToUpper(str[0:1]) + str[1:]
 }

-// return true if a string exists inside a list, otherwise false
+// StrInList returns true if a string exists inside a list, otherwise false.
 func StrInList(needle string, haystack []string) bool {
 	for _, x := range haystack {
 		if needle == x {
@@ -40,8 +43,19 @@ func StrInList(needle string, haystack []string) bool {
 	return false
 }

-// remove any duplicate values in the list
+// Uint64KeyFromStrInMap returns true if needle is found in haystack of keys
-// possibly sub-optimal, O(n^2)? implementation
+// that have uint64 type.
+func Uint64KeyFromStrInMap(needle string, haystack map[uint64]string) (uint64, bool) {
+	for k, v := range haystack {
+		if v == needle {
+			return k, true
+		}
+	}
+	return 0, false
+}

+// StrRemoveDuplicatesInList removes any duplicate values in the list.
+// This is a possibly sub-optimal, O(n^2)? implementation.
 func StrRemoveDuplicatesInList(list []string) []string {
 	unique := []string{}
 	for _, x := range list {
@@ -52,7 +66,8 @@ func StrRemoveDuplicatesInList(list []string) []string {
 	return unique
 }

-// remove any of the elements in filter, if they exist in list
+// StrFilterElementsInList removes any of the elements in filter, if they exist
+// in the list.
 func StrFilterElementsInList(filter []string, list []string) []string {
 	result := []string{}
 	for _, x := range list {
@@ -63,8 +78,8 @@ func StrFilterElementsInList(filter []string, list []string) []string {
 	return result
 }

-// remove any of the elements in filter, if they don't exist in list
+// StrListIntersection removes any of the elements in filter, if they don't
-// this is an in order intersection of two lists
+// exist in the list. This is an in order intersection of two lists.
 func StrListIntersection(list1 []string, list2 []string) []string {
 	result := []string{}
 	for _, x := range list1 {
@@ -75,7 +90,7 @@ func StrListIntersection(list1 []string, list2 []string) []string {
 	return result
 }

-// reverse a list of strings
+// ReverseStringList reverses a list of strings.
 func ReverseStringList(in []string) []string {
 	var out []string // empty list
 	l := len(in)
@@ -85,19 +100,31 @@ func ReverseStringList(in []string) []string {
 	return out
 }

-// return the sorted list of string keys in a map with string keys
+// StrMapKeys return the sorted list of string keys in a map with string keys.
 // NOTE: i thought it would be nice for this to use: map[string]interface{} but
-// it turns out that's not allowed. I know we don't have generics, but common!
+// it turns out that's not allowed. I know we don't have generics, but come on!
 func StrMapKeys(m map[string]string) []string {
 	result := []string{}
-	for k, _ := range m {
+	for k := range m {
 		result = append(result, k)
 	}
 	sort.Strings(result) // deterministic order
 	return result
 }

-// return the sorted list of bool values in a map with string values
+// StrMapKeysUint64 return the sorted list of string keys in a map with string
+// keys but uint64 values.
+func StrMapKeysUint64(m map[string]uint64) []string {
+	result := []string{}
+	for k := range m {
+		result = append(result, k)
+	}
+	sort.Strings(result) // deterministic order
+	return result
+}

+// BoolMapValues returns the sorted list of bool values in a map with string
+// values.
 func BoolMapValues(m map[string]bool) []bool {
 	result := []bool{}
 	for _, v := range m {
@@ -107,7 +134,8 @@ func BoolMapValues(m map[string]bool) []bool {
 	return result
 }

-// return the sorted list of string values in a map with string values
+// StrMapValues returns the sorted list of string values in a map with string
+// values.
 func StrMapValues(m map[string]string) []string {
 	result := []string{}
 	for _, v := range m {
@@ -117,7 +145,18 @@ func StrMapValues(m map[string]string) []string {
 	return result
 }

-// return true if everyone is true
+// StrMapValuesUint64 return the sorted list of string values in a map with
+// string values.
+func StrMapValuesUint64(m map[uint64]string) []string {
+	result := []string{}
+	for _, v := range m {
+		result = append(result, v)
+	}
+	sort.Strings(result) // deterministic order
+	return result
+}

+// BoolMapTrue returns true if everyone in the list is true.
 func BoolMapTrue(l []bool) bool {
 	for _, b := range l {
 		if !b {
@@ -127,7 +166,7 @@ func BoolMapTrue(l []bool) bool {
 	return true
 }

-// Similar to the GNU dirname command
+// Dirname is similar to the GNU dirname command.
 func Dirname(p string) string {
 	if p == "/" {
 		return ""
@@ -136,6 +175,7 @@ func Dirname(p string) string {
 	return d
 }

+// Basename is the base of a path string.
 func Basename(p string) string {
 	_, b := path.Split(path.Clean(p))
 	if p == "" {
@@ -147,7 +187,8 @@ func Basename(p string) string {
 	return b
 }

-// Split a path into an array of tokens excluding any trailing empty tokens
+// PathSplit splits a path into an array of tokens excluding any trailing empty
+// tokens.
 func PathSplit(p string) []string {
 	if p == "/" { // TODO: can't this all be expressed nicely in one line?
 		return []string{""}
@@ -155,7 +196,7 @@ func PathSplit(p string) []string {
 	return strings.Split(path.Clean(p), "/")
 }

-// Does path string contain the given path prefix in it?
+// HasPathPrefix tells us if a path string contain the given path prefix in it.
 func HasPathPrefix(p, prefix string) bool {

 	patharray := PathSplit(p)
@@ -174,6 +215,8 @@ func HasPathPrefix(p, prefix string) bool {
 	return true
 }

+// StrInPathPrefixList returns true if the needle is a PathPrefix in the
+// haystack.
 func StrInPathPrefixList(needle string, haystack []string) bool {
 	for _, x := range haystack {
 		if HasPathPrefix(x, needle) {
@@ -183,7 +226,8 @@ func StrInPathPrefixList(needle string, haystack []string) bool {
 	return false
 }

-// remove redundant file path prefixes that are under the tree of other files
+// RemoveCommonFilePrefixes removes redundant file path prefixes that are under
+// the tree of other files.
 func RemoveCommonFilePrefixes(paths []string) []string {
 	var result = make([]string, len(paths))
 	for i := 0; i < len(paths); i++ { // copy, b/c append can modify the args!!
@@ -214,7 +258,8 @@ loop:
 	return result
 }

-// Delta of path prefix, tells you how many path tokens different the prefix is
+// PathPrefixDelta returns the delta of the path prefix, which tells you how
+// many path tokens different the prefix is.
 func PathPrefixDelta(p, prefix string) int {

 	if !HasPathPrefix(p, prefix) {
@@ -225,11 +270,13 @@ func PathPrefixDelta(p, prefix string) int {
 	return len(patharray) - len(prefixarray)
 }

+// PathIsDir returns true if there is a trailing slash.
 func PathIsDir(p string) bool {
 	return p[len(p)-1:] == "/" // a dir has a trailing slash in this context
 }

-// return the full list of "dependency" paths for a given path in reverse order
+// PathSplitFullReversed returns the full list of "dependency" paths for a given
+// path in reverse order.
 func PathSplitFullReversed(p string) []string {
 	var result []string
 	split := PathSplit(p)
@@ -245,8 +292,8 @@ func PathSplitFullReversed(p string) []string {
 	return ReverseStringList(result)
 }

-// add trailing slashes to any likely dirs in a package manager fileList
+// DirifyFileList adds trailing slashes to any likely dirs in a package manager
-// if removeDirs is true, instead, don't keep the dirs in our output
+// fileList if removeDirs is true, otherwise, don't keep the dirs in our output.
 func DirifyFileList(fileList []string, removeDirs bool) []string {
 	dirs := []string{}
 	for _, file := range fileList {
@@ -270,8 +317,31 @@ func DirifyFileList(fileList []string, removeDirs bool) []string {
 	return result
 }

-// special version of time.After that blocks when given a negative integer
+// FlattenListWithSplit flattens a list of input by splitting each element by
-// when used in a case statement, the timer restarts on each select call to it
+// any and all of the strings listed in the split array
+func FlattenListWithSplit(input []string, split []string) []string {
+	if len(split) == 0 { // nothing to split by
+		return input
+	}
+	out := []string{}
+	for _, x := range input {
+		s := []string{}
+		if len(split) == 1 {
+			s = strings.Split(x, split[0]) // split by only string
+		} else {
+			s = []string{x} // initial
+			for i := range split {
+				s = FlattenListWithSplit(s, []string{split[i]}) // recurse
+			}
+		}
+		out = append(out, s...)
+	}
+	return out
+}

+// TimeAfterOrBlock is aspecial version of time.After that blocks when given a
+// negative integer. When used in a case statement, the timer restarts on each
+// select call to it.
 func TimeAfterOrBlock(t int) <-chan time.Time {
 	if t < 0 {
 		return make(chan time.Time) // blocks forever
@@ -279,8 +349,8 @@ func TimeAfterOrBlock(t int) <-chan time.Time {
 	return time.After(time.Duration(t) * time.Second)
 }

-// making using the private bus usable, should be upstream:
+// SystemBusPrivateUsable makes using the private bus usable
-// TODO: https://github.com/godbus/dbus/issues/15
+// TODO: should be upstream: https://github.com/godbus/dbus/issues/15
 func SystemBusPrivateUsable() (conn *dbus.Conn, err error) {
 	conn, err = dbus.SystemBusPrivate()
 	if err != nil {
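FlattenListWithSplit is what lets --seeds, --client-urls and --server-urls accept comma, semicolon or space separated values. The function below is copied verbatim from the misc.go hunk above so that the example compiles on its own; the URLs are made up:

```go
package main

import (
	"fmt"
	"strings"
)

// FlattenListWithSplit flattens a list of input by splitting each element by
// any and all of the strings listed in the split array (verbatim copy from
// the misc.go hunk above).
func FlattenListWithSplit(input []string, split []string) []string {
	if len(split) == 0 { // nothing to split by
		return input
	}
	out := []string{}
	for _, x := range input {
		s := []string{}
		if len(split) == 1 {
			s = strings.Split(x, split[0]) // split by only string
		} else {
			s = []string{x} // initial
			for i := range split {
				s = FlattenListWithSplit(s, []string{split[i]}) // recurse
			}
		}
		out = append(out, s...)
	}
	return out
}

func main() {
	// mirrors how main.go feeds the seed/client/server URL flags through it
	args := []string{"http://127.0.0.1:2379,http://127.0.0.1:2380 http://192.0.2.1:2379"}
	fmt.Println(FlattenListWithSplit(args, []string{",", ";", " "}))
	// prints: [http://127.0.0.1:2379 http://127.0.0.1:2380 http://192.0.2.1:2379]
}
```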
15	misc/go	Executable file
@@ -0,0 +1,15 @@
+#!/bin/bash
+# hack around stupid $GOPATH semantics, with ~/bin/go helper
+# thanks to Nilium in #go-nuts for 1/3 of the idea
+[ -z "$GOPATH" ] && echo '$GOPATH is not set!' && exit 1
+GO="$(which -a go | sed -e '2q;d')"	# TODO: pick /usr/bin/go in a better way
+if [ "$1" = "generate" ]; then
+	exec $GO "$@"	# go generate is stupid and gets confused by $GOPATH
+fi
+# the idea is to have $project/gopath/src/ be a symlink to ../vendor but you put
+# all of your vendored things in vendor/ but with this gopath can be per project
+if [ -d "$PWD/vendor/" ] && [ -d "$PWD/gopath/" ] && [ "`readlink $PWD/gopath/src`" = "../vendor" ] ; then
+	GOPATH="$PWD/gopath/:$GOPATH" $GO "$@"
+else
+	$GO "$@"
+fi
@@ -9,6 +9,8 @@ if env | grep -q '^TRAVIS=true$'; then
 	travis=1
 fi

+sudo_command=$(which sudo)

 if [ $travis -eq 0 ]; then
 	YUM=`which yum 2>/dev/null`
 	APT=`which apt-get 2>/dev/null`
@@ -18,34 +20,32 @@ if [ $travis -eq 0 ]; then
 	fi
 	if [ ! -z "$YUM" ]; then
 		# some go dependencies are stored in mercurial
-		sudo $YUM install -y golang golang-googlecode-tools-stringer hg
+		$sudo_command $YUM install -y golang golang-googlecode-tools-stringer hg

 	fi
 	if [ ! -z "$APT" ]; then
-		sudo $APT update
+		$sudo_command $APT update
-		sudo $APT install -y golang make gcc packagekit mercurial
+		$sudo_command $APT install -y golang make gcc packagekit mercurial
 		# one of these two golang tools packages should work on debian
-		sudo $APT install -y golang-golang-x-tools || true
+		$sudo_command $APT install -y golang-golang-x-tools || true
-		sudo $APT install -y golang-go.tools || true
+		$sudo_command $APT install -y golang-go.tools || true
 	fi
 fi

-# build etcd
+# if golang is too old, we don't want to fail with an obscure error later
-git clone --recursive https://github.com/coreos/etcd/ && cd etcd
+if go version | grep 'go1\.[0123]\.'; then
-goversion=$(go version)
+	echo "mgmt requires go1.4 or higher."
-# if 'go version' contains string 'devel', then use git master of etcd...
+	exit 1
-if [ "${goversion#*devel}" == "$goversion" ]; then
-	git checkout v2.2.4 # TODO: update to newer versions as needed
 fi
-[ -x build ] && ./build
-mkdir -p ~/bin/
-cp bin/etcd ~/bin/
-cd - >/dev/null
-rm -rf etcd # clean up to avoid failing on upstream gofmt errors

 go get ./... # get all the go dependencies
 [ -e "$GOBIN/mgmt" ] && rm -f "$GOBIN/mgmt" # the `go get` version has no -X
-go get golang.org/x/tools/cmd/vet # add in `go vet` for travis
+# vet is built-in in go 1.6 - we check for go vet command
+go vet 1> /dev/null 2>&1
+ret=$?
+if [[ $ret != 0 ]]; then
+	go get golang.org/x/tools/cmd/vet # add in `go vet` for travis
+fi
 go get golang.org/x/tools/cmd/stringer # for automatic stringer-ing
 go get github.com/golang/lint/golint # for `golint`-ing
 cd "$XPWD" >/dev/null
13	misc/mgmt.service	Normal file
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run mgmt configuration management
+Documentation=https://github.com/purpleidea/mgmt/
+After=systemd-networkd.service
+Requires=systemd-networkd.service
+
+[Service]
+ExecStart=/usr/bin/mgmt run ${OPTS}
+RestartSec=5s
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
68	misc_test.go
@@ -742,3 +742,71 @@ func TestMiscT11(t *testing.T) {
 		}
 	}
 }

+func TestMiscFlattenListWithSplit1(t *testing.T) {
+	{
+		in := []string{} // input
+		ex := []string{} // expected
+		out := FlattenListWithSplit(in, []string{",", ";", " "})
+		sort.Strings(out)
+		sort.Strings(ex)
+		if !reflect.DeepEqual(ex, out) {
+			t.Errorf("FlattenListWithSplit expected: %v; got: %v.", ex, out)
+		}
+	}
+
+	{
+		in := []string{"hey"} // input
+		ex := []string{"hey"} // expected
+		out := FlattenListWithSplit(in, []string{",", ";", " "})
+		sort.Strings(out)
+		sort.Strings(ex)
+		if !reflect.DeepEqual(ex, out) {
+			t.Errorf("FlattenListWithSplit expected: %v; got: %v.", ex, out)
+		}
+	}
+
+	{
+		in := []string{"a", "b", "c", "d"} // input
+		ex := []string{"a", "b", "c", "d"} // expected
+		out := FlattenListWithSplit(in, []string{",", ";", " "})
+		sort.Strings(out)
+		sort.Strings(ex)
+		if !reflect.DeepEqual(ex, out) {
+			t.Errorf("FlattenListWithSplit expected: %v; got: %v.", ex, out)
+		}
+	}
+
+	{
+		in := []string{"a,b,c,d"} // input
+		ex := []string{"a", "b", "c", "d"} // expected
+		out := FlattenListWithSplit(in, []string{",", ";", " "})
+		sort.Strings(out)
+		sort.Strings(ex)
+		if !reflect.DeepEqual(ex, out) {
+			t.Errorf("FlattenListWithSplit expected: %v; got: %v.", ex, out)
+		}
+	}
+
+	{
+		in := []string{"a,b;c d"} // input (mixed)
+		ex := []string{"a", "b", "c", "d"} // expected
+		out := FlattenListWithSplit(in, []string{",", ";", " "})
+		sort.Strings(out)
+		sort.Strings(ex)
+		if !reflect.DeepEqual(ex, out) {
+			t.Errorf("FlattenListWithSplit expected: %v; got: %v.", ex, out)
+		}
+	}
+
+	{
+		in := []string{"a,b,c,d;e,f,g,h;i,j,k,l;m,n,o,p q,r,s,t;u,v,w,x y z"} // input (mixed)
+		ex := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"} // expected
+		out := FlattenListWithSplit(in, []string{",", ";", " "})
+		sort.Strings(out)
+		sort.Strings(ex)
+		if !reflect.DeepEqual(ex, out) {
+			t.Errorf("FlattenListWithSplit expected: %v; got: %v.", ex, out)
+		}
+	}
+}
28	noop.go
@@ -26,11 +26,13 @@ func init() {
 	gob.Register(&NoopRes{})
 }

+// NoopRes is a no-op resource that does nothing.
 type NoopRes struct {
 	BaseRes `yaml:",inline"`
 	Comment string `yaml:"comment"` // extra field for example purposes
 }

+// NewNoopRes is a constructor for this resource. It also calls Init() for you.
 func NewNoopRes(name string) *NoopRes {
 	obj := &NoopRes{
 		BaseRes: BaseRes{
@@ -42,6 +44,7 @@ func NewNoopRes(name string) *NoopRes {
 	return obj
 }

+// Init runs some startup code for this resource.
 func (obj *NoopRes) Init() {
 	obj.BaseRes.kind = "Noop"
 	obj.BaseRes.Init() // call base init, b/c we're overriding
@@ -53,29 +56,30 @@ func (obj *NoopRes) Validate() bool {
 	return true
 }

+// Watch is the primary listener for this resource and it outputs events.
 func (obj *NoopRes) Watch(processChan chan Event) {
 	if obj.IsWatching() {
 		return
 	}
 	obj.SetWatching(true)
 	defer obj.SetWatching(false)
+	cuuid := obj.converger.Register()
+	defer cuuid.Unregister()

-	//vertex := obj.vertex // stored with SetVertex
 	var send = false // send event?
 	var exit = false
 	for {
 		obj.SetState(resStateWatching) // reset
 		select {
 		case event := <-obj.events:
-			obj.SetConvergedState(resConvergedNil)
+			cuuid.SetConverged(false)
 			// we avoid sending events on unpause
 			if exit, send = obj.ReadEvent(&event); exit {
 				return // exit
 			}

-		case _ = <-TimeAfterOrBlock(obj.ctimeout):
+		case <-cuuid.ConvergedTimer():
-			obj.SetConvergedState(resConvergedTimeout)
+			cuuid.SetConverged(true) // converged!
-			obj.converged <- true
 			continue
 		}

@@ -92,22 +96,24 @@ func (obj *NoopRes) Watch(processChan chan Event) {
 }

 // CheckApply method for Noop resource. Does nothing, returns happy!
-func (obj *NoopRes) CheckApply(apply bool) (stateok bool, err error) {
+func (obj *NoopRes) CheckApply(apply bool) (checkok bool, err error) {
 	log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
 	return true, nil // state is always okay
 }

+// NoopUUID is the UUID struct for NoopRes.
 type NoopUUID struct {
 	BaseUUID
 	name string
 }

+// The AutoEdges method returns the AutoEdges. In this case none are used.
 func (obj *NoopRes) AutoEdges() AutoEdge {
 	return nil
 }

-// include all params to make a unique identification of this object
+// GetUUIDs includes all params to make a unique identification of this object.
-// most resources only return one, although some resources return multiple
+// Most resources only return one, although some resources can return multiple.
 func (obj *NoopRes) GetUUIDs() []ResUUID {
 	x := &NoopUUID{
 		BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
@@ -116,6 +122,7 @@ func (obj *NoopRes) GetUUIDs() []ResUUID {
 	return []ResUUID{x}
 }

+// GroupCmp returns whether two resources can be grouped together or not.
 func (obj *NoopRes) GroupCmp(r Res) bool {
 	_, ok := r.(*NoopRes)
 	if !ok {
@@ -128,11 +135,16 @@ func (obj *NoopRes) GroupCmp(r Res) bool {
 	return true // noop resources can always be grouped together!
 }

+// Compare two resources and return if they are equivalent.
 func (obj *NoopRes) Compare(res Res) bool {
 	switch res.(type) {
 	// we can only compare NoopRes to others of the same resource
 	case *NoopRes:
 		res := res.(*NoopRes)
+		// calling base Compare is unneeded for the noop res
+		//if !obj.BaseRes.Compare(res) { // call base Compare
+		//	return false
+		//}
 		if obj.Name != res.Name {
 			return false
 		}
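The interesting change in Watch above is the converger registration: the resource flips its converged flag off on every incoming event and back on when its timer fires. A toy stand-in for that pattern (these types are illustrative only, not mgmt's converger API):

```go
package main

import (
	"fmt"
	"time"
)

// convergedUUID is a toy replacement for the cuuid handle used in Watch above:
// SetConverged records the local state, and ConvergedTimer restarts the quiet
// period every time it is read in a select statement.
type convergedUUID struct {
	timeout   time.Duration
	converged bool
}

func (c *convergedUUID) SetConverged(b bool)              { c.converged = b }
func (c *convergedUUID) ConvergedTimer() <-chan time.Time { return time.After(c.timeout) }

func main() {
	cuuid := &convergedUUID{timeout: 500 * time.Millisecond}
	events := make(chan string, 1)
	events <- "startup" // simulate one incoming event

	for i := 0; i < 2; i++ {
		select {
		case e := <-events:
			cuuid.SetConverged(false) // activity: not converged
			fmt.Println("event:", e)
		case <-cuuid.ConvergedTimer():
			cuuid.SetConverged(true) // quiet long enough: converged
			fmt.Println("converged!")
		}
	}
}
```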
@@ -28,11 +28,13 @@ import (
 	"strings"
 )

+// global tweaks of verbosity and code path
 const (
 	PK_DEBUG = false
 	PARANOID = false // enable if you see any ghosts
 )

+// constants which might need to be tweaked or which contain special dbus strings.
 const (
 	// FIXME: if PkBufferSize is too low, install seems to drop signals
 	PkBufferSize = 1000
@@ -46,11 +48,13 @@ const (
 )

 var (
+	// PkArchMap contains the mapping from PackageKit arch to GOARCH.
 	// GOARCH's: 386, amd64, arm, arm64, mips64, mips64le, ppc64, ppc64le
 	PkArchMap = map[string]string{ // map of PackageKit arch to GOARCH
 		// TODO: add more values
 		// noarch
-		"noarch": "ANY", // special value "ANY"
+		"noarch": "ANY", // special value "ANY" (noarch as seen in Fedora)
+		"all":    "ANY", // special value "ANY" ('all' as seen in Debian)
 		// fedora
 		"x86_64": "amd64",
 		"aarch64": "arm64",
@@ -97,6 +101,7 @@ const ( //static const PkEnumMatch enum_filter[]
 	PK_FILTER_ENUM_NOT_DOWNLOADED // "~downloaded"
 )

+// constants from packagekit c library.
 const ( //static const PkEnumMatch enum_transaction_flag[]
 	PK_TRANSACTION_FLAG_ENUM_NONE uint64 = 1 << iota // "none"
 	PK_TRANSACTION_FLAG_ENUM_ONLY_TRUSTED // "only-trusted"
@@ -107,6 +112,7 @@ const ( //static const PkEnumMatch enum_transaction_flag[]
 	PK_TRANSACTION_FLAG_ENUM_ALLOW_DOWNGRADE // "allow-downgrade"
 )

+// constants from packagekit c library.
 const ( //typedef enum
 	PK_INFO_ENUM_UNKNOWN uint64 = 1 << iota
 	PK_INFO_ENUM_INSTALLED
@@ -137,12 +143,12 @@ const ( //typedef enum
 	PK_INFO_ENUM_LAST
 )

-// wrapper struct so we can pass bus connection around in the struct
+// Conn is a wrapper struct so we can pass bus connection around in the struct.
 type Conn struct {
 	conn *dbus.Conn
 }

-// struct that is returned by PackagesToPackageIDs in the map values
+// PkPackageIDActionData is a struct that is returned by PackagesToPackageIDs in the map values.
 type PkPackageIDActionData struct {
 	Found bool
 	Installed bool
@@ -151,7 +157,7 @@ type PkPackageIDActionData struct {
 	Newest bool
 }

-// get a new bus connection
+// NewBus returns a new bus connection.
 func NewBus() *Conn {
 	// if we share the bus with others, we will get each others messages!!
 	bus, err := SystemBusPrivateUsable() // don't share the bus connection!
@@ -163,12 +169,12 @@ func NewBus() *Conn {
 	}
 }

-// get the dbus connection object
+// GetBus gets the dbus connection object.
 func (bus *Conn) GetBus() *dbus.Conn {
 	return bus.conn
 }

-// close the dbus connection object
+// Close closes the dbus connection object.
 func (bus *Conn) Close() error {
 	return bus.conn.Close()
 }
@@ -204,7 +210,7 @@ func (bus *Conn) matchSignal(ch chan *dbus.Signal, path dbus.ObjectPath, iface s
 	return nil
 }

-// get a signal anytime an event happens
+// WatchChanges gets a signal anytime an event happens.
 func (bus *Conn) WatchChanges() (chan *dbus.Signal, error) {
 	ch := make(chan *dbus.Signal, PkBufferSize)
 	// NOTE: the TransactionListChanged signal fires much more frequently,
@@ -246,7 +252,7 @@ func (bus *Conn) WatchChanges() (chan *dbus.Signal, error) {
 	return ch, nil
 }

-// create and return a transaction path
+// CreateTransaction creates and returns a transaction path.
 func (bus *Conn) CreateTransaction() (dbus.ObjectPath, error) {
 	if PK_DEBUG {
 		log.Println("PackageKit: CreateTransaction()")
@@ -263,6 +269,7 @@ func (bus *Conn) CreateTransaction() (dbus.ObjectPath, error) {
 	return interfacePath, nil
 }

+// ResolvePackages runs the PackageKit Resolve method and returns the result.
 func (bus *Conn) ResolvePackages(packages []string, filter uint64) ([]string, error) {
 	packageIDs := []string{}
 	ch := make(chan *dbus.Signal, PkBufferSize) // we need to buffer :(
@@ -326,6 +333,7 @@ loop:
 	return packageIDs, nil
 }

+// IsInstalledList queries a list of packages to see if they are installed.
 func (bus *Conn) IsInstalledList(packages []string) ([]bool, error) {
 	var filter uint64 // initializes at the "zero" value of 0
 	filter += PK_FILTER_ENUM_ARCH // always search in our arch
@@ -362,7 +370,7 @@ func (bus *Conn) IsInstalledList(packages []string) ([]bool, error) {
 	return r, nil
 }

-// is package installed ?
+// IsInstalled returns if a package is installed.
 // TODO: this could be optimized by making the resolve call directly
 func (bus *Conn) IsInstalled(pkg string) (bool, error) {
 	p, e := bus.IsInstalledList([]string{pkg})
@@ -372,7 +380,7 @@ func (bus *Conn) IsInstalled(pkg string) (bool, error) {
 	return p[0], nil
 }

-// install list of packages by packageID
+// InstallPackages installs a list of packages by packageID.
 func (bus *Conn) InstallPackages(packageIDs []string, transactionFlags uint64) error {

 	ch := make(chan *dbus.Signal, PkBufferSize) // we need to buffer :(
@@ -414,7 +422,7 @@ loop:
 		} else {
 			return fmt.Errorf("PackageKit: Error: %v", signal.Body)
 		}
-	case _ = <-TimeAfterOrBlock(timeout):
+	case <-TimeAfterOrBlock(timeout):
 		if finished {
 			log.Println("PackageKit: Timeout: InstallPackages: Waiting for 'Destroy'")
 			return nil // got tired of waiting for Destroy
@@ -424,7 +432,7 @@ loop:
 		}
 	}

-// remove list of packages
+// RemovePackages removes a list of packages by packageID.
 func (bus *Conn) RemovePackages(packageIDs []string, transactionFlags uint64) error {

 	var allowDeps = true // TODO: configurable
@@ -472,7 +480,7 @@ loop:
 	return nil
 }

-// update list of packages to versions that are specified
+// UpdatePackages updates a list of packages to versions that are specified.
 func (bus *Conn) UpdatePackages(packageIDs []string, transactionFlags uint64) error {
 	ch := make(chan *dbus.Signal, PkBufferSize) // we need to buffer :(
 	interfacePath, err := bus.CreateTransaction()
@@ -515,7 +523,7 @@ loop:
 	return nil
 }

-// get the list of files that are contained inside a list of packageids
+// GetFilesByPackageID gets the list of files that are contained inside a list of packageIDs.
 func (bus *Conn) GetFilesByPackageID(packageIDs []string) (files map[string][]string, err error) {
 	// NOTE: the maximum number of files in an RPM is 52116 in Fedora 23
 	// https://gist.github.com/purpleidea/b98e60dcd449e1ac3b8a
@@ -580,7 +588,7 @@ loop:
 	return
 }

-// get list of packages that are installed and which can be updated, mod filter
+// GetUpdates gets a list of packages that are installed and which can be updated, mod filter.
 func (bus *Conn) GetUpdates(filter uint64) ([]string, error) {
 	if PK_DEBUG {
 		log.Println("PackageKit: GetUpdates()")
@@ -641,9 +649,10 @@ loop:
 	return packageIDs, nil
 }

-// this is a helper function that *might* be generally useful outside mgmtconfig
+// PackagesToPackageIDs is a helper function that *might* be generally useful
-// packageMap input has the package names as keys and requested states as values
+// outside mgmt. The packageMap input has the package names as keys and
-// these states can be installed, uninstalled, newest or a requested version str
+// requested states as values. These states can be: installed, uninstalled,
+// newest or a requested version str.
 func (bus *Conn) PackagesToPackageIDs(packageMap map[string]string, filter uint64) (map[string]*PkPackageIDActionData, error) {
 	count := 0
 	packages := make([]string, len(packageMap))
@@ -814,7 +823,7 @@ func (bus *Conn) PackagesToPackageIDs(packageMap map[string]string, filter uint6
 	return result, nil
 }

-// returns a list of packageIDs which match the set of package names in packages
+// FilterPackageIDs returns a list of packageIDs which match the set of package names in packages.
 func FilterPackageIDs(m map[string]*PkPackageIDActionData, packages []string) ([]string, error) {
 	result := []string{}
 	for _, k := range packages {
@@ -828,6 +837,7 @@ func FilterPackageIDs(m map[string]*PkPackageIDActionData, packages []string) ([
 	return result, nil
 }

+// FilterState returns a map of whether each package queried matches the particular state.
 func FilterState(m map[string]*PkPackageIDActionData, packages []string, state string) (result map[string]bool, err error) {
 	result = make(map[string]bool)
 	pkgs := []string{} // bad pkgs that don't have a bool state
@@ -857,7 +867,7 @@ func FilterState(m map[string]*PkPackageIDActionData, packages []string, state s
 	return result, err
 }

-// return all packages that are in package and match the specific state
+// FilterPackageState returns all packages that are in package and match the specific state.
 func FilterPackageState(m map[string]*PkPackageIDActionData, packages []string, state string) (result []string, err error) {
 	result = []string{}
 	for _, k := range packages {
@@ -883,7 +893,7 @@ func FilterPackageState(m map[string]*PkPackageIDActionData, packages []string,
 	return result, err
 }

-// does flag exist inside data portion of packageID field?
+// FlagInData asks whether a flag exists inside the data portion of a packageID field?
 func FlagInData(flag, data string) bool {
 	flags := strings.Split(data, ":")
 	for _, f := range flags {
@@ -894,11 +904,12 @@ func FlagInData(flag, data string) bool {
 	return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// builds the transaction method string
|
// FmtTransactionMethod builds the transaction method string properly.
|
||||||
func FmtTransactionMethod(method string) string {
|
func FmtTransactionMethod(method string) string {
|
||||||
return fmt.Sprintf("%s.%s", PkIfaceTransaction, method)
|
return fmt.Sprintf("%s.%s", PkIfaceTransaction, method)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsMyArch determines if a PackageKit architecture matches the current os arch.
|
||||||
func IsMyArch(arch string) bool {
|
func IsMyArch(arch string) bool {
|
||||||
goarch, ok := PkArchMap[arch]
|
goarch, ok := PkArchMap[arch]
|
||||||
if !ok {
|
if !ok {
|
||||||
|
|||||||
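A minimal usage sketch (not taken from the patch above) of how the renamed PackageKit helpers compose; the package name "cowsay", the zero filter value and the fatal error handling are illustrative assumptions only:

	bus := NewBus()
	if bus == nil {
		log.Fatal("PackageKit: failed to connect to the system bus")
	}
	// ask which of the requested packages are already in the desired state
	packageMap := map[string]string{"cowsay": "installed"} // hypothetical package
	var filter uint64                                      // a PackageKit filter bitmask; left at the zero value here
	result, err := bus.PackagesToPackageIDs(packageMap, filter)
	if err != nil {
		log.Fatalf("PackageKit: %v", err)
	}
	states, err := FilterState(result, []string{"cowsay"}, "installed")
	if err != nil {
		log.Fatalf("PackageKit: %v", err)
	}
	log.Printf("cowsay installed: %t", states["cowsay"])
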
131 pgraph.go
@@ -43,6 +43,7 @@ const (
graphStatePaused
)

+// Graph is the graph structure in this library.
// The graph abstract data type (ADT) is defined as follows:
// * the directed graph arrows point from left to right ( -> )
// * the arrows point away from their dependencies (eg: arrows mean "before")
@@ -55,15 +56,18 @@ type Graph struct {
mutex sync.Mutex // used when modifying graph State variable
}

+// Vertex is the primary vertex struct in this library.
type Vertex struct {
Res // anonymous field
timestamp int64 // last updated timestamp ?
}

+// Edge is the primary edge struct in this library.
type Edge struct {
Name string
}

+// NewGraph builds a new graph.
func NewGraph(name string) *Graph {
return &Graph{
Name: name,
@@ -72,12 +76,14 @@ func NewGraph(name string) *Graph {
}
}

+// NewVertex returns a new graph vertex struct with a contained resource.
func NewVertex(r Res) *Vertex {
return &Vertex{
Res: r,
}
}

+// NewEdge returns a new graph edge struct.
func NewEdge(name string) *Edge {
return &Edge{
Name: name,
@@ -97,38 +103,34 @@ func (g *Graph) Copy() *Graph {
return newGraph
}

-// returns the name of the graph
+// GetName returns the name of the graph.
func (g *Graph) GetName() string {
return g.Name
}

-// set name of the graph
+// SetName sets the name of the graph.
func (g *Graph) SetName(name string) {
g.Name = name
}

-func (g *Graph) GetState() graphState {
+// getState returns the state of the graph. This state is used for optimizing
+// certain algorithms by knowing what part of processing the graph is currently
+// undergoing.
+func (g *Graph) getState() graphState {
//g.mutex.Lock()
//defer g.mutex.Unlock()
return g.state
}

-// set graph state and return previous state
-func (g *Graph) SetState(state graphState) graphState {
+// setState sets the graph state and returns the previous state.
+func (g *Graph) setState(state graphState) graphState {
g.mutex.Lock()
defer g.mutex.Unlock()
-prev := g.GetState()
+prev := g.getState()
g.state = state
return prev
}

-// store a pointer in the resource to it's parent vertex
-func (g *Graph) SetVertex() {
-for v := range g.GetVerticesChan() {
-v.Res.SetVertex(v)
-}
-}
-
// AddVertex uses variadic input to add all listed vertices to the graph
func (g *Graph) AddVertex(xv ...*Vertex) {
for _, v := range xv {
@@ -138,6 +140,7 @@ func (g *Graph) AddVertex(xv ...*Vertex) {
}
}

+// DeleteVertex deletes a particular vertex from the graph.
func (g *Graph) DeleteVertex(v *Vertex) {
delete(g.Adjacency, v)
for k := range g.Adjacency {
@@ -145,7 +148,7 @@ func (g *Graph) DeleteVertex(v *Vertex) {
}
}

-// adds a directed edge to the graph from v1 to v2
+// AddEdge adds a directed edge to the graph from v1 to v2.
func (g *Graph) AddEdge(v1, v2 *Vertex, e *Edge) {
// NOTE: this doesn't allow more than one edge between two vertexes...
g.AddVertex(v1, v2) // supports adding N vertices now
@@ -154,6 +157,8 @@ func (g *Graph) AddEdge(v1, v2 *Vertex, e *Edge) {
g.Adjacency[v1][v2] = e
}

+// GetVertexMatch searches for an equivalent resource in the graph and returns
+// the vertex it is found in, or nil if not found.
func (g *Graph) GetVertexMatch(obj Res) *Vertex {
for k := range g.Adjacency {
if k.Res.Compare(obj) {
@@ -163,6 +168,7 @@ func (g *Graph) GetVertexMatch(obj Res) *Vertex {
return nil
}

+// HasVertex returns if the input vertex exists in the graph.
func (g *Graph) HasVertex(v *Vertex) bool {
if _, exists := g.Adjacency[v]; exists {
return true
@@ -170,12 +176,12 @@ func (g *Graph) HasVertex(v *Vertex) bool {
return false
}

-// number of vertices in the graph
+// NumVertices returns the number of vertices in the graph.
func (g *Graph) NumVertices() int {
return len(g.Adjacency)
}

-// number of edges in the graph
+// NumEdges returns the number of edges in the graph.
func (g *Graph) NumEdges() int {
count := 0
for k := range g.Adjacency {
@@ -194,7 +200,7 @@ func (g *Graph) GetVertices() []*Vertex {
return vertices
}

-// returns a channel of all vertices in the graph
+// GetVerticesChan returns a channel of all vertices in the graph.
func (g *Graph) GetVerticesChan() chan *Vertex {
ch := make(chan *Vertex)
go func(ch chan *Vertex) {
@@ -206,6 +212,7 @@ func (g *Graph) GetVerticesChan() chan *Vertex {
return ch
}

+// VertexSlice is a linear list of vertices. It can be sorted.
type VertexSlice []*Vertex

func (vs VertexSlice) Len() int { return len(vs) }
@@ -223,7 +230,7 @@ func (g *Graph) GetVerticesSorted() []*Vertex {
return vertices
}

-// make the graph pretty print
+// String makes the graph pretty print.
func (g *Graph) String() string {
return fmt.Sprintf("Vertices(%d), Edges(%d)", g.NumVertices(), g.NumEdges())
}
@@ -233,7 +240,7 @@ func (v *Vertex) String() string {
return fmt.Sprintf("%s[%s]", v.Res.Kind(), v.Res.GetName())
}

-// output the graph in graphviz format
+// Graphviz outputs the graph in graphviz format.
// https://en.wikipedia.org/wiki/DOT_%28graph_description_language%29
func (g *Graph) Graphviz() (out string) {
//digraph g {
@@ -265,7 +272,8 @@ func (g *Graph) Graphviz() (out string) {
return
}

-// write out the graphviz data and run the correct graphviz filter command
+// ExecGraphviz writes out the graphviz data and runs the correct graphviz
+// filter command.
func (g *Graph) ExecGraphviz(program, filename string) error {

switch program {
@@ -315,8 +323,8 @@ func (g *Graph) ExecGraphviz(program, filename string) error {
return nil
}

-// return an array (slice) of all directed vertices to vertex v (??? -> v)
-// OKTimestamp should use this
+// IncomingGraphEdges returns an array (slice) of all directed vertices to
+// vertex v (??? -> v). OKTimestamp should probably use this.
func (g *Graph) IncomingGraphEdges(v *Vertex) []*Vertex {
// TODO: we might be able to implement this differently by reversing
// the Adjacency graph and then looping through it again...
@@ -331,8 +339,8 @@ func (g *Graph) IncomingGraphEdges(v *Vertex) []*Vertex {
return s
}

-// return an array (slice) of all vertices that vertex v points to (v -> ???)
-// poke should use this
+// OutgoingGraphEdges returns an array (slice) of all vertices that vertex v
+// points to (v -> ???). Poke should probably use this.
func (g *Graph) OutgoingGraphEdges(v *Vertex) []*Vertex {
var s []*Vertex
for k := range g.Adjacency[v] { // forward paths
@@ -341,7 +349,8 @@ func (g *Graph) OutgoingGraphEdges(v *Vertex) []*Vertex {
return s
}

-// return an array (slice) of all vertices that connect to vertex v
+// GraphEdges returns an array (slice) of all vertices that connect to vertex v.
+// This is the union of IncomingGraphEdges and OutgoingGraphEdges.
func (g *Graph) GraphEdges(v *Vertex) []*Vertex {
var s []*Vertex
s = append(s, g.IncomingGraphEdges(v)...)
@@ -349,6 +358,7 @@ func (g *Graph) GraphEdges(v *Vertex) []*Vertex {
return s
}

+// DFS returns a depth first search for the graph, starting at the input vertex.
func (g *Graph) DFS(start *Vertex) []*Vertex {
var d []*Vertex // discovered
var s []*Vertex // stack
@@ -371,7 +381,7 @@ func (g *Graph) DFS(start *Vertex) []*Vertex {
return d
}

-// build a new graph containing only vertices from the list...
+// FilterGraph builds a new graph containing only vertices from the list.
func (g *Graph) FilterGraph(name string, vertices []*Vertex) *Graph {
newgraph := NewGraph(name)
for k1, x := range g.Adjacency {
@@ -385,8 +395,8 @@ func (g *Graph) FilterGraph(name string, vertices []*Vertex) *Graph {
return newgraph
}

-// return a channel containing the N disconnected graphs in our main graph
-// we can then process each of these in parallel
+// GetDisconnectedGraphs returns a channel containing the N disconnected graphs
+// in our main graph. We can then process each of these in parallel.
func (g *Graph) GetDisconnectedGraphs() chan *Graph {
ch := make(chan *Graph)
go func() {
@@ -421,8 +431,7 @@ func (g *Graph) GetDisconnectedGraphs() chan *Graph {
return ch
}

-// return the indegree for the graph, IOW the count of vertices that point to me
-// NOTE: this returns the values for all vertices in one big lookup table
+// InDegree returns the count of vertices that point to me in one big lookup map.
func (g *Graph) InDegree() map[*Vertex]int {
result := make(map[*Vertex]int)
for k := range g.Adjacency {
@@ -437,21 +446,20 @@ func (g *Graph) InDegree() map[*Vertex]int {
return result
}

-// return the outdegree for the graph, IOW the count of vertices that point away
-// NOTE: this returns the values for all vertices in one big lookup table
+// OutDegree returns the count of vertices that point away in one big lookup map.
func (g *Graph) OutDegree() map[*Vertex]int {
result := make(map[*Vertex]int)

for k := range g.Adjacency {
result[k] = 0 // initialize
-for _ = range g.Adjacency[k] {
+for range g.Adjacency[k] {
result[k]++
}
}
return result
}

-// returns a topological sort for the graph
+// TopologicalSort returns the sort of graph vertices in that order.
// based on descriptions and code from wikipedia and rosetta code
// TODO: add memoization, and cache invalidation to speed this up :)
func (g *Graph) TopologicalSort() (result []*Vertex, ok bool) { // kahn's algorithm
@@ -633,15 +641,6 @@ func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex)
return nil // success
}

-func HeisenbergCount(ch chan *Vertex) int {
-c := 0
-for x := range ch {
-_ = x
-c++
-}
-return c
-}
-
// GetTimestamp returns the timestamp of a vertex
func (v *Vertex) GetTimestamp() int64 {
return v.timestamp
@@ -653,7 +652,7 @@ func (v *Vertex) UpdateTimestamp() int64 {
return v.timestamp
}

-// can this element run right now?
+// OKTimestamp returns true if this element can run right now?
func (g *Graph) OKTimestamp(v *Vertex) bool {
// these are all the vertices pointing TO v, eg: ??? -> v
for _, n := range g.IncomingGraphEdges(v) {
@@ -672,14 +671,14 @@ func (g *Graph) OKTimestamp(v *Vertex) bool {
return true
}

-// notify nodes after me in the dependency graph that they need refreshing...
+// Poke notifies nodes after me in the dependency graph that they need refreshing...
// NOTE: this assumes that this can never fail or need to be rescheduled
func (g *Graph) Poke(v *Vertex, activity bool) {
// these are all the vertices pointing AWAY FROM v, eg: v -> ???
for _, n := range g.OutgoingGraphEdges(v) {
// XXX: if we're in state event and haven't been cancelled by
// apply, then we can cancel a poke to a child, right? XXX
-// XXX: if n.Res.GetState() != resStateEvent { // is this correct?
+// XXX: if n.Res.getState() != resStateEvent { // is this correct?
if true { // XXX
if DEBUG {
log.Printf("%v[%v]: Poke: %v[%v]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
@@ -693,7 +692,7 @@ func (g *Graph) Poke(v *Vertex, activity bool) {
}
}

-// poke the pre-requisites that are stale and need to run before I can run...
+// BackPoke pokes the pre-requisites that are stale and need to run before I can run.
func (g *Graph) BackPoke(v *Vertex) {
// these are all the vertices pointing TO v, eg: ??? -> v
for _, n := range g.IncomingGraphEdges(v) {
@@ -717,6 +716,7 @@ func (g *Graph) BackPoke(v *Vertex) {
}
}

+// Process is the primary function to execute for a particular vertex in the graph.
// XXX: rename this function
func (g *Graph) Process(v *Vertex) {
obj := v.Res
@@ -736,15 +736,15 @@ func (g *Graph) Process(v *Vertex) {

obj.SetState(resStateCheckApply)
// if this fails, don't UpdateTimestamp()
-stateok, err := obj.CheckApply(true)
-if stateok && err != nil { // should never return this way
-log.Fatalf("%v[%v]: CheckApply(): %t, %+v", obj.Kind(), obj.GetName(), stateok, err)
+checkok, err := obj.CheckApply(!obj.Meta().Noop)
+if checkok && err != nil { // should never return this way
+log.Fatalf("%v[%v]: CheckApply(): %t, %+v", obj.Kind(), obj.GetName(), checkok, err)
}
if DEBUG {
-log.Printf("%v[%v]: CheckApply(): %t, %v", obj.Kind(), obj.GetName(), stateok, err)
+log.Printf("%v[%v]: CheckApply(): %t, %v", obj.Kind(), obj.GetName(), checkok, err)
}

-if !stateok { // if state *was* not ok, we had to have apply'ed
+if !checkok { // if state *was* not ok, we had to have apply'ed
if err != nil { // error during check or apply
ok = false
} else {
@@ -752,6 +752,11 @@ func (g *Graph) Process(v *Vertex) {
}
}

+// when noop is true we always want to update timestamp
+if obj.Meta().Noop && err == nil {
+ok = true
+}
+
if ok {
// update this timestamp *before* we poke or the poked
// nodes might fail due to having a too old timestamp!
@@ -766,10 +771,11 @@ func (g *Graph) Process(v *Vertex) {
}
}

-// main kick to start the graph
+// Start is a main kick to start the graph. It goes through in reverse topological
+// sort order so that events can't hit un-started vertices.
func (g *Graph) Start(wg *sync.WaitGroup, first bool) { // start or continue
-log.Printf("State: %v -> %v", g.SetState(graphStateStarting), g.GetState())
-defer log.Printf("State: %v -> %v", g.SetState(graphStateStarted), g.GetState())
+log.Printf("State: %v -> %v", g.setState(graphStateStarting), g.getState())
+defer log.Printf("State: %v -> %v", g.setState(graphStateStarted), g.getState())
t, _ := g.TopologicalSort()
// TODO: only calculate indegree if `first` is true to save resources
indegree := g.InDegree() // compute all of the indegree's
@@ -831,15 +837,17 @@ func (g *Graph) Start(wg *sync.WaitGroup, first bool) { // start or continue
}
}

+// Pause sends pause events to the graph in a topological sort order.
func (g *Graph) Pause() {
-log.Printf("State: %v -> %v", g.SetState(graphStatePausing), g.GetState())
-defer log.Printf("State: %v -> %v", g.SetState(graphStatePaused), g.GetState())
+log.Printf("State: %v -> %v", g.setState(graphStatePausing), g.getState())
+defer log.Printf("State: %v -> %v", g.setState(graphStatePaused), g.getState())
t, _ := g.TopologicalSort()
for _, v := range t { // squeeze out the events...
v.SendEvent(eventPause, true, false)
}
}

+// Exit sends exit events to the graph in a topological sort order.
func (g *Graph) Exit() {
if g == nil {
return
@@ -855,13 +863,14 @@ func (g *Graph) Exit() {
}
}

-func (g *Graph) SetConvergedCallback(ctimeout int, converged chan bool) {
+// AssociateData associates some data with the object in the graph in question
+func (g *Graph) AssociateData(converger Converger) {
for v := range g.GetVerticesChan() {
-v.Res.SetConvergedCallback(ctimeout, converged)
+v.Res.AssociateData(converger)
}
}

-// in array function to test *Vertex in a slice of *Vertices
+// VertexContains is an "in array" function to test for a vertex in a slice of vertices.
func VertexContains(needle *Vertex, haystack []*Vertex) bool {
for _, v := range haystack {
if needle == v {
@@ -871,7 +880,7 @@ func VertexContains(needle *Vertex, haystack []*Vertex) bool {
return false
}

-// reverse a list of vertices
+// Reverse reverses a list of vertices.
func Reverse(vs []*Vertex) []*Vertex {
//var out []*Vertex // XXX: golint suggests, but it fails testing
out := make([]*Vertex, 0) // empty list
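A minimal sketch (assumptions, not from the patch) of what the new Noop metaparam amounts to for one resource when Process() runs it: CheckApply is asked not to apply, and a noop run still counts as success so the timestamp update and downstream pokes happen. The helper name runOnce is hypothetical:

	func runOnce(obj Res) (applied bool, err error) {
		apply := !obj.Meta().Noop // noop resources are only checked, never changed
		checkok, err := obj.CheckApply(apply)
		if err != nil {
			return false, err
		}
		// when noop is set we still treat the run as ok, mirroring the new
		// "if obj.Meta().Noop && err == nil { ok = true }" branch above
		return !checkok, nil
	}
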
@@ -638,7 +638,7 @@ func NewNoopResTest(name string) *NoopResTest {
NoopRes: NoopRes{
BaseRes: BaseRes{
Name: name,
-Meta: MetaParams{
+MetaParams: MetaParams{
AutoGroup: true, // always autogroup
},
},
@@ -807,7 +807,7 @@ func (g *Graph) fullPrint() (str string) {
// helper function
func runGraphCmp(t *testing.T, g1, g2 *Graph) {
ch := g1.autoGroup(&testGrouper{}) // edits the graph
-for _ = range ch { // bleed the channel or it won't run :(
+for range ch { // bleed the channel or it won't run :(
// pass
}
err := GraphCmp(g1, g2)
@@ -819,7 +819,7 @@ func runGraphCmp(t *testing.T, g1, g2 *Graph) {
}
}

-// all of the following test cases are layed out with the following semantics:
+// all of the following test cases are laid out with the following semantics:
// * vertices which start with the same single letter are considered "like"
// * "like" elements should be merged
// * vertices can have any integer after their single letter "family" type
54 pkg.go
@@ -31,6 +31,7 @@ func init() {
gob.Register(&PkgRes{})
}

+// PkgRes is a package resource for packagekit.
type PkgRes struct {
BaseRes `yaml:",inline"`
State string `yaml:"state"` // state: installed, uninstalled, newest, <version>
@@ -41,13 +42,11 @@ type PkgRes struct {
fileList []string // FIXME: update if pkg changes
}

-// helper function for creating new pkg resources that calls Init()
+// NewPkgRes is a constructor for this resource. It also calls Init() for you.
func NewPkgRes(name, state string, allowuntrusted, allownonfree, allowunsupported bool) *PkgRes {
obj := &PkgRes{
BaseRes: BaseRes{
Name: name,
-events: make(chan Event),
-vertex: nil,
},
State: state,
AllowUntrusted: allowuntrusted,
@@ -58,6 +57,7 @@ func NewPkgRes(name, state string, allowuntrusted, allownonfree, allowunsupporte
return obj
}

+// Init runs some startup code for this resource.
func (obj *PkgRes) Init() {
obj.BaseRes.kind = "Pkg"
obj.BaseRes.Init() // call base init, b/c we're overriding
@@ -95,8 +95,8 @@ func (obj *PkgRes) Init() {
}
}

+// Validate checks if the resource data structure was populated correctly.
func (obj *PkgRes) Validate() bool {
-
if obj.State == "" {
return false
}
@@ -104,7 +104,8 @@ func (obj *PkgRes) Validate() bool {
return true
}

-// use UpdatesChanged signal to watch for changes
+// Watch is the primary listener for this resource and it outputs events.
+// It uses the PackageKit UpdatesChanged signal to watch for changes.
// TODO: https://github.com/hughsie/PackageKit/issues/109
// TODO: https://github.com/hughsie/PackageKit/issues/110
func (obj *PkgRes) Watch(processChan chan Event) {
@@ -113,6 +114,8 @@ func (obj *PkgRes) Watch(processChan chan Event) {
}
obj.SetWatching(true)
defer obj.SetWatching(false)
+cuuid := obj.converger.Register()
+defer cuuid.Unregister()

bus := NewBus()
if bus == nil {
@@ -137,6 +140,8 @@ func (obj *PkgRes) Watch(processChan chan Event) {
obj.SetState(resStateWatching) // reset
select {
case event := <-ch:
+cuuid.SetConverged(false)
+
// FIXME: ask packagekit for info on what packages changed
if DEBUG {
log.Printf("%v: Event: %v", obj.fmtNames(obj.getNames()), event.Name)
@@ -148,20 +153,18 @@ func (obj *PkgRes) Watch(processChan chan Event) {
<-ch // discard
}

-obj.SetConvergedState(resConvergedNil)
send = true
dirty = true

case event := <-obj.events:
-obj.SetConvergedState(resConvergedNil)
+cuuid.SetConverged(false)
if exit, send = obj.ReadEvent(&event); exit {
return // exit
}
-//dirty = false // these events don't invalidate state
+dirty = false // these events don't invalidate state

-case _ = <-TimeAfterOrBlock(obj.ctimeout):
-obj.SetConvergedState(resConvergedTimeout)
-obj.converged <- true
+case <-cuuid.ConvergedTimer():
+cuuid.SetConverged(true) // converged!
continue
}

@@ -242,7 +245,9 @@ func (obj *PkgRes) pkgMappingHelper(bus *Conn) (map[string]*PkPackageIDActionDat
return result, nil
}

-func (obj *PkgRes) CheckApply(apply bool) (stateok bool, err error) {
+// CheckApply checks the resource state and applies the resource if the bool
+// input is true. It returns error info and if the state check passed or not.
+func (obj *PkgRes) CheckApply(apply bool) (checkok bool, err error) {
log.Printf("%v: CheckApply(%t)", obj.fmtNames(obj.getNames()), apply)

if obj.State == "" { // TODO: Validate() should replace this check!
@@ -287,10 +292,12 @@ func (obj *PkgRes) CheckApply(apply bool) (stateok bool, err error) {
fallthrough
case "newest":
if validState {
+obj.isStateOK = true // reset
return true, nil // state is correct, exit!
}
default: // version string
if obj.State == data.Version && data.Version != "" {
+obj.isStateOK = true // reset
return true, nil
}
}
@@ -334,9 +341,11 @@ func (obj *PkgRes) CheckApply(apply bool) (stateok bool, err error) {
return false, err // fail
}
log.Printf("%v: Set: %v success!", obj.fmtNames(StrListIntersection(applyPackages, obj.getNames())), obj.State)
+obj.isStateOK = true // reset
return false, nil // success
}

+// PkgUUID is the UUID struct for PkgRes.
type PkgUUID struct {
BaseUUID
name string // pkg name
@@ -354,6 +363,7 @@ func (obj *PkgUUID) IFF(uuid ResUUID) bool {
return obj.name == res.name
}

+// PkgResAutoEdges holds the state of the auto edge generator.
type PkgResAutoEdges struct {
fileList []string
svcUUIDs []ResUUID
@@ -362,6 +372,7 @@ type PkgResAutoEdges struct {
kind string
}

+// Next returns the next automatic edge.
func (obj *PkgResAutoEdges) Next() []ResUUID {
if obj.testIsNext {
log.Fatal("Expecting a call to Test()")
@@ -389,6 +400,7 @@ func (obj *PkgResAutoEdges) Next() []ResUUID {
return result
}

+// Test gets results of the earlier Next() call, & returns if we should continue!
func (obj *PkgResAutoEdges) Test(input []bool) bool {
if !obj.testIsNext {
log.Fatal("Expecting a call to Next()")
@@ -439,7 +451,8 @@ func (obj *PkgResAutoEdges) Test(input []bool) bool {
return true // continue, there are more files!
}

-// produce an object which generates a minimal pkg file optimization sequence
+// AutoEdges produces an object which generates a minimal pkg file optimization
+// sequence of edges.
func (obj *PkgRes) AutoEdges() AutoEdge {
// in contrast with the FileRes AutoEdges() function which contains
// more of the mechanics, most of the AutoEdge mechanics for the PkgRes
@@ -468,7 +481,8 @@ func (obj *PkgRes) AutoEdges() AutoEdge {
}
}

-// include all params to make a unique identification of this object
+// GetUUIDs includes all params to make a unique identification of this object.
+// Most resources only return one, although some resources can return multiple.
func (obj *PkgRes) GetUUIDs() []ResUUID {
x := &PkgUUID{
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
@@ -479,6 +493,7 @@ func (obj *PkgRes) GetUUIDs() []ResUUID {
return result
}

+// GroupCmp returns whether two resources can be grouped together or not.
// can these two resources be merged ?
// (aka does this resource support doing so?)
// will resource allow itself to be grouped _into_ this obj?
@@ -500,10 +515,15 @@ func (obj *PkgRes) GroupCmp(r Res) bool {
return true
}

+// Compare two resources and return if they are equivalent.
func (obj *PkgRes) Compare(res Res) bool {
switch res.(type) {
case *PkgRes:
res := res.(*PkgRes)
+if !obj.BaseRes.Compare(res) { // call base Compare
+return false
+}
+
if obj.Name != res.Name {
return false
}
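The converger wiring that replaces the old ctimeout/converged fields follows one pattern in every Watch() loop; a condensed sketch paraphrasing the hunks above, with the actual event handling elided:

	cuuid := obj.converger.Register()
	defer cuuid.Unregister()
	for {
		select {
		case event := <-obj.events:
			cuuid.SetConverged(false) // activity means we are not converged
			_ = event                 // ... handle the event ...
		case <-cuuid.ConvergedTimer():
			cuuid.SetConverged(true) // quiet for long enough: converged!
			continue
		}
	}
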
141 puppet.go Normal file
@@ -0,0 +1,141 @@
+// Mgmt
+// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Written by James Shubin <james@shubin.ca> and the project contributors
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+"bufio"
+"io"
+"log"
+"os/exec"
+"strconv"
+"strings"
+)
+
+const (
+// PuppetYAMLBufferSize is the maximum buffer size for the yaml input data
+PuppetYAMLBufferSize = 65535
+)
+
+func runPuppetCommand(cmd *exec.Cmd) ([]byte, error) {
+if DEBUG {
+log.Printf("Puppet: running command: %v", cmd)
+}
+
+stdout, err := cmd.StdoutPipe()
+if err != nil {
+log.Printf("Puppet: Error opening pipe to puppet command: %v", err)
+return nil, err
+}
+stderr, err := cmd.StderrPipe()
+if err != nil {
+log.Printf("Puppet: Error opening error pipe to puppet command: %v", err)
+return nil, err
+}
+
+if err := cmd.Start(); err != nil {
+log.Printf("Puppet: Error starting puppet command: %v", err)
+return nil, err
+}
+
+// XXX: the current implementation is likely prone to fail
+// as soon as the YAML data overflows the buffer.
+data := make([]byte, PuppetYAMLBufferSize)
+var result []byte
+for err == nil {
+var count int
+count, err = stdout.Read(data)
+if err != nil && err != io.EOF {
+log.Printf("Puppet: Error reading YAML data from puppet: %v", err)
+return nil, err
+}
+// Slicing down to the number of actual bytes is important, the YAML parser
+// will choke on an oversized slice. http://stackoverflow.com/a/33726617/3356612
+result = append(result, data[0:count]...)
+}
+if DEBUG {
+log.Printf("Puppet: read %v bytes of data from puppet", len(result))
+}
+for scanner := bufio.NewScanner(stderr); scanner.Scan(); {
+log.Printf("Puppet: (output) %v", scanner.Text())
+}
+if err := cmd.Wait(); err != nil {
+log.Printf("Puppet: Error: puppet command did not complete: %v", err)
+return nil, err
+}
+
+return result, nil
+}
+
+// ParseConfigFromPuppet takes a special puppet param string and config and
+// returns the graph configuration structure.
+func ParseConfigFromPuppet(puppetParam, puppetConf string) *GraphConfig {
+var puppetConfArg string
+if puppetConf != "" {
+puppetConfArg = "--config=" + puppetConf
+}
+
+var cmd *exec.Cmd
+if puppetParam == "agent" {
+cmd = exec.Command("puppet", "mgmtgraph", "print", puppetConfArg)
+} else if strings.HasSuffix(puppetParam, ".pp") {
+cmd = exec.Command("puppet", "mgmtgraph", "print", puppetConfArg, "--manifest", puppetParam)
+} else {
+cmd = exec.Command("puppet", "mgmtgraph", "print", puppetConfArg, "--code", puppetParam)
+}
+
+log.Println("Puppet: launching translator")
+
+var config GraphConfig
+if data, err := runPuppetCommand(cmd); err != nil {
+return nil
+} else if err := config.Parse(data); err != nil {
+log.Printf("Puppet: Error: Could not parse YAML output with Parse: %v", err)
+return nil
+}
+
+return &config
+}
+
+// PuppetInterval returns the graph refresh interval from the puppet configuration.
+func PuppetInterval(puppetConf string) int {
+if DEBUG {
+log.Printf("Puppet: determining graph refresh interval")
+}
+var cmd *exec.Cmd
+if puppetConf != "" {
+cmd = exec.Command("puppet", "config", "print", "runinterval", "--config", puppetConf)
+} else {
+cmd = exec.Command("puppet", "config", "print", "runinterval")
+}
+
+log.Println("Puppet: inspecting runinterval configuration")
+
+interval := 1800
+data, err := runPuppetCommand(cmd)
+if err != nil {
+log.Printf("Puppet: could not determine configured run interval (%v), using default of %v", err, interval)
+return interval
+}
+result, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 0)
+if err != nil {
+log.Printf("Puppet: error reading numeric runinterval value (%v), using default of %v", err, interval)
+return interval
+}
+
+return int(result)
+}
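A rough sketch (not shown in this diff) of how the new puppet helpers might be called from the rest of the program; the puppet.conf path and the "agent" mode below are illustrative assumptions:

	puppetConf := "/etc/puppetlabs/puppet/puppet.conf" // hypothetical path
	config := ParseConfigFromPuppet("agent", puppetConf)
	if config == nil {
		log.Fatal("Puppet: could not translate a graph config")
	}
	interval := PuppetInterval(puppetConf)
	log.Printf("Puppet: refreshing the graph every %d seconds", interval)
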
143 resources.go
@@ -36,16 +36,7 @@ const (
resStatePoking
)

-//go:generate stringer -type=resConvergedState -output=resconvergedstate_stringer.go
-type resConvergedState int
-
-const (
-resConvergedNil resConvergedState = iota
-//resConverged
-resConvergedTimeout
-)
-
-// a unique identifier for a resource, namely it's name, and the kind ("type")
+// ResUUID is a unique identifier for a resource, namely it's name, and the kind ("type").
type ResUUID interface {
GetName() string
Kind() string
@@ -54,6 +45,7 @@ type ResUUID interface {
Reversed() bool // true means this resource happens before the generator
}

+// The BaseUUID struct is used to provide a unique resource identifier.
type BaseUUID struct {
name string // name and kind are the values of where this is coming from
kind string
@@ -61,29 +53,30 @@ type BaseUUID struct {
reversed *bool // piggyback edge information here
}

+// The AutoEdge interface is used to implement the autoedges feature.
type AutoEdge interface {
Next() []ResUUID // call to get list of edges to add
Test([]bool) bool // call until false
}

+// MetaParams is a struct will all params that apply to every resource.
type MetaParams struct {
AutoEdge bool `yaml:"autoedge"` // metaparam, should we generate auto edges? // XXX should default to true
AutoGroup bool `yaml:"autogroup"` // metaparam, should we auto group? // XXX should default to true
+Noop bool `yaml:"noop"`
}

-// this interface is everything that is common to all resources
-// everything here only needs to be implemented once, in the BaseRes
+// The Base interface is everything that is common to all resources.
+// Everything here only needs to be implemented once, in the BaseRes.
type Base interface {
GetName() string // can't be named "Name()" because of struct field
SetName(string)
+setKind(string)
Kind() string
-GetMeta() MetaParams
-SetVertex(*Vertex)
-SetConvergedCallback(ctimeout int, converged chan bool)
+Meta() *MetaParams
+AssociateData(Converger)
IsWatching() bool
SetWatching(bool)
-GetConvergedState() resConvergedState
-SetConvergedState(resConvergedState)
GetState() resState
SetState(resState)
SendEvent(eventName, bool, bool) bool
@@ -96,7 +89,7 @@ type Base interface {
SetGroup([]Res)
}

-// this is the minimum interface you need to implement to make a new resource
+// Res is the minimum interface you need to implement to define a new resource.
type Res interface {
Base // include everything from the Base interface
Init()
@@ -109,23 +102,21 @@ type Res interface {
CollectPattern(string) // XXX: temporary until Res collection is more advanced
}

+// BaseRes is the base struct that gets used in every resource.
type BaseRes struct {
Name string `yaml:"name"`
-Meta MetaParams `yaml:"meta"` // struct of all the metaparams
+MetaParams MetaParams `yaml:"meta"` // struct of all the metaparams
kind string
events chan Event
-vertex *Vertex
+converger Converger // converged tracking
state resState
-convergedState resConvergedState
watching bool // is Watch() loop running ?
-ctimeout int // converged timeout
-converged chan bool
isStateOK bool // whether the state is okay based on events or not
isGrouped bool // am i contained within a group?
grouped []Res // list of any grouped resources
}

-// wraps the IFF method when used with a list of UUID's
+// UUIDExistsInUUIDs wraps the IFF method when used with a list of UUID's.
func UUIDExistsInUUIDs(uuid ResUUID, uuids []ResUUID) bool {
for _, u := range uuids {
if uuid.IFF(u) {
@@ -135,18 +126,20 @@ func UUIDExistsInUUIDs(uuid ResUUID, uuids []ResUUID) bool {
return false
}

+// GetName returns the name of the resource.
func (obj *BaseUUID) GetName() string {
return obj.name
}

+// Kind returns the kind of resource.
func (obj *BaseUUID) Kind() string {
return obj.kind
}

-// if and only if they are equivalent, return true
-// if they are not equivalent, return false
-// most resource will want to override this method, since it does the important
-// work of actually discerning if two resources are identical in function
+// IFF looks at two UUID's and if and only if they are equivalent, returns true.
+// If they are not equivalent, it returns false.
+// Most resources will want to override this method, since it does the important
+// work of actually discerning if two resources are identical in function.
func (obj *BaseUUID) IFF(uuid ResUUID) bool {
res, ok := uuid.(*BaseUUID)
if !ok {
@@ -155,6 +148,8 @@ func (obj *BaseUUID) IFF(uuid ResUUID) bool {
return obj.name == res.name
}

+// Reversed is part of the ResUUID interface, and true means this resource
+// happens before the generator.
func (obj *BaseUUID) Reversed() bool {
if obj.reversed == nil {
log.Fatal("Programming error!")
@@ -162,64 +157,57 @@ func (obj *BaseUUID) Reversed() bool {
return *obj.reversed
}

-// initialize structures like channels if created without New constructor
+// Init initializes structures like channels if created without New constructor.
func (obj *BaseRes) Init() {
obj.events = make(chan Event) // unbuffered chan size to avoid stale events
}

-// this method gets used by all the resources
+// GetName is used by all the resources to Get the name.
func (obj *BaseRes) GetName() string {
return obj.Name
}

+// SetName is used to set the name of the resource.
func (obj *BaseRes) SetName(name string) {
obj.Name = name
}

-// return the kind of resource this is
+// setKind sets the kind. This is used internally for exported resources.
+func (obj *BaseRes) setKind(kind string) {
+obj.kind = kind
+}
+
+// Kind returns the kind of resource this is.
func (obj *BaseRes) Kind() string {
return obj.kind
}

-func (obj *BaseRes) GetMeta() MetaParams {
-return obj.Meta
+// Meta returns the MetaParams as a reference, which we can then get/set on.
+func (obj *BaseRes) Meta() *MetaParams {
+return &obj.MetaParams
}

-func (obj *BaseRes) GetVertex() *Vertex {
-return obj.vertex
+// AssociateData associates some data with the object in question.
+func (obj *BaseRes) AssociateData(converger Converger) {
+obj.converger = converger
}

-func (obj *BaseRes) SetVertex(v *Vertex) {
-obj.vertex = v
-}
-
-func (obj *BaseRes) SetConvergedCallback(ctimeout int, converged chan bool) {
-obj.ctimeout = ctimeout
-obj.converged = converged
-}
-
-// is the Watch() function running?
+// IsWatching tells us if the Watch() function is running.
func (obj *BaseRes) IsWatching() bool {
return obj.watching
}

-// store status of if the Watch() function is running
+// SetWatching stores the status of if the Watch() function is running.
func (obj *BaseRes) SetWatching(b bool) {
obj.watching = b
}

-func (obj *BaseRes) GetConvergedState() resConvergedState {
-return obj.convergedState
-}
-
-func (obj *BaseRes) SetConvergedState(state resConvergedState) {
-obj.convergedState = state
-}
-
+// GetState returns the state of the resource.
func (obj *BaseRes) GetState() resState {
return obj.state
}

+// SetState sets the state of the resource.
func (obj *BaseRes) SetState(state resState) {
if DEBUG {
log.Printf("%v[%v]: State: %v -> %v", obj.Kind(), obj.GetName(), obj.GetState(), state)
@@ -227,7 +215,7 @@ func (obj *BaseRes) SetState(state resState) {
obj.state = state
}

-// push an event into the message queue for a particular vertex
+// SendEvent pushes an event into the message queue for a particular vertex
func (obj *BaseRes) SendEvent(event eventName, sync bool, activity bool) bool {
// TODO: isn't this race-y ?
if !obj.IsWatching() { // element has already exited
@@ -249,8 +237,8 @@ func (obj *BaseRes) SendEvent(event eventName, sync bool, activity bool) bool {
}
}

-// process events when a select gets one, this handles the pause code too!
-// the return values specify if we should exit and poke respectively
+// ReadEvent processes events when a select gets one, and handles the pause
+// code too! The return values specify if we should exit and poke respectively.
func (obj *BaseRes) ReadEvent(event *Event) (exit, poke bool) {
|
||||||
event.ACK()
|
event.ACK()
|
||||||
switch event.Name {
|
switch event.Name {
|
||||||
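One of the changes above swaps the old GetMeta() copy for a Meta() accessor that returns a pointer, so metaparams can be both read and written through it. A small usage fragment, assumed to sit inside this package and using the SvcRes constructor that appears further down this page (the service name is an arbitrary example):

// a usage sketch, not part of the diff above
svc := NewSvcRes("nginx", "running", "enabled") // hypothetical example values
if svc.Meta().Noop {
	log.Printf("%v[%v]: noop is set, changes would not be applied", svc.Kind(), svc.GetName())
}
svc.Meta().Noop = true // the *MetaParams return value lets callers set fields in place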
@@ -287,6 +275,13 @@ func (obj *BaseRes) ReadEvent(event *Event) (exit, poke bool) {
 	return true, false // required to keep the stupid go compiler happy
 }
 
+// GroupCmp compares two resources and decides if they're suitable for grouping
+// You'll probably want to override this method when implementing a resource...
+func (obj *BaseRes) GroupCmp(res Res) bool {
+	return false // base implementation assumes false, override me!
+}
+
+// GroupRes groups resource (arg) into self.
 func (obj *BaseRes) GroupRes(res Res) error {
 	if l := len(res.GetGroup()); l > 0 {
 		return fmt.Errorf("Res: %v already contains %d grouped resources!", res, l)
@@ -300,22 +295,40 @@ func (obj *BaseRes) GroupRes(res Res) error {
 	return nil
 }
 
+// IsGrouped determines if we are grouped.
 func (obj *BaseRes) IsGrouped() bool { // am I grouped?
 	return obj.isGrouped
 }
 
+// SetGrouped sets a flag to tell if we are grouped.
 func (obj *BaseRes) SetGrouped(b bool) {
 	obj.isGrouped = b
 }
 
+// GetGroup returns everyone grouped inside me.
 func (obj *BaseRes) GetGroup() []Res { // return everyone grouped inside me
 	return obj.grouped
 }
 
+// SetGroup sets the grouped resources into me.
 func (obj *BaseRes) SetGroup(g []Res) {
 	obj.grouped = g
 }
 
+// Compare is the base compare method, which also handles the metaparams cmp
+func (obj *BaseRes) Compare(res Res) bool {
+	if obj.Meta().Noop != res.Meta().Noop {
+		// obj is the existing res, res is the *new* resource
+		// if we go from no-noop -> noop, we can re-use the obj
+		// if we go from noop -> no-noop, we need to regenerate
+		if obj.Meta().Noop { // asymmetrical
+			return false // going from noop to no-noop!
+		}
+	}
+	return true
+}
+
+// CollectPattern is used for resource collection.
 func (obj *BaseRes) CollectPattern(pattern string) {
 	// XXX: default method is empty
 }
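The new base Compare above looks only at the Noop metaparam and is deliberately asymmetric: an existing noop resource cannot be kept when the incoming resource is non-noop, while the opposite direction can safely be reused. Concrete resources are expected to call it before comparing their own fields, which is exactly what the SvcRes and TimerRes changes later in this compare view do (GroupCmp likewise defaults to false, so grouping stays opt-in per resource kind). A minimal sketch of that pattern for a hypothetical FooRes; the type and its Whatever field are invented purely for illustration:

// hypothetical resource type, shown only to illustrate the expected pattern
type FooRes struct {
	BaseRes  `yaml:",inline"`
	Whatever string `yaml:"whatever"`
}

// Compare two resources and return if they are equivalent.
func (obj *FooRes) Compare(res Res) bool {
	switch res.(type) {
	case *FooRes:
		res := res.(*FooRes)
		if !obj.BaseRes.Compare(res) { // metaparams (noop) checked first
			return false
		}
		if obj.Whatever != res.Whatever {
			return false
		}
	default:
		return false
	}
	return true
}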
@@ -103,3 +103,71 @@ func TestMiscEncodeDecode2(t *testing.T) {
 		t.Error("The input and output Res values do not match!")
 	}
 }
+
+func TestIFF(t *testing.T) {
+	uuid := &BaseUUID{name: "/tmp/unit-test"}
+	same := &BaseUUID{name: "/tmp/unit-test"}
+	diff := &BaseUUID{name: "/tmp/other-file"}
+
+	if !uuid.IFF(same) {
+		t.Error("basic resource UUIDs with the same name should satisfy each other's IFF condition.")
+	}
+
+	if uuid.IFF(diff) {
+		t.Error("basic resource UUIDs with different names should NOT satisfy each other's IFF condition.")
+	}
+}
+
+func TestReadEvent(t *testing.T) {
+	res := FileRes{}
+
+	shouldExit := map[eventName]bool{
+		eventStart:    false,
+		eventPoke:     false,
+		eventBackPoke: false,
+		eventExit:     true,
+	}
+	shouldPoke := map[eventName]bool{
+		eventStart:    true,
+		eventPoke:     true,
+		eventBackPoke: true,
+		eventExit:     false,
+	}
+
+	for event, _ := range shouldExit {
+		exit, poke := res.ReadEvent(&Event{Name: event})
+		if exit != shouldExit[event] {
+			t.Errorf("resource.ReadEvent returned wrong exit flag for a %v event (%v, should be %v)",
+				event, exit, shouldExit[event])
+		}
+		if poke != shouldPoke[event] {
+			t.Errorf("resource.ReadEvent returned wrong poke flag for a %v event (%v, should be %v)",
+				event, poke, shouldPoke[event])
+		}
+	}
+
+	res.Init()
+	res.SetWatching(true)
+
+	// test result when a pause event is followed by start
+	go res.SendEvent(eventStart, false, false)
+	exit, poke := res.ReadEvent(&Event{Name: eventPause})
+	if exit {
+		t.Error("resource.ReadEvent returned wrong exit flag for a pause+start event (true, should be false)")
+	}
+	if poke {
+		t.Error("resource.ReadEvent returned wrong poke flag for a pause+start event (true, should be false)")
+	}
+
+	// test result when a pause event is followed by exit
+	go res.SendEvent(eventExit, false, false)
+	exit, poke = res.ReadEvent(&Event{Name: eventPause})
+	if !exit {
+		t.Error("resource.ReadEvent returned wrong exit flag for a pause+start event (false, should be true)")
+	}
+	if poke {
+		t.Error("resource.ReadEvent returned wrong poke flag for a pause+start event (true, should be false)")
+	}
+
+	// TODO: create a wrapper API around log, so that Fatals can be mocked and tested
+}
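The new TestIFF exercises BaseUUID.IFF directly; the same comparison is what UUIDExistsInUUIDs (shown at the top of this page) relies on. A small fragment, assumed to live inside this package, driving that helper with the same kind of values the test builds:

// a usage sketch, not part of the diff above
uuids := []ResUUID{
	&BaseUUID{name: "/tmp/unit-test"},
	&BaseUUID{name: "/tmp/other-file"},
}
needle := &BaseUUID{name: "/tmp/unit-test"}
if UUIDExistsInUUIDs(needle, uuids) { // true, because IFF matches on name here
	log.Printf("found a matching UUID")
}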
spec.in
@@ -37,9 +37,8 @@ make build
 
 %install
 rm -rf %{buildroot}
-# _datadir is typically /usr/share/
-install -d -m 0755 %{buildroot}/%{_datadir}/__PROGRAM__/
-cp -a AUTHORS COPYING COPYRIGHT DOCUMENTATION.md README.md THANKS examples/ %{buildroot}/%{_datadir}/__PROGRAM__/
+mkdir -p %{buildroot}/%{_unitdir}/
+install -pm 0644 misc/__PROGRAM__.service %{buildroot}/%{_unitdir}/
 
 # install the binary
 mkdir -p %{buildroot}/%{_bindir}
@@ -55,9 +54,19 @@ install -m 0644 misc/example.conf %{buildroot}%{_sysconfdir}/__PROGRAM__/__PROGR
 
 %files
 %attr(0755, root, root) %{_sysconfdir}/profile.d/__PROGRAM__.sh
-%{_datadir}/__PROGRAM__/*
 %{_bindir}/__PROGRAM__
 %{_sysconfdir}/__PROGRAM__/*
+%{_unitdir}/__PROGRAM__.service
+
+# https://fedoraproject.org/wiki/Packaging:Guidelines?rd=Packaging/Guidelines#Documentation
+# Please add docs one per line in alpha order to avoid diff churn.
+%doc AUTHORS
+%doc COPYING
+%doc COPYRIGHT
+%doc DOCUMENTATION.md
+%doc README.md
+%doc THANKS
+%doc examples/*
+
 # this changelog is auto-generated by git log
 %changelog
svc.go
@@ -33,12 +33,14 @@ func init() {
 	gob.Register(&SvcRes{})
 }
 
+// SvcRes is a service resource for systemd units.
 type SvcRes struct {
 	BaseRes `yaml:",inline"`
 	State string `yaml:"state"` // state: running, stopped, undefined
 	Startup string `yaml:"startup"` // enabled, disabled, undefined
 }
 
+// NewSvcRes is a constructor for this resource. It also calls Init() for you.
 func NewSvcRes(name, state, startup string) *SvcRes {
 	obj := &SvcRes{
 		BaseRes: BaseRes{
@@ -51,11 +53,13 @@ func NewSvcRes(name, state, startup string) *SvcRes {
 	return obj
 }
 
+// Init runs some startup code for this resource.
 func (obj *SvcRes) Init() {
 	obj.BaseRes.kind = "Svc"
 	obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
+// Validate checks if the resource data structure was populated correctly.
 func (obj *SvcRes) Validate() bool {
 	if obj.State != "running" && obj.State != "stopped" && obj.State != "" {
 		return false
@@ -66,16 +70,17 @@ func (obj *SvcRes) Validate() bool {
 	return true
 }
 
-// Service watcher
+// Watch is the primary listener for this resource and it outputs events.
 func (obj *SvcRes) Watch(processChan chan Event) {
 	if obj.IsWatching() {
 		return
 	}
 	obj.SetWatching(true)
 	defer obj.SetWatching(false)
+	cuuid := obj.converger.Register()
+	defer cuuid.Unregister()
 
 	// obj.Name: svc name
-	//vertex := obj.GetVertex() // stored with SetVertex
 	if !systemdUtil.IsRunningSystemd() {
 		log.Fatal("Systemd is not running.")
 	}
@@ -144,13 +149,13 @@ func (obj *SvcRes) Watch(processChan chan Event) {
 
 			obj.SetState(resStateWatching) // reset
 			select {
-			case _ = <-buschan: // XXX wait for new units event to unstick
-				obj.SetConvergedState(resConvergedNil)
+			case <-buschan: // XXX wait for new units event to unstick
+				cuuid.SetConverged(false)
 				// loop so that we can see the changed invalid signal
 				log.Printf("Svc[%v]->DaemonReload()", svc)
 
 			case event := <-obj.events:
-				obj.SetConvergedState(resConvergedNil)
+				cuuid.SetConverged(false)
 				if exit, send = obj.ReadEvent(&event); exit {
 					return // exit
 				}
@@ -158,9 +163,8 @@ func (obj *SvcRes) Watch(processChan chan Event) {
 					dirty = true
 				}
 
-			case _ = <-TimeAfterOrBlock(obj.ctimeout):
-				obj.SetConvergedState(resConvergedTimeout)
-				obj.converged <- true
+			case <-cuuid.ConvergedTimer():
+				cuuid.SetConverged(true) // converged!
 				continue
 			}
 		} else {
@@ -193,19 +197,23 @@ func (obj *SvcRes) Watch(processChan chan Event) {
 				dirty = true
 
 			case err := <-subErrors:
-				obj.SetConvergedState(resConvergedNil) // XXX ?
+				cuuid.SetConverged(false) // XXX ?
 				log.Printf("error: %v", err)
 				log.Fatal(err)
 				//vertex.events <- fmt.Sprintf("svc: %v", "error") // XXX: how should we handle errors?
 
 			case event := <-obj.events:
-				obj.SetConvergedState(resConvergedNil)
+				cuuid.SetConverged(false)
 				if exit, send = obj.ReadEvent(&event); exit {
 					return // exit
 				}
 				if event.GetActivity() {
 					dirty = true
 				}
+
+			case <-cuuid.ConvergedTimer():
+				cuuid.SetConverged(true) // converged!
+				continue
 			}
 		}
 
@@ -223,7 +231,9 @@ func (obj *SvcRes) Watch(processChan chan Event) {
 	}
 }
 
-func (obj *SvcRes) CheckApply(apply bool) (stateok bool, err error) {
+// CheckApply checks the resource state and applies the resource if the bool
+// input is true. It returns error info and if the state check passed or not.
+func (obj *SvcRes) CheckApply(apply bool) (checkok bool, err error) {
 	log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
 
 	if obj.isStateOK { // cache the state
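The Watch changes above drop the per-resource converged plumbing (SetConvergedState, TimeAfterOrBlock, the converged channel) in favour of a converger UUID that is registered for the lifetime of the loop. The shape the svc and timer resources now share is sketched below; it is a distilled skeleton rather than a complete Watch implementation, and FooRes plus its event source are invented stand-ins:

// a skeleton sketch distilled from the SvcRes code above; FooRes is hypothetical
func (obj *FooRes) Watch(processChan chan Event) {
	if obj.IsWatching() {
		return
	}
	obj.SetWatching(true)
	defer obj.SetWatching(false)
	cuuid := obj.converger.Register() // one converger UUID per Watch loop
	defer cuuid.Unregister()

	somethingChanged := make(chan struct{}) // stand-in for the real event source

	for {
		obj.SetState(resStateWatching)
		select {
		case <-somethingChanged: // resource specific activity
			cuuid.SetConverged(false) // activity means we are not converged

		case event := <-obj.events:
			cuuid.SetConverged(false)
			if exit, _ := obj.ReadEvent(&event); exit {
				return // exit
			}

		case <-cuuid.ConvergedTimer():
			cuuid.SetConverged(true) // converged!
			continue
		}
	}
}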
@@ -316,6 +326,7 @@ func (obj *SvcRes) CheckApply(apply bool) (stateok bool, err error) {
 	return false, nil // success
 }
 
+// SvcUUID is the UUID struct for SvcRes.
 type SvcUUID struct {
 	// NOTE: there is also a name variable in the BaseUUID struct, this is
 	// information about where this UUID came from, and is unrelated to the
@@ -335,12 +346,14 @@ func (obj *SvcUUID) IFF(uuid ResUUID) bool {
 	return obj.name == res.name
 }
 
+// SvcResAutoEdges holds the state of the auto edge generator.
 type SvcResAutoEdges struct {
 	data    []ResUUID
 	pointer int
 	found   bool
 }
 
+// Next returns the next automatic edge.
 func (obj *SvcResAutoEdges) Next() []ResUUID {
 	if obj.found {
 		log.Fatal("Shouldn't be called anymore!")
@@ -353,7 +366,7 @@ func (obj *SvcResAutoEdges) Next() []ResUUID {
 	return []ResUUID{value} // we return one, even though api supports N
 }
 
-// get results of the earlier Next() call, return if we should continue!
+// Test gets results of the earlier Next() call, & returns if we should continue!
 func (obj *SvcResAutoEdges) Test(input []bool) bool {
 	// if there aren't any more remaining
 	if len(obj.data) <= obj.pointer {
@@ -372,6 +385,7 @@ func (obj *SvcResAutoEdges) Test(input []bool) bool {
 	return true // keep going
 }
 
+// The AutoEdges method returns the AutoEdges. In this case the systemd units.
 func (obj *SvcRes) AutoEdges() AutoEdge {
 	var data []ResUUID
 	svcFiles := []string{
@@ -396,7 +410,8 @@ func (obj *SvcRes) AutoEdges() AutoEdge {
 	}
 }
 
-// include all params to make a unique identification of this object
+// GetUUIDs includes all params to make a unique identification of this object.
+// Most resources only return one, although some resources can return multiple.
 func (obj *SvcRes) GetUUIDs() []ResUUID {
 	x := &SvcUUID{
 		BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
@@ -405,6 +420,7 @@ func (obj *SvcRes) GetUUIDs() []ResUUID {
 	return []ResUUID{x}
 }
 
+// GroupCmp returns whether two resources can be grouped together or not.
 func (obj *SvcRes) GroupCmp(r Res) bool {
 	_, ok := r.(*SvcRes)
 	if !ok {
@@ -416,10 +432,15 @@ func (obj *SvcRes) GroupCmp(r Res) bool {
 	return false // not possible atm
 }
 
+// Compare two resources and return if they are equivalent.
 func (obj *SvcRes) Compare(res Res) bool {
 	switch res.(type) {
 	case *SvcRes:
 		res := res.(*SvcRes)
+		if !obj.BaseRes.Compare(res) { // call base Compare
+			return false
+		}
+
 		if obj.Name != res.Name {
 			return false
 		}
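The newly documented Next/Test pair above forms a small iterator protocol: Next hands out candidate UUIDs for the next automatic edge, and Test reports back which candidates matched and whether iteration should continue. A rough sketch of a driving loop under those assumptions; the real driver lives in the graph code and may differ in its details:

// a sketch only; obj is any resource in this package
ae := obj.AutoEdges()
if ae == nil {
	return // this resource does not participate in auto edges
}
for {
	uuids := ae.Next()                // candidate UUIDs for the next edge
	input := make([]bool, len(uuids)) // which candidates matched another resource
	// ... match each candidate against the other resources' GetUUIDs() here ...
	if !ae.Test(input) { // hand the results back; false means we are done
		break
	}
}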
test.sh
@@ -4,31 +4,45 @@ echo running test.sh
 echo "ENV:"
 env
 
+failures=''
+function run-test()
+{
+	$@ || failures=$( [ -n "$failures" ] && echo "$failures\\n$@" || echo "$@" )
+}
+
 # ensure there is no trailing whitespace or other whitespace errors
-git diff-tree --check $(git hash-object -t tree /dev/null) HEAD
+run-test git diff-tree --check $(git hash-object -t tree /dev/null) HEAD
 
 # ensure entries to authors file are sorted
 start=$(($(grep -n '^[[:space:]]*$' AUTHORS | awk -F ':' '{print $1}' | head -1) + 1))
-diff <(tail -n +$start AUTHORS | sort) <(tail -n +$start AUTHORS)
+run-test diff <(tail -n +$start AUTHORS | sort) <(tail -n +$start AUTHORS)
 
-./test/test-gofmt.sh
-./test/test-yamlfmt.sh
-./test/test-bashfmt.sh
-./test/test-headerfmt.sh
-go test
-./test/test-govet.sh
+run-test ./test/test-gofmt.sh
+run-test ./test/test-yamlfmt.sh
+run-test ./test/test-bashfmt.sh
+run-test ./test/test-headerfmt.sh
+run-test go test
+run-test ./test/test-govet.sh
 
 # do these longer tests only when running on ci
 if env | grep -q -e '^TRAVIS=true$' -e '^JENKINS_URL=' -e '^BUILD_TAG=jenkins'; then
-	go test -race
-	./test/test-shell.sh
-else
-	# FIXME: this fails on travis for some reason
-	./test/test-reproducible.sh
+	run-test go test -race
+	run-test ./test/test-shell.sh
 fi
 
+# FIXME: this now fails everywhere :(
+#run-test ./test/test-reproducible.sh
+
 # run omv tests on jenkins physical hosts only
 if env | grep -q -e '^JENKINS_URL=' -e '^BUILD_TAG=jenkins'; then
-	./test/test-omv.sh
+	run-test ./test/test-omv.sh
 fi
-./test/test-golint.sh # test last, because this test is somewhat arbitrary
+run-test ./test/test-golint.sh # test last, because this test is somewhat arbitrary
+
+if [[ -n "$failures" ]]; then
+	echo 'FAIL'
+	echo 'The following tests have failed:'
+	echo -e "$failures"
+	exit 1
+fi
+echo 'ALL PASSED'
@@ -27,12 +27,11 @@
 :ansible_extras: {}
 :cachier: false
 :vms:
-- :name: etcd
+- :name: mgmt0
 :shell:
 - iptables -F
 - cd /vagrant/mgmt/ && make path
 - cd /vagrant/mgmt/ && make deps && make build && cp mgmt ~/bin/
-- etcd -bind-addr "`hostname --ip-address`:2379" &
 - cd && mgmt --help
 :namespace: omv
 :count: 0
@@ -45,8 +44,7 @@
 :unsafe: false
 :nested: false
 :tests:
-- omv up etcd
-- vssh root@etcd -c pidof etcd
+- omv up mgmt0
 - omv destroy
 :comment: simple hello world test case for mgmt
 :reallyrm: false
@@ -32,7 +32,6 @@
 - iptables -F
 - cd /vagrant/mgmt/ && make path
 - cd /vagrant/mgmt/ && make deps && make build && cp mgmt ~/bin/
-- etcd -bind-addr "`hostname --ip-address`:2379" &
 - cd && mgmt run --file /vagrant/mgmt/examples/pkg1.yaml --converged-timeout=5
 :namespace: omv
 :count: 0
@@ -33,7 +33,6 @@
 - iptables -F
 - cd /vagrant/mgmt/ && make path
 - cd /vagrant/mgmt/ && make deps && make build && cp mgmt ~/bin/
-- etcd -bind-addr "`hostname --ip-address`:2379" &
 - cd && mgmt run --file /vagrant/mgmt/examples/pkg1.yaml --converged-timeout=5
 :namespace: omv
 :count: 0
@@ -1,23 +0,0 @@
-# NOTE: boiler plate to run etcd; source with: . etcd.sh; should NOT be +x
-cleanup ()
-{
-	echo "cleanup: $1"
-	killall etcd || killall -9 etcd || true # kill etcd
-	rm -rf /tmp/etcd/
-}
-
-trap_with_arg() {
-	func="$1"
-	shift
-	for sig in "$@"
-	do
-		trap "$func $sig" "$sig"
-	done
-}
-
-trap_with_arg cleanup INT QUIT TERM EXIT # ERR
-mkdir -p /tmp/etcd/
-cd /tmp/etcd/ >/dev/null # shush the cd operation
-etcd & # start etcd as job # 1
-sleep 1s # let etcd startup
-cd - >/dev/null
@@ -5,10 +5,11 @@
 # * it is recommended that you run mgmt with --no-watch
 # * it is recommended that you run mgmt --converged-timeout=<seconds>
 # * you can run mgmt with --max-runtime=<seconds> in special scenarios
-# * you can get a separate etcd going by sourcing etcd.sh: . etcd.sh
 
 set -o errexit
-set -o nounset
 set -o pipefail
 
 timeout --kill-after=3s 1s ./mgmt --help # hello world!
+pid=$!
+wait $pid # get exit status
+exit $?
@@ -6,14 +6,15 @@ if env | grep -q -e '^TRAVIS=true$'; then
 	exit
 fi
 
-. etcd.sh # start etcd as job # 1
-
 # run till completion
-timeout --kill-after=15s 10s ./mgmt run --file t2.yaml --converged-timeout=5 --no-watch &
-. wait.sh # wait for everything except etcd
+timeout --kill-after=15s 10s ./mgmt run --file t2.yaml --converged-timeout=5 --no-watch --tmp-prefix &
+pid=$!
+wait $pid # get exit status
+e=$?
 
 test -e /tmp/mgmt/f1
 test -e /tmp/mgmt/f2
 test -e /tmp/mgmt/f3
 test ! -e /tmp/mgmt/f4
+
+exit $e
@@ -27,15 +27,15 @@ resources:
 edges:
 - name: e1
 from:
-res: file
+kind: file
 name: file1
 to:
-res: file
+kind: file
 name: file2
 - name: e2
 from:
-res: file
+kind: file
 name: file2
 to:
-res: file
+kind: file
 name: file3
@@ -23,6 +23,6 @@ resources:
 i am f4, exported from host A
 state: exists
 collect:
-- res: file
+- kind: file
 pattern: "/tmp/mgmt/mgmtA/"
 edges: []
@@ -23,6 +23,6 @@ resources:
 i am f4, exported from host B
 state: exists
 collect:
-- res: file
+- kind: file
 pattern: "/tmp/mgmt/mgmtB/"
 edges: []
@@ -23,6 +23,6 @@ resources:
 i am f4, exported from host C
 state: exists
 collect:
-- res: file
+- kind: file
 pattern: "/tmp/mgmt/mgmtC/"
 edges: []
@@ -6,17 +6,23 @@ if env | grep -q -e '^TRAVIS=true$'; then
 	exit
 fi
 
-. etcd.sh # start etcd as job # 1
-
 # setup
 mkdir -p "${MGMT_TMPDIR}"mgmt{A..C}
 
 # run till completion
-timeout --kill-after=15s 10s ./mgmt run --file t3-a.yaml --converged-timeout=5 --no-watch &
-timeout --kill-after=15s 10s ./mgmt run --file t3-b.yaml --converged-timeout=5 --no-watch &
-timeout --kill-after=15s 10s ./mgmt run --file t3-c.yaml --converged-timeout=5 --no-watch &
-
-. wait.sh # wait for everything except etcd
+timeout --kill-after=15s 10s ./mgmt run --file t3-a.yaml --converged-timeout=5 --no-watch --tmp-prefix &
+pid1=$!
+timeout --kill-after=15s 10s ./mgmt run --file t3-b.yaml --converged-timeout=5 --no-watch --tmp-prefix &
+pid2=$!
+timeout --kill-after=15s 10s ./mgmt run --file t3-c.yaml --converged-timeout=5 --no-watch --tmp-prefix &
+pid3=$!
+
+wait $pid1 # get exit status
+e1=$?
+wait $pid2 # get exit status
+e2=$?
+wait $pid3 # get exit status
+e3=$?
 
 # A: collected
 test -e "${MGMT_TMPDIR}"mgmtA/f3b
@@ -71,3 +77,5 @@ test ! -e "${MGMT_TMPDIR}"mgmtC/f1a
 test ! -e "${MGMT_TMPDIR}"mgmtC/f2a
 test ! -e "${MGMT_TMPDIR}"mgmtC/f1b
 test ! -e "${MGMT_TMPDIR}"mgmtC/f2b
+
+exit $(($e1+$e2+$e3))
@@ -1,10 +1,7 @@
 #!/bin/bash -e
 
-. etcd.sh # start etcd as job # 1
-
 # should take slightly more than 25s, but fail if we take 35s)
-timeout --kill-after=35s 30s ./mgmt run --file t4.yaml --converged-timeout=5 --no-watch &
-#jobs # etcd is 1
-#wait -n 2 # wait for mgmt to exit
-. wait.sh # wait for everything except etcd
+timeout --kill-after=35s 30s ./mgmt run --file t4.yaml --converged-timeout=5 --no-watch --tmp-prefix &
+pid=$!
+wait $pid # get exit status
+exit $?
@@ -56,22 +56,22 @@ resources:
 edges:
 - name: e1
 from:
-res: exec
+kind: exec
 name: exec1
 to:
-res: exec
+kind: exec
 name: exec5
 - name: e2
 from:
-res: exec
+kind: exec
 name: exec2
 to:
-res: exec
+kind: exec
 name: exec5
 - name: e3
 from:
-res: exec
+kind: exec
 name: exec3
 to:
-res: exec
+kind: exec
 name: exec5
@@ -1,10 +1,7 @@
 #!/bin/bash -e
 
-. etcd.sh # start etcd as job # 1
-
 # should take slightly more than 35s, but fail if we take 45s)
-timeout --kill-after=45s 40s ./mgmt run --file t5.yaml --converged-timeout=5 --no-watch &
-#jobs # etcd is 1
-#wait -n 2 # wait for mgmt to exit
-. wait.sh # wait for everything except etcd
+timeout --kill-after=45s 40s ./mgmt run --file t5.yaml --converged-timeout=5 --no-watch --tmp-prefix &
+pid=$!
+wait $pid # get exit status
+exit $?
@@ -86,43 +86,43 @@ resources:
 edges:
 - name: e1
 from:
-res: exec
+kind: exec
 name: exec1
 to:
-res: exec
+kind: exec
 name: exec4
 - name: e2
 from:
-res: exec
+kind: exec
 name: exec2
 to:
-res: exec
+kind: exec
 name: exec4
 - name: e3
 from:
-res: exec
+kind: exec
 name: exec3
 to:
-res: exec
+kind: exec
 name: exec4
 - name: e4
 from:
-res: exec
+kind: exec
 name: exec4
 to:
-res: exec
+kind: exec
 name: exec5
 - name: e5
 from:
-res: exec
+kind: exec
 name: exec4
 to:
-res: exec
+kind: exec
 name: exec6
 - name: e6
 from:
-res: exec
+kind: exec
 name: exec4
 to:
-res: exec
+kind: exec
 name: exec7
@@ -6,10 +6,9 @@ if env | grep -q -e '^TRAVIS=true$'; then
 	exit
 fi
 
-. etcd.sh # start etcd as job # 1
-
 # run till completion
-timeout --kill-after=20s 15s ./mgmt run --file t6.yaml --no-watch &
+timeout --kill-after=20s 15s ./mgmt run --file t6.yaml --no-watch --tmp-prefix &
+pid=$!
 sleep 1s # let it converge
 test -e /tmp/mgmt/f1
 test -e /tmp/mgmt/f2
@@ -30,4 +29,5 @@ test -e /tmp/mgmt/f2
 
 killall -SIGINT mgmt # send ^C to exit mgmt
 
-. wait.sh # wait for everything except etcd
+wait $pid # get exit status
+exit $?
@@ -27,15 +27,15 @@ resources:
 edges:
 - name: e1
 from:
-res: file
+kind: file
 name: file1
 to:
-res: file
+kind: file
 name: file2
 - name: e2
 from:
-res: file
+kind: file
 name: file2
 to:
-res: file
+kind: file
 name: file3
test/shell/t7.sh (new executable file)
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+
+# run empty graph
+timeout --kill-after=20s 15s ./mgmt run --tmp-prefix &
+pid=$!
+sleep 5s # let it converge
+$(sleep 3s && killall -SIGINT mgmt)& # send ^C to exit mgmt
+wait $pid # get exit status
+exit $?
test/shell/t8.sh (new executable file)
@@ -0,0 +1,22 @@
+#!/bin/bash -e
+
+# run empty graphs, we're just testing etcd clustering
+timeout --kill-after=180s 120s ./mgmt run --hostname h1 --tmp-prefix &
+pid1=$!
+sleep 15s # let it startup
+
+timeout --kill-after=180s 120s ./mgmt run --hostname h2 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382 --tmp-prefix &
+pid2=$!
+sleep 15s
+
+$(sleep 15s && kill -SIGINT $pid2)& # send ^C to exit 2nd mgmt
+wait $pid2
+e=$?
+if [ $e -ne 0 ]; then
+	exit $e
+fi
+
+$(sleep 15s && kill -SIGINT $pid1)& # send ^C to exit 1st mgmt
+wait $pid1 # get exit status
+# if pid1 exits because of a timeout, then it blocked, and this is a bug!
+exit $?
@@ -1,9 +0,0 @@
-# NOTE: boiler plate to wait on mgmt; source with: . wait.sh; should NOT be +x
-while test "`jobs -p`" != "" && test "`jobs -p`" != "`pidof etcd`"
-do
-	for j in `jobs -p`
-	do
-		[ "$j" = "`pidof etcd`" ] && continue # don't wait for etcd
-		wait $j || continue # wait for mgmt job $j
-	done
-done
@@ -5,4 +5,5 @@ ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" # dir!
 cd "${ROOT}"
 
 go vet && echo PASS || exit 1 # since it doesn't output an ok message on pass
-grep 'log.' *.go | grep '\\n' && exit 1 || echo PASS # no \n needed in log.Printf()
+grep 'log.' *.go | grep '\\n"' && echo 'no \n needed in log.Printf()' && exit 1 || echo PASS # no \n needed in log.Printf()
+grep 'case _ = <-' *.go && echo 'case _ = <- can be simplified to: case <-' && exit 1 || echo PASS # this can be simplified
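The extra grep added above enforces the same cleanup that the svc.go hunk earlier performs by hand: `case _ = <-ch:` assigns the received value to the blank identifier for no benefit, and `case <-ch:` behaves identically. A tiny fragment, assumed to sit inside this package, showing the preferred spelling:

// a usage sketch, not part of the diff above
buschan := make(chan string, 1)
buschan <- "unit files changed"

select {
// previously written as: case _ = <-buschan:
case <-buschan: // receive and discard; no blank assignment needed
	log.Printf("Svc: DaemonReload()")
}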
@@ -1,6 +1,12 @@
 #!/bin/bash
 # simple test harness for testing mgmt
 # NOTE: this will rm -rf /tmp/mgmt/
+if [ "$1" == "--help" ] || [ "$1" == "-h" ]; then
+	echo -e "usage: ./"`basename $0`" [[--help] | <test>]"
+	echo -e "where: <test> is empty to run all tests, or <file>.sh from shell/ dir"
+	exit 1
+fi
+
 echo running test-shell.sh
 set -o errexit
 set -o pipefail
timer.go (new file)
@@ -0,0 +1,151 @@
+// Mgmt
+// Copyright (C) 2013-2016+ James Shubin and the project contributors
+// Written by James Shubin <james@shubin.ca> and the project contributors
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"encoding/gob"
+	"log"
+	"time"
+)
+
+func init() {
+	gob.Register(&TimerRes{})
+}
+
+// TimerRes is a timer resource for time based events.
+type TimerRes struct {
+	BaseRes  `yaml:",inline"`
+	Interval int `yaml:"interval"` // Interval : Interval between runs
+}
+
+// TimerUUID is the UUID struct for TimerRes.
+type TimerUUID struct {
+	BaseUUID
+	name string
+}
+
+// NewTimerRes is a constructor for this resource. It also calls Init() for you.
+func NewTimerRes(name string, interval int) *TimerRes {
+	obj := &TimerRes{
+		BaseRes: BaseRes{
+			Name: name,
+		},
+		Interval: interval,
+	}
+	obj.Init()
+	return obj
+}
+
+// Init runs some startup code for this resource.
+func (obj *TimerRes) Init() {
+	obj.BaseRes.kind = "Timer"
+	obj.BaseRes.Init() // call base init, b/c we're overrriding
+}
+
+// Validate the params that are passed to TimerRes
+// Currently we are getting only an interval in seconds
+// which gets validated by go compiler
+func (obj *TimerRes) Validate() bool {
+	return true
+}
+
+// Watch is the primary listener for this resource and it outputs events.
+func (obj *TimerRes) Watch(processChan chan Event) {
+	if obj.IsWatching() {
+		return
+	}
+
+	// Create a time.Ticker for the given interval
+	ticker := time.NewTicker(time.Duration(obj.Interval) * time.Second)
+	defer ticker.Stop()
+
+	obj.SetWatching(true)
+	defer obj.SetWatching(false)
+	cuuid := obj.converger.Register()
+	defer cuuid.Unregister()
+
+	var send = false
+
+	for {
+		obj.SetState(resStateWatching)
+		select {
+		case <-ticker.C: // received the timer event
+			send = true
+			log.Printf("%v[%v]: received tick", obj.Kind(), obj.GetName())
+		case event := <-obj.events:
+			cuuid.SetConverged(false)
+			if exit, _ := obj.ReadEvent(&event); exit {
+				return
+			}
+		case <-cuuid.ConvergedTimer():
+			cuuid.SetConverged(true)
+			continue
+		}
+		if send {
+			send = false
+			obj.isStateOK = false
+			resp := NewResp()
+			processChan <- Event{eventNil, resp, "timer ticked", true}
+			resp.ACKWait()
+		}
+	}
+}
+
+// GetUUIDs includes all params to make a unique identification of this object.
+// Most resources only return one, although some resources can return multiple.
+func (obj *TimerRes) GetUUIDs() []ResUUID {
+	x := &TimerUUID{
+		BaseUUID: BaseUUID{
+			name: obj.GetName(),
+			kind: obj.Kind(),
+		},
+		name: obj.Name,
+	}
+	return []ResUUID{x}
+}
+
+// The AutoEdges method returns the AutoEdges. In this case none are used.
+func (obj *TimerRes) AutoEdges() AutoEdge {
+	return nil
+}
+
+// Compare two resources and return if they are equivalent.
+func (obj *TimerRes) Compare(res Res) bool {
+	switch res.(type) {
+	case *TimerRes:
+		res := res.(*TimerRes)
+		if !obj.BaseRes.Compare(res) {
+			return false
+		}
+		if obj.Name != res.Name {
+			return false
+		}
+		if obj.Interval != res.Interval {
+			return false
+		}
+	default:
+		return false
+	}
+	return true
+}
+
+// CheckApply method for Timer resource. Does nothing, returns happy!
+func (obj *TimerRes) CheckApply(apply bool) (bool, error) {
+	log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
+	return true, nil // state is always okay
+}
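TimerRes above is a purely time-driven resource: its Watch loop turns every tick into a poke by clearing isStateOK, and CheckApply always reports success. A small usage fragment, assumed to run inside this package, using the constructor from the file above (the names and intervals are arbitrary examples):

// a usage sketch, not part of the diff above
timer := NewTimerRes("timer1", 30) // tick every 30 seconds
log.Printf("%v[%v]: interval is %d seconds", timer.Kind(), timer.GetName(), timer.Interval)

// Compare is what decides whether a running resource can be kept across a
// graph switchover; changing the interval makes it non-equivalent:
other := NewTimerRes("timer1", 60)
if !timer.Compare(other) {
	log.Printf("interval changed, the resource would be regenerated")
}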
vendor/github.com/coreos/etcd (generated, vendored submodule)
Submodule vendor/github.com/coreos/etcd added at 9e9bbb829e