Compare commits
332 Commits
.github/ISSUE_TEMPLATE.md (vendored, new file, 9 lines added)

@@ -0,0 +1,9 @@
## Versions:

* mgmt version (eg: `mgmt --version`):

* operating system/distribution (eg: `uname -a`):

* golang version (eg: `go version`):

## Description:
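The template above asks reporters to paste the output of three commands. As a convenience (this sketch is an editorial note, not part of the diff), they can be collected in one short shell session, assuming a built `mgmt` binary in the current directory:

```bash
# Gather the details requested by .github/ISSUE_TEMPLATE.md.
./mgmt --version   # mgmt version
uname -a           # operating system / distribution
go version         # golang version
```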
.gitignore (vendored, 3 changes)

@@ -1,10 +1,11 @@
.idea/
.omv/
.ssh/
.vagrant/
mgmt-documentation.pdf
old/
tmp/
*_stringer.go
mgmt
mgmt.static
mgmt.iml
rpmbuild/
.gitmodules (vendored, 15 changes)

@@ -1,3 +1,18 @@
[submodule "vendor/github.com/coreos/etcd"]
path = vendor/github.com/coreos/etcd
url = https://github.com/coreos/etcd/
[submodule "vendor/google.golang.org/grpc"]
path = vendor/google.golang.org/grpc
url = https://github.com/grpc/grpc-go
[submodule "vendor/github.com/grpc-ecosystem/grpc-gateway"]
path = vendor/github.com/grpc-ecosystem/grpc-gateway
url = https://github.com/grpc-ecosystem/grpc-gateway
[submodule "vendor/gopkg.in/fsnotify.v1"]
path = vendor/gopkg.in/fsnotify.v1
url = https://gopkg.in/fsnotify.v1
[submodule "vendor/github.com/purpleidea/go-systemd"]
path = vendor/github.com/purpleidea/go-systemd
url = https://github.com/purpleidea/go-systemd
[submodule "vendor/honnef.co/go/augeas"]
path = vendor/honnef.co/go/augeas
url = https://github.com/dominikh/go-augeas/
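Since the vendored dependencies above are declared as git submodules, a clone that was not made with `--recursive` can fetch them afterwards with the standard git command (shown here as a usage note, not as part of the diff):

```bash
# Pull in the vendor/ submodules listed in .gitmodules.
git submodule update --init --recursive
```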
.travis.yml (20 changes)

@@ -1,19 +1,22 @@
language: go
go:
- 1.4.3
- 1.5.3
- 1.6
- 1.6.x
- 1.7.x
- 1.8.x
- tip
sudo: false
before_install: 'git fetch --unshallow'
go_import_path: github.com/purpleidea/mgmt
sudo: true
dist: trusty
before_install:
- sudo apt update
- git fetch --unshallow
install: 'make deps'
script: 'make test'
matrix:
fast_finish: true
allow_failures:
- go: tip
- go: 1.4.3
- go: 1.6
- go: 1.8.x
notifications:
irc:
channels:
@@ -26,4 +29,7 @@ notifications:
use_notice: false
skip_join: false
email:
recipients:
- travis-ci@shubin.ca
on_failure: change
on_success: change
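Outside of Travis, the `install` and `script` steps above correspond roughly to the following local commands (a sketch that assumes the `deps` and `test` Makefile targets which appear later in this diff):

```bash
# Approximate the CI pipeline locally.
git fetch --unshallow   # only needed for shallow clones, as in CI
make deps               # install build dependencies
make test               # run the test suite via ./test.sh
```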
AUTHORS (2 changes)

@@ -4,5 +4,7 @@ If you appreciate the work of one of the contributors, thank them a beverage!
For a more exhaustive list please run: git log --format='%aN' | sort -u
This list is sorted alphabetically by first name.

Felix Frank
James Shubin
Julien Pivotto
Paul Morgan
@@ -1,5 +1,5 @@
Mgmt
Copyright (C) 2013-2016+ James Shubin and the project contributors
Copyright (C) 2013-2017+ James Shubin and the project contributors
Written by James Shubin <james@shubin.ca> and the project contributors

This program is free software: you can redistribute it and/or modify
DOCUMENTATION.md (374 lines removed)

@@ -1,374 +0,0 @@
#mgmt

<!--
Mgmt
Copyright (C) 2013-2016+ James Shubin and the project contributors
Written by James Shubin <james@shubin.ca> and the project contributors

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-->

##mgmt by [James](https://ttboj.wordpress.com/)
####Available from:
####[https://github.com/purpleidea/mgmt/](https://github.com/purpleidea/mgmt/)

####This documentation is available in: [Markdown](https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md) or [PDF](https://pdfdoc-purpleidea.rhcloud.com/pdf/https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md) format.

####Table of Contents

1. [Overview](#overview)
2. [Project description - What the project does](#project-description)
3. [Setup - Getting started with mgmt](#setup)
4. [Features - All things mgmt can do](#features)
* [Autoedges - Automatic resource relationships](#autoedges)
* [Autogrouping - Automatic resource grouping](#autogrouping)
* [Automatic clustering - Automatic cluster management](#automatic-clustering)
* [Remote mode - Remote "agent-less" execution](#remote-agent-less-mode)
5. [Usage/FAQ - Notes on usage and frequently asked questions](#usage-and-frequently-asked-questions)
6. [Reference - Detailed reference](#reference)
* [Graph definition file](#graph-definition-file)
* [Command line](#command-line)
7. [Examples - Example configurations](#examples)
8. [Development - Background on module development and reporting bugs](#development)
9. [Authors - Authors and contact information](#authors)

##Overview

The `mgmt` tool is a next generation config management prototype. It's not yet
ready for production, but we hope to get there soon. Get involved today!

##Project Description

The mgmt tool is a distributed, event driven, config management tool, that
supports parallel execution, and librarification to be used as the management
foundation in and for, new and existing software.

For more information, you may like to read some blog posts from the author:

* [Next generation config mgmt](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
* [Automatic edges in mgmt](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
* [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
* [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)

There is also an [introductory video](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) available.
Older videos and other material [is available](https://github.com/purpleidea/mgmt/#on-the-web).

##Setup

During this prototype phase, the tool can be run out of the source directory.
You'll probably want to use ```./run.sh run --file examples/graph1.yaml``` to
get started. Beware that this _can_ cause data loss. Understand what you're
doing first, or perform these actions in a virtual environment such as the one
provided by [Oh-My-Vagrant](https://github.com/purpleidea/oh-my-vagrant).

##Features

This section details the numerous features of mgmt and some caveats you might
need to be aware of.

###Autoedges

Automatic edges, or AutoEdges, is the mechanism in mgmt by which it will
automatically create dependencies for you between resources. For example,
since mgmt can discover which files are installed by a package it will
automatically ensure that any file resource you declare that matches a
file installed by your package resource will only be processed after the
package is installed.

####Controlling autoedges

Though autoedges is likely to be very helpful and avoid you having to declare
all dependencies explicitly, there are cases where this behaviour is
undesirable.

Some distributions allow package installations to automatically start the
service they ship. This can be problematic in the case of packages like MySQL
as there are configuration options that need to be set before MySQL is ever
started for the first time (or you'll need to wipe the data directory). In
order to handle this situation you can disable autoedges per resource and
explicitly declare that you want `my.cnf` to be written to disk before the
installation of the `mysql-server` package.

You can disable autoedges for a resource by setting the `autoedge` key on
the meta attributes of that resource to `false`.

####Blog post

You can read the introductory blog post about this topic here:
[https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)

###Autogrouping

Automatic grouping or AutoGroup is the mechanism in mgmt by which it will
automatically group multiple resource vertices into a single one. This is
particularly useful for grouping multiple package resources into a single
resource, since the multiple installations can happen together in a single
transaction, which saves a lot of time because package resources typically have
a large fixed cost to running (downloading and verifying the package repo) and
if they are grouped they share this fixed cost. This grouping feature can be
used for other use cases too.

You can disable autogrouping for a resource by setting the `autogroup` key on
the meta attributes of that resource to `false`.

####Blog post

You can read the introductory blog post about this topic here:
[https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)

###Automatic clustering

Automatic clustering is a feature by which mgmt automatically builds, scales,
and manages the embedded etcd cluster which is compiled into mgmt itself. It is
quite helpful for rapidly bootstrapping clusters and avoiding the extra work to
setup etcd.

If you prefer to avoid this feature. you can always opt to use an existing etcd
cluster that is managed separately from mgmt by pointing your mgmt agents at it
with the `--seeds` variable.

####Blog post

You can read the introductory blog post about this topic here:
[https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)

###Remote ("agent-less") mode

Remote mode is a special mode that lets you kick off mgmt runs on one or more
remote machines which are only accessible via SSH. In this mode the initiating
host connects over SSH, copies over the `mgmt` binary, opens an SSH tunnel, and
runs the remote program while simultaneously passing the etcd traffic back
through the tunnel so that the initiators etcd cluster can be used to exchange
resource data.

The interesting benefit of this architecture is that multiple hosts which can't
connect directly use the initiator to pass the important traffic through to each
other. Once the cluster has converged all the remote programs can shutdown
leaving no residual agent.

This mode can also be useful for bootstrapping a new host where you'd like to
have the service run continuously and as part of an mgmt cluster normally.

In particular, when combined with the `--converged-timeout` parameter, the
entire set of running mgmt agents will need to all simultaneously converge for
the group to exit. This is particularly useful for bootstrapping new clusters
which need to exchange information that is only available at run time.

####Blog post

An introductory blog post about this topic will follow soon.

##Usage and frequently asked questions
(Send your questions as a patch to this FAQ! I'll review it, merge it, and
respond by commit with the answer.)

###Why did you start this project?

I wanted a next generation config management solution that didn't have all of
the design flaws or limitations that the current generation of tools do, and no
tool existed!

###Why did you use etcd? What about consul?

Etcd and consul are both written in golang, which made them the top two
contenders for my prototype. Ultimately a choice had to be made, and etcd was
chosen, but it was also somewhat arbitrary. If there is available interest,
good reasoning, *and* patches, then we would consider either switching or
supporting both, but this is not a high priority at this time.

###Can I use an existing etcd cluster instead of the automatic embedded servers?

Yes, it's possible to use an existing etcd cluster instead of the automatic,
elastic embedded etcd servers. To do so, simply point to the cluster with the
`--seeds` variable, the same way you would if you were seeding a new member to
an existing mgmt cluster.

The downside to this approach is that you won't benefit from the automatic
elastic nature of the embedded etcd servers, and that you're responsible if you
accidentally break your etcd cluster, or if you use an unsupported version.

###What does the error message about an inconsistent dataDir mean?

If you get an error message similar to:

```
Etcd: Connect: CtxError...
Etcd: CtxError: Reason: CtxDelayErr(5s): No endpoints available yet!
Etcd: Connect: Endpoints: []
Etcd: The dataDir (/var/lib/mgmt/etcd) might be inconsistent or corrupt.
```

This happens when there are a series of fatal connect errors in a row. This can
happen when you start `mgmt` using a dataDir that doesn't correspond to the
current cluster view. As a result, the embedded etcd server never finishes
starting up, and as a result, a default endpoint never gets added. The solution
is to either reconcile the mistake, and if there is no important data saved, you
can remove the etcd dataDir. This is typically `/var/lib/mgmt/etcd/member/`.

###Why do resources have both a `Compare` method and an `IFF` (on the UUID) method?

The `Compare()` methods are for determining if two resources are effectively the
same, which is used to make graph change delta's efficient. This is when we want
to change from the current running graph to a new graph, but preserve the common
vertices. Since we want to make this process efficient, we only update the parts
that are different, and leave everything else alone. This `Compare()` method can
tell us if two resources are the same.

The `IFF()` method is part of the whole UUID system, which is for discerning if
a resource meets the requirements another expects for an automatic edge. This is
because the automatic edge system assumes a unified UUID pattern to test for
equality. In the future it might be helpful or sane to merge the two similar
comparison functions although for now they are separate because they are
actually answer different questions.

###Did you know that there is a band named `MGMT`?

I didn't realize this when naming the project, and it is accidental. After much
anguishing, I chose the name because it was short and I thought it was
appropriately descriptive. If you need a less ambiguous search term or phrase,
you can try using `mgmtconfig` or `mgmt config`.

###You didn't answer my question, or I have a question!

It's best to ask on [IRC](https://webchat.freenode.net/?channels=#mgmtconfig)
to see if someone can help you. Once we get a big enough community going, we'll
add a mailing list. If you don't get any response from the above, you can
contact me through my [technical blog](https://ttboj.wordpress.com/contact/)
and I'll do my best to help. If you have a good question, please add it as a
patch to this documentation. I'll merge your question, and add a patch with the
answer!

##Reference
Please note that there are a number of undocumented options. For more
information on these options, please view the source at:
[https://github.com/purpleidea/mgmt/](https://github.com/purpleidea/mgmt/).
If you feel that a well used option needs documenting here, please patch it!

###Overview of reference
* [Graph definition file](#graph-definition-file): Main graph definition file.
* [Command line](#command-line): Command line parameters.

###Graph definition file
graph.yaml is the compiled graph definition file. The format is currently
undocumented, but by looking through the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples)
you can probably figure out most of it, as it's fairly intuitive.

###Command line
The main interface to the `mgmt` tool is the command line. For the most recent
documentation, please run `mgmt --help`.

####`--file <graph.yaml>`
Point to a graph file to run.

####`--converged-timeout <seconds>`
Exit if the machine has converged for approximately this many seconds.

####`--max-runtime <seconds>`
Exit when the agent has run for approximately this many seconds. This is not
generally recommended, but may be useful for users who know what they're doing.

####`--noop`
Globally force all resources into no-op mode. This also disables the export to
etcd functionality, but does not disable resource collection, however all
resources that are collected will have their individual noop settings set.

####`--remote <graph.yaml>`
Point to a graph file to run on the remote host specified within. This parameter
can be used multiple times if you'd like to remotely run on multiple hosts in
parallel.

####`--allow-interactive`
Allow interactive prompting for SSH passwords if there is no authentication
method that works.

####`--ssh-priv-id-rsa`
Specify the path for finding SSH keys. This defaults to `~/.ssh/id_rsa`. To
never use this method of authentication, set this to the empty string.

####`--cconns`
The maximum number of concurrent remote ssh connections to run. This defaults
to `0`, which means unlimited.

####`--no-caching`
Don't allow remote caching of the remote execution binary. This will require
the binary to be copied over for every remote execution, but it limits the
likelihood that there is leftover information from the configuration process.

####`--prefix <path>`
Specify a path to a custom working directory prefix. This directory will get
created if it does not exist. This usually defaults to `/var/lib/mgmt/`. This
can't be combined with the `--tmp-prefix` option. It can be combined with the
`--allow-tmp-prefix` option.

####`--tmp-prefix`
If this option is specified, a temporary prefix will be used instead of the
default prefix. This can't be combined with the `--prefix` option.

####`--allow-tmp-prefix`
If this option is specified, we will attempt to fall back to a temporary prefix
if the primary prefix couldn't be created. This is useful for avoiding failures
in environments where the primary prefix may or may not be available, but you'd
like to try. The canonical example is when running `mgmt` with `--remote` there
might be a cached copy of the binary in the primary prefix, but in case there's
no binary available continue working in a temporary directory to avoid failure.

##Examples
For example configurations, please consult the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples) directory in the git
source repository. It is available from:

[https://github.com/purpleidea/mgmt/tree/master/examples](https://github.com/purpleidea/mgmt/tree/master/examples)

### Systemd:
See [`misc/mgmt.service`](misc/mgmt.service) for a sample systemd unit file.
This unit file is part of the RPM.

To specify your custom options for `mgmt` on a systemd distro:

```bash
sudo mkdir -p /etc/systemd/system/mgmt.service.d/

cat > /etc/systemd/system/mgmt.service.d/env.conf <<EOF
# Environment variables:
MGMT_SEEDS=http://127.0.0.1:2379
MGMT_CONVERGED_TIMEOUT=-1
MGMT_MAX_RUNTIME=0

# Other CLI options if necessary.
#OPTS="--max-runtime=0"
EOF

sudo systemctl daemon-reload
```

##Development

This is a project that I started in my free time in 2013. Development is driven
by all of our collective patches! Dive right in, and start hacking!
Please contact me if you'd like to invite me to speak about this at your event.

You can follow along [on my technical blog](https://ttboj.wordpress.com/).

To report any bugs, please file a ticket at: [https://github.com/purpleidea/mgmt/issues](https://github.com/purpleidea/mgmt/issues).

##Authors

Copyright (C) 2013-2016+ James Shubin and the project contributors

Please see the
[AUTHORS](https://github.com/purpleidea/mgmt/tree/master/AUTHORS) file
for more information.

* [github](https://github.com/purpleidea/)
* [@purpleidea](https://twitter.com/#!/purpleidea)
* [https://ttboj.wordpress.com/](https://ttboj.wordpress.com/)
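The removed documentation above describes remote "agent-less" mode along with a set of related command line flags. A hypothetical invocation that combines several of them could look like the sketch below; the graph file name is illustrative only, this is not part of the diff, and the exact flag set you need will differ:

```bash
# Agent-less run using the flags documented above. remote.yaml is a
# hypothetical graph file naming the SSH-reachable target host(s).
./mgmt run --remote remote.yaml --allow-interactive --cconns 4 \
	--converged-timeout=30 --no-caching --allow-tmp-prefix
```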
Makefile (68 changes)

@@ -1,5 +1,5 @@
# Mgmt
# Copyright (C) 2013-2016+ James Shubin and the project contributors
# Copyright (C) 2013-2017+ James Shubin and the project contributors
# Written by James Shubin <james@shubin.ca> and the project contributors
#
# This program is free software: you can redistribute it and/or modify
@@ -15,8 +15,8 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

SHELL = /bin/bash
.PHONY: all version program path deps run race generate build clean test gofmt yamlfmt format docs rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms copr
SHELL = /usr/bin/env bash
.PHONY: all art cleanart version program path deps run race generate build clean test gofmt yamlfmt format docs rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms copr
.SILENT: clean

SVERSION := $(or $(SVERSION),$(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --dirty --always))
@@ -37,6 +37,47 @@ RPM = rpmbuild/RPMS/$(PROGRAM)-$(VERSION)-$(RELEASE).$(ARCH).rpm
USERNAME := $(shell cat ~/.config/copr 2>/dev/null | grep username | awk -F '=' '{print $$2}' | tr -d ' ')
SERVER = 'dl.fedoraproject.org'
REMOTE_PATH = 'pub/alt/$(USERNAME)/$(PROGRAM)'
ifneq ($(GOTAGS),)
BUILD_FLAGS = -tags '$(GOTAGS)'
endif

default: build

#
# art
#
art: art/mgmt_logo_default_symbol.png art/mgmt_logo_default_tall.png art/mgmt_logo_default_wide.png art/mgmt_logo_reversed_symbol.png art/mgmt_logo_reversed_tall.png art/mgmt_logo_reversed_wide.png art/mgmt_logo_white_symbol.png art/mgmt_logo_white_tall.png art/mgmt_logo_white_wide.png

cleanart:
rm -f art/mgmt_logo_default_symbol.png art/mgmt_logo_default_tall.png art/mgmt_logo_default_wide.png art/mgmt_logo_reversed_symbol.png art/mgmt_logo_reversed_tall.png art/mgmt_logo_reversed_wide.png art/mgmt_logo_white_symbol.png art/mgmt_logo_white_tall.png art/mgmt_logo_white_wide.png

# NOTE: the widths are arbitrary
art/mgmt_logo_default_symbol.png: art/mgmt_logo_default_symbol.svg
inkscape --export-background='#ffffff' --without-gui --export-png "$@" --export-width 300 $(@:png=svg)

art/mgmt_logo_default_tall.png: art/mgmt_logo_default_tall.svg
inkscape --export-background='#ffffff' --without-gui --export-png "$@" --export-width 400 $(@:png=svg)

art/mgmt_logo_default_wide.png: art/mgmt_logo_default_wide.svg
inkscape --export-background='#ffffff' --without-gui --export-png "$@" --export-width 800 $(@:png=svg)

art/mgmt_logo_reversed_symbol.png: art/mgmt_logo_reversed_symbol.svg
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 300 $(@:png=svg)

art/mgmt_logo_reversed_tall.png: art/mgmt_logo_reversed_tall.svg
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 400 $(@:png=svg)

art/mgmt_logo_reversed_wide.png: art/mgmt_logo_reversed_wide.svg
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 800 $(@:png=svg)

art/mgmt_logo_white_symbol.png: art/mgmt_logo_white_symbol.svg
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 300 $(@:png=svg)

art/mgmt_logo_white_tall.png: art/mgmt_logo_white_tall.svg
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 400 $(@:png=svg)

art/mgmt_logo_white_wide.png: art/mgmt_logo_white_wide.svg
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 800 $(@:png=svg)

all: docs $(PROGRAM).static

@@ -69,9 +110,9 @@ $(PROGRAM): main.go
@echo "Building: $(PROGRAM), version: $(SVERSION)..."
ifneq ($(OLDGOLANG),)
@# avoid equals sign in old golang versions eg in: -X foo=bar
time go build -ldflags "-X main.program $(PROGRAM) -X main.version $(SVERSION)" -o $(PROGRAM);
time go build -ldflags "-X main.program $(PROGRAM) -X main.version $(SVERSION)" -o $(PROGRAM) $(BUILD_FLAGS);
else
time go build -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)" -o $(PROGRAM);
time go build -i -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)" -o $(PROGRAM) $(BUILD_FLAGS);
endif

$(PROGRAM).static: main.go
@@ -79,14 +120,15 @@ $(PROGRAM).static: main.go
go generate
ifneq ($(OLDGOLANG),)
@# avoid equals sign in old golang versions eg in: -X foo=bar
go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program $(PROGRAM) -X main.version $(SVERSION)' -o $(PROGRAM).static;
go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program $(PROGRAM) -X main.version $(SVERSION)' -o $(PROGRAM).static $(BUILD_FLAGS);
else
go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program=$(PROGRAM) -X main.version=$(SVERSION)' -o $(PROGRAM).static;
go build -a -installsuffix cgo -tags netgo -ldflags '-extldflags "-static" -X main.program=$(PROGRAM) -X main.version=$(SVERSION)' -o $(PROGRAM).static $(BUILD_FLAGS);
endif

clean:
[ ! -e $(PROGRAM) ] || rm $(PROGRAM)
#rm -f *_stringer.go # generated by `go generate`
rm -f *_stringer.go # generated by `go generate`
rm -f *_mock.go # generated by `go generate`

test:
./test.sh
@@ -95,14 +137,14 @@ gofmt:
find . -maxdepth 3 -type f -name '*.go' -not -path './old/*' -not -path './tmp/*' -exec gofmt -w {} \;

yamlfmt:
find . -type f -name '*.yaml' -not -path './old/*' -not -path './tmp/*' -not -path './omv.yaml' -exec ruby -e "require 'yaml'; x=YAML.load_file('{}').to_yaml.each_line.map(&:rstrip).join(10.chr)+10.chr; File.open('{}', 'w').write x" \;
find . -maxdepth 3 -type f -name '*.yaml' -not -path './old/*' -not -path './tmp/*' -not -path './omv.yaml' -exec ruby -e "require 'yaml'; x=YAML.load_file('{}').to_yaml.each_line.map(&:rstrip).join(10.chr)+10.chr; File.open('{}', 'w').write x" \;

format: gofmt yamlfmt

docs: $(PROGRAM)-documentation.pdf

$(PROGRAM)-documentation.pdf: DOCUMENTATION.md
pandoc DOCUMENTATION.md -o '$(PROGRAM)-documentation.pdf'
$(PROGRAM)-documentation.pdf: docs/documentation.md
pandoc docs/documentation.md -o docs/'$(PROGRAM)-documentation.pdf'

#
# build aliases
@@ -146,8 +188,8 @@ $(SRPM): $(SPEC) $(SOURCE)
#
$(SPEC): rpmbuild/ spec.in
@echo Running templater...
#cat spec.in > $(SPEC)
sed -e s/__PROGRAM__/$(PROGRAM)/ -e s/__VERSION__/$(VERSION)/ -e s/__RELEASE__/$(RELEASE)/ < spec.in > $(SPEC)
cat spec.in > $(SPEC)
sed -e s/__PROGRAM__/$(PROGRAM)/g -e s/__VERSION__/$(VERSION)/g -e s/__RELEASE__/$(RELEASE)/g < spec.in > $(SPEC)
# append a changelog to the .spec file
git log --format="* %cd %aN <%aE>%n- (%h) %s%d%n" --date=local | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //' >> $(SPEC)
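The new `GOTAGS` hook added to the Makefile above simply forwards build tags to `go build` through `BUILD_FLAGS`. It can be exercised like this; the tag name is purely illustrative:

```bash
# Pass a build tag through the Makefile (GOTAGS feeds -tags via BUILD_FLAGS).
make build GOTAGS='mytag'
```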
README.md (119 changes)

@@ -1,33 +1,43 @@
# *mgmt*: This is: mgmt!
# *mgmt*: next generation config management!

[](https://goreportcard.com/report/github.com/purpleidea/mgmt)
[](http://travis-ci.org/purpleidea/mgmt)
[](DOCUMENTATION.md)
[](https://webchat.freenode.net/?channels=#mgmtconfig)
[](https://ci.centos.org/job/purpleidea-mgmt/)
[](https://copr.fedoraproject.org/coprs/purpleidea/mgmt/)
[](art/)

[](https://goreportcard.com/report/github.com/purpleidea/mgmt)
[](http://travis-ci.org/purpleidea/mgmt)
[](https://godoc.org/github.com/purpleidea/mgmt)
[](https://webchat.freenode.net/?channels=#mgmtconfig)
[](https://ci.centos.org/job/purpleidea-mgmt/)

## Community:
Come join us on IRC in [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) on Freenode!
You may like the [#mgmtconfig](https://twitter.com/hashtag/mgmtconfig) hashtag if you're on [Twitter](https://twitter.com/#!/purpleidea).
Come join us in the `mgmt` community!

## Questions:
Please join the [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) IRC community!
If you have a well phrased question that might benefit others, consider asking it by sending a patch to the documentation [FAQ](https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md#usage-and-frequently-asked-questions) section. I'll merge your question, and a patch with the answer!
| Medium | Link |
|---|---|
| IRC | [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) on Freenode |
| Twitter | [@mgmtconfig](https://twitter.com/mgmtconfig) & [#mgmtconfig](https://twitter.com/hashtag/mgmtconfig) |
| Mailing list | [mgmtconfig-list@redhat.com](https://www.redhat.com/mailman/listinfo/mgmtconfig-list) |

## Quick start:
* Clone the repository recursively, eg: `git clone --recursive https://github.com/purpleidea/mgmt/`.
* Get the remaining golang dependencies on your own, or run `make deps` if you're comfortable with how we install them.
* Run `make build` to get a freshly built `mgmt` binary.
* Run `time ./mgmt run --file examples/graph0.yaml --converged-timeout=1` to try out a very simple example!
* To run continuously in the default mode of operation, omit the `--converged-timeout` option.
* Have fun hacking on our future technology!

## Examples:
Please look in the [examples/](examples/) folder for more examples!
## Status:
Mgmt is a fairly new project.
We're working towards being minimally useful for production environments.
We aren't feature complete for what we'd consider a 1.x release yet.
With your help you'll be able to influence our design and get us there sooner!

## Documentation:
Please see: [DOCUMENTATION.md](DOCUMENTATION.md) or [PDF](https://pdfdoc-purpleidea.rhcloud.com/pdf/https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md).
Please read, enjoy and help improve our documentation!

| Documentation | Additional Notes |
|---|---|
| [general documentation](docs/documentation.md) | for everyone |
| [quick start guide](docs/quick-start-guide.md) | for mgmt developers |
| [resource guide](docs/resource-guide.md) | for mgmt developers |
| [godoc API reference](https://godoc.org/github.com/purpleidea/mgmt) | for mgmt developers |
| [prometheus guide](docs/prometheus.md) | for everyone |
| [puppet guide](docs/puppet-guide.md) | for puppet sysadmins |

## Questions:
Please ask in the [community](#community)!
If you have a well phrased question that might benefit others, consider asking it by sending a patch to the documentation [FAQ](https://github.com/purpleidea/mgmt/blob/master/docs/documentation.md#usage-and-frequently-asked-questions) section. I'll merge your question, and a patch with the answer!

## Roadmap:
Please see: [TODO.md](TODO.md) for a list of upcoming work and TODO items.
@@ -39,46 +49,37 @@ Please set the `DEBUG` constant in [main.go](https://github.com/purpleidea/mgmt/
Bonus points if you provide a [shell](https://github.com/purpleidea/mgmt/tree/master/test/shell) or [OMV](https://github.com/purpleidea/mgmt/tree/master/test/omv) reproducible test case.
Feel free to read my article on [debugging golang programs](https://ttboj.wordpress.com/2016/02/15/debugging-golang-programs/).

## Dependencies:
* golang 1.4 or higher (required, available in most distros)
* golang libraries (required, available with `go get`)

go get github.com/coreos/etcd/client
go get gopkg.in/yaml.v2
go get gopkg.in/fsnotify.v1
go get github.com/codegangsta/cli
go get github.com/coreos/go-systemd/dbus
go get github.com/coreos/go-systemd/util
go get github.com/coreos/pkg/capnslog

* stringer (required for building), available as a package on some platforms, otherwise via `go get`

go get golang.org/x/tools/cmd/stringer

* pandoc (optional, for building a pdf of the documentation)
* graphviz (optional, for building a visual representation of the graph)

## Patches:
We'd love to have your patches! Please send them by email, or as a pull request.

## On the web:
* James Shubin; blog: [Next generation configuration mgmt](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
* James Shubin; video: [Introductory recording from DevConf.cz 2016](https://www.youtube.com/watch?v=GVhpPF0j-iE&html5=1)
* James Shubin; video: [Introductory recording from CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=fNeooSiIRnA&html5=1)
* Julian Dunn; video: [On mgmt at CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=kfF9IATUask&t=1949&html5=1)
* Walter Heck; slides: [On mgmt at CfgMgmtCamp.eu 2016](http://www.slideshare.net/olindata/configuration-management-time-for-a-4th-generation/3)
* Marco Marongiu; blog: [On mgmt](http://syslog.me/2016/02/15/leap-or-die/)
* Felix Frank; blog: [From Catalog To Mgmt (on puppet to mgmt "transpiling")](https://ffrank.github.io/features/2016/02/18/from-catalog-to-mgmt/)
* James Shubin; blog: [Automatic edges in mgmt (...and the pkg resource)](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
* James Shubin; blog: [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
* John Arundel; tweet: [“Puppet’s days are numbered.”](https://twitter.com/bitfield/status/732157519142002688)
* Felix Frank; blog: [Puppet, Meet Mgmt (on puppet to mgmt internals)](https://ffrank.github.io/features/2016/06/12/puppet,-meet-mgmt/)
* Felix Frank; blog: [Puppet Powered Mgmt (puppet to mgmt tl;dr)](https://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/)
* James Shubin; blog: [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
* James Shubin; video: [Recording from CoreOSFest 2016](https://www.youtube.com/watch?v=KVmDCUA42wc&html5=1)
* James Shubin; video: [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) ([Slides](https://annex.debconf.org//debconf-share/debconf16/slides/15-next-generation-config-mgmt.pdf))
* Felix Frank; blog: [Edging It All In (puppet and mgmt edges)](https://ffrank.github.io/features/2016/07/12/edging-it-all-in/)
* Felix Frank; blog: [Translating All The Things (puppet to mgmt translation warnings)](https://ffrank.github.io/features/2016/08/19/translating-all-the-things/)
| Author | Format | Subject |
|---|---|---|
| James Shubin | blog | [Next generation configuration mgmt](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/) |
| James Shubin | video | [Introductory recording from DevConf.cz 2016](https://www.youtube.com/watch?v=GVhpPF0j-iE&html5=1) |
| James Shubin | video | [Introductory recording from CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=fNeooSiIRnA&html5=1) |
| Julian Dunn | video | [On mgmt at CfgMgmtCamp.eu 2016](https://www.youtube.com/watch?v=kfF9IATUask&t=1949&html5=1) |
| Walter Heck | slides | [On mgmt at CfgMgmtCamp.eu 2016](http://www.slideshare.net/olindata/configuration-management-time-for-a-4th-generation/3) |
| Marco Marongiu | blog | [On mgmt](http://syslog.me/2016/02/15/leap-or-die/) |
| Felix Frank | blog | [From Catalog To Mgmt (on puppet to mgmt "transpiling")](https://ffrank.github.io/features/2016/02/18/from-catalog-to-mgmt/) |
| James Shubin | blog | [Automatic edges in mgmt (...and the pkg resource)](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/) |
| James Shubin | blog | [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/) |
| John Arundel | tweet | [“Puppet’s days are numbered.”](https://twitter.com/bitfield/status/732157519142002688) |
| Felix Frank | blog | [Puppet, Meet Mgmt (on puppet to mgmt internals)](https://ffrank.github.io/features/2016/06/12/puppet,-meet-mgmt/) |
| Felix Frank | blog | [Puppet Powered Mgmt (puppet to mgmt tl;dr)](https://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/) |
| James Shubin | blog | [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/) |
| James Shubin | video | [Recording from CoreOSFest 2016](https://www.youtube.com/watch?v=KVmDCUA42wc&html5=1) |
| James Shubin | video | [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) ([Slides](https://annex.debconf.org//debconf-share/debconf16/slides/15-next-generation-config-mgmt.pdf)) |
| Felix Frank | blog | [Edging It All In (puppet and mgmt edges)](https://ffrank.github.io/features/2016/07/12/edging-it-all-in/) |
| Felix Frank | blog | [Translating All The Things (puppet to mgmt translation warnings)](https://ffrank.github.io/features/2016/08/19/translating-all-the-things/) |
| James Shubin | video | [Recording from systemd.conf 2016](https://www.youtube.com/watch?v=jB992Zb3nH0&html5=1) |
| James Shubin | blog | [Remote execution in mgmt](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/) |
| James Shubin | video | [Recording from High Load Strategy 2016](https://vimeo.com/191493409) |
| James Shubin | video | [Recording from NLUUG 2016](https://www.youtube.com/watch?v=MmpwOQAb_SE&html5=1) |
| James Shubin | blog | [Send/Recv in mgmt](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/) |
| James Shubin | blog | [Metaparameters in mgmt](https://ttboj.wordpress.com/2017/03/01/metaparameters-in-mgmt/) |
| James Shubin | video | [Recording from Incontro DevOps 2017](https://vimeo.com/212241877) |
| Yves Brissaud | blog | [mgmt aux HumanTalks Grenoble (french)](http://log.winsos.net/2017/04/12/mgmt-aux-human-talks-grenoble.html) |

##
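The quick start bullets in the new README text above condense to roughly this shell session (a sketch based only on the commands quoted there):

```bash
# Clone with submodules, build, and try the simplest example graph.
git clone --recursive https://github.com/purpleidea/mgmt/
cd mgmt
make deps    # optional: install the remaining golang dependencies
make build   # produces a freshly built mgmt binary
time ./mgmt run --file examples/graph0.yaml --converged-timeout=1
```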
TODO.md (46 changes)

@@ -1,32 +1,58 @@
# TODO
If you're looking for something to do, look here!
Let us know if you're working on one of the items.
If you'd like something to work on, ping @purpleidea and I'll create an issue
tailored especially for you! Just let me know your approximate golang skill
level and how many hours you'd like to spend on the patch.

## Package resource
- [ ] getfiles support on debian [bug](https://github.com/hughsie/PackageKit/issues/118)
- [ ] directory info on fedora [bug](https://github.com/hughsie/PackageKit/issues/117)
- [ ] dnf blocker [bug](https://github.com/hughsie/PackageKit/issues/110)
- [ ] install signal blocker [bug](https://github.com/hughsie/PackageKit/issues/109)

## File resource [bug](https://github.com/purpleidea/mgmt/issues/13) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
- [ ] ability to make/delete folders
- [ ] recursive argument (can recursively watch/modify contents)
- [ ] force argument (can cause switch from file <-> folder)
## File resource [bug](https://github.com/purpleidea/mgmt/issues/64) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
- [ ] recurse limit support [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
- [ ] fanotify support [bug](https://github.com/go-fsnotify/fsnotify/issues/114)

## Svc resource
- [ ] base resource improvements

## Exec resource
- [ ] base resource improvements

## Timer resource
- [ ] base resource [bug](https://github.com/purpleidea/mgmt/issues/15) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
- [ ] reset on recompile
- [ ] increment algorithm (linear, exponential, etc...)
- [ ] increment algorithm (linear, exponential, etc...) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)

## User/Group resource
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
- [ ] automatic edges to file resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)

## Virt (libvirt) resource
- [ ] base resource [bug](https://github.com/purpleidea/mgmt/issues/25)
- [ ] base resource improvements [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)

## Net (systemd-networkd) resource
- [ ] base resource

## Nspawn (systemd-nspawn) resource
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)

## Mount (systemd-mount) resource
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)

## Cron (systemd-timer) resource
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)

## Http resource
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)

## Etcd improvements
- [ ] embedded etcd master
- [ ] fix embedded etcd master race

## Torrent/dht file transfer
- [ ] base plumbing

## GPG/Auth improvements
- [ ] base plumbing

## Language improvements
- [ ] language design
Vagrantfile (vendored, new file, 47 lines added)

@@ -0,0 +1,47 @@
Vagrant.configure(2) do |config|
config.ssh.forward_agent = true
config.ssh.username = 'vagrant'
config.vm.network "private_network", ip: "192.168.219.2"

config.vm.synced_folder ".", "/vagrant", disabled: true

config.vm.define "mgmt-dev" do |instance|
instance.vm.box = "fedora/24-cloud-base"
end

config.vm.provider "virtualbox" do |v|
v.memory = 1536
v.cpus = 2
end

config.vm.provision "file", source: "vagrant/motd", destination: ".motd"
config.vm.provision "shell", inline: "cp ~vagrant/.motd /etc/motd"

config.vm.provision "file", source: "vagrant/mgmt.bashrc", destination: ".mgmt.bashrc"
config.vm.provision "file", source: "~/.gitconfig", destination: ".gitconfig"

# copied from make-deps.sh (with added git)
config.vm.provision "shell", inline: "dnf install -y libvirt-devel golang golang-googlecode-tools-stringer hg git make"

# set up packagekit
config.vm.provision "shell" do |shell|
shell.inline = <<-SCRIPT
dnf install -y PackageKit
systemctl enable packagekit
systemctl start packagekit
SCRIPT
end

# set up vagrant home
script = <<-SCRIPT
grep -q 'mgmt\.bashrc' ~/.bashrc || echo '. ~/.mgmt.bashrc' >>~/.bashrc
. ~/.mgmt.bashrc
go get -u github.com/purpleidea/mgmt
cd ~/gopath/src/github.com/purpleidea/mgmt
make deps
SCRIPT
config.vm.provision "shell" do |shell|
shell.privileged = false
shell.inline = script
end
end
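With the Vagrantfile above in place, the development VM it defines can be brought up and entered with the usual Vagrant workflow (this assumes a working VirtualBox provider, matching the provider block quoted above):

```bash
# Boot and enter the mgmt-dev machine defined in the Vagrantfile.
vagrant up mgmt-dev
vagrant ssh mgmt-dev
```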
art/.gitignore (vendored, new file, 2 lines added)

@@ -0,0 +1,2 @@
*.png
misc/
art/mgmt.png (binary, new file, 24 KiB)
art/mgmt_logo_default_symbol.svg (new file, 94 lines added)

@@ -0,0 +1,94 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||
viewBox="0 0 120 107.1" style="enable-background:new 0 0 120 107.1;" xml:space="preserve">
|
||||
<style type="text/css">
|
||||
.st0{fill:#FFFFFF;}
|
||||
.st1{fill:#E22434;}
|
||||
.st2{display:none;}
|
||||
.st3{fill:#1B3663;}
|
||||
.st4{fill:#00B1D1;}
|
||||
.st5{fill:#BFE6EF;}
|
||||
.st6{fill:#69CBE0;}
|
||||
.st7{fill:#0080BD;}
|
||||
.st8{fill:#005DAB;}
|
||||
.st9{fill:#183660;}
|
||||
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||
.st14{fill:#C0E6EF;}
|
||||
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||
</style>
|
||||
<g id="Layer_2">
|
||||
</g>
|
||||
<g id="Layer_1">
|
||||
<g>
|
||||
<g>
|
||||
<g>
|
||||
<line class="st10" x1="29.2" y1="24.1" x2="52.1" y2="42.8"/>
|
||||
<g>
|
||||
<polygon class="st9" points="27.7,27.8 24.9,20.5 32.6,21.8 "/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<circle class="st5" cx="16.1" cy="12.2" r="12.1"/>
|
||||
<g>
|
||||
<g>
|
||||
<line class="st10" x1="52.1" y1="42.1" x2="74.1" y2="80"/>
|
||||
<g>
|
||||
<polygon class="st9" points="70.1,80.9 76.9,84.8 76.8,77.1 "/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g>
|
||||
<g>
|
||||
<line class="st10" x1="69.4" y1="46.7" x2="95.8" y2="52.4"/>
|
||||
<g>
|
||||
<polygon class="st9" points="69.7,50.7 63.9,45.6 71.3,43.2 "/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g>
|
||||
<g>
|
||||
<line class="st10" x1="52.1" y1="42.8" x2="71.9" y2="30.1"/>
|
||||
<g>
|
||||
<polygon class="st9" points="73.1,34 76.6,27.1 68.9,27.5 "/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g>
|
||||
<g>
|
||||
<line class="st10" x1="16.8" y1="49.6" x2="34.8" y2="46.5"/>
|
||||
<g>
|
||||
<polygon class="st9" points="34.3,50.5 40.3,45.6 33,42.9 "/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g>
|
||||
<g>
|
||||
<line class="st10" x1="92.3" y1="79.5" x2="107.8" y2="54.2"/>
|
||||
<g>
|
||||
<polygon class="st9" points="96.1,80.6 89.3,84.3 89.5,76.5 "/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g>
|
||||
<g>
|
||||
<line class="st10" x1="97.3" y1="36.5" x2="107.8" y2="54.2"/>
|
||||
<g>
|
||||
<polygon class="st9" points="94.6,39.4 94.5,31.7 101.2,35.5 "/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<circle class="st3" cx="52.1" cy="42.8" r="12.1"/>
|
||||
<circle class="st4" cx="12.2" cy="50.8" r="12.1"/>
|
||||
<circle class="st7" cx="87.5" cy="21.7" r="12.1"/>
|
||||
<circle class="st8" cx="83.5" cy="95" r="12.1"/>
|
||||
<circle class="st6" cx="107.8" cy="54.2" r="12.1"/>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 2.8 KiB |
art/mgmt_logo_default_tall.svg (new file, 132 lines added)

@@ -0,0 +1,132 @@
[image: mgmt logo with wordmark lettering, default colour palette, tall layout; SVG, viewBox 0 0 168.3 131.6, 6.2 KiB]
132
art/mgmt_logo_default_wide.svg
Normal file
@@ -0,0 +1,132 @@
[image: mgmt logo with wordmark lettering, default colour palette, wide layout; SVG, viewBox 0 0 260.4 71.4, 6.2 KiB]
94
art/mgmt_logo_reversed_symbol.svg
Normal file
@@ -0,0 +1,94 @@
[image: mgmt logo symbol, reversed colour palette; SVG, viewBox 0 0 120 107.1, 2.8 KiB]
132
art/mgmt_logo_reversed_tall.svg
Normal file
@@ -0,0 +1,132 @@
[image: mgmt logo with wordmark lettering, reversed colour palette, tall layout; SVG, viewBox 0 0 168.3 133, 6.2 KiB]
132
art/mgmt_logo_reversed_wide.svg
Normal file
@@ -0,0 +1,132 @@
[image: mgmt logo with wordmark lettering, reversed colour palette, wide layout; SVG, viewBox 0 0 260.4 71.4, 6.2 KiB]
94
art/mgmt_logo_white_symbol.svg
Normal file
@@ -0,0 +1,94 @@
[image: mgmt logo symbol, all-white variant; SVG, viewBox 0 0 120 107.1, 2.8 KiB]
132
art/mgmt_logo_white_tall.svg
Normal file
@@ -0,0 +1,132 @@
[image: mgmt logo with wordmark lettering, all-white variant, tall layout; SVG, viewBox 0 0 168.3 131.4, 6.2 KiB]
132
art/mgmt_logo_white_wide.svg
Normal file
@@ -0,0 +1,132 @@
[image: mgmt logo with wordmark lettering, all-white variant, wide layout; SVG, viewBox 0 0 260.4 71.4, 6.2 KiB]
566
config.go
@@ -1,566 +0,0 @@
// Mgmt
// Copyright (C) 2013-2016+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "errors"
    "fmt"
    "gopkg.in/yaml.v2"
    "io/ioutil"
    "log"
    "reflect"
    "strings"
)

type collectorResConfig struct {
    Kind    string `yaml:"kind"`
    Pattern string `yaml:"pattern"` // XXX: Not Implemented
}

type vertexConfig struct {
    Kind string `yaml:"kind"`
    Name string `yaml:"name"`
}

type edgeConfig struct {
    Name string       `yaml:"name"`
    From vertexConfig `yaml:"from"`
    To   vertexConfig `yaml:"to"`
}

// GraphConfig is the data structure that describes a single graph to run.
type GraphConfig struct {
    Graph     string `yaml:"graph"`
    Resources struct {
        Noop  []*NoopRes  `yaml:"noop"`
        Pkg   []*PkgRes   `yaml:"pkg"`
        File  []*FileRes  `yaml:"file"`
        Svc   []*SvcRes   `yaml:"svc"`
        Exec  []*ExecRes  `yaml:"exec"`
        Timer []*TimerRes `yaml:"timer"`
    } `yaml:"resources"`
    Collector []collectorResConfig `yaml:"collect"`
    Edges     []edgeConfig         `yaml:"edges"`
    Comment   string               `yaml:"comment"`
    Hostname  string               `yaml:"hostname"` // uuid for the host
    Remote    string               `yaml:"remote"`
}

// Parse parses a data stream into the graph structure.
func (c *GraphConfig) Parse(data []byte) error {
    if err := yaml.Unmarshal(data, c); err != nil {
        return err
    }
    if c.Graph == "" {
        return errors.New("Graph config: invalid `graph`")
    }
    return nil
}
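
The struct tags above fully determine the YAML shape that Parse accepts. As a quick orientation, here is a minimal sketch (not taken from the repository's own examples, and not part of config.go) of feeding an inline document through it; it assumes the snippet is compiled into the same package, and the `name` key on the noop entries is an assumption about the resource types, which are defined elsewhere — yaml.v2 silently ignores keys a type does not declare, so the parse itself does not depend on that assumption.

// exampleParse is an illustrative sketch: it parses a minimal inline graph
// definition using the GraphConfig type declared above.
func exampleParse() {
    data := []byte(`
graph: mygraph
comment: simple two-resource example
hostname: h1
resources:
  noop:
  - name: noop1
  - name: noop2
edges:
- name: e1
  from:
    kind: noop
    name: noop1
  to:
    kind: noop
    name: noop2
`)
    var config GraphConfig
    if err := config.Parse(data); err != nil { // Parse also rejects a missing `graph` key
        log.Printf("parse error: %v", err)
        return
    }
    log.Printf("graph %s: %d noop resources, %d edges", config.Graph, len(config.Resources.Noop), len(config.Edges))
}
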
// ParseConfigFromFile takes a filename and returns the graph config structure.
func ParseConfigFromFile(filename string) *GraphConfig {
    data, err := ioutil.ReadFile(filename)
    if err != nil {
        log.Printf("Config: Error: ParseConfigFromFile: File: %v", err)
        return nil
    }

    var config GraphConfig
    if err := config.Parse(data); err != nil {
        log.Printf("Config: Error: ParseConfigFromFile: Parse: %v", err)
        return nil
    }

    return &config
}

// NewGraphFromConfig returns a new graph from existing input, such as from the
// existing graph, and a GraphConfig struct.
func (g *Graph) NewGraphFromConfig(config *GraphConfig, embdEtcd *EmbdEtcd, noop bool) (*Graph, error) {
    if config.Hostname == "" {
        return nil, fmt.Errorf("Config: Error: Hostname can't be empty!")
    }

    var graph *Graph // new graph to return
    if g == nil {    // FIXME: how can we check for an empty graph?
        graph = NewGraph("Graph") // give graph a default name
    } else {
        graph = g.Copy() // same vertices, since they're pointers!
    }

    var lookup = make(map[string]map[string]*Vertex)

    //log.Printf("%+v", config) // debug

    // TODO: if defined (somehow)...
    graph.SetName(config.Graph) // set graph name

    var keep []*Vertex  // list of vertex which are the same in new graph
    var resources []Res // list of resources to export
    // use reflection to avoid duplicating code... better options welcome!
    value := reflect.Indirect(reflect.ValueOf(config.Resources))
    vtype := value.Type()
    for i := 0; i < vtype.NumField(); i++ { // number of fields in struct
        name := vtype.Field(i).Name // string of field name
        field := value.FieldByName(name)
        iface := field.Interface() // interface type of value
        slice := reflect.ValueOf(iface)
        // XXX: should we just drop these everywhere and have the kind strings be all lowercase?
        kind := FirstToUpper(name)
        if DEBUG {
            log.Printf("Config: Processing: %v...", kind)
        }
        for j := 0; j < slice.Len(); j++ { // loop through resources of same kind
            x := slice.Index(j).Interface()
            res, ok := x.(Res) // convert to Res type
            if !ok {
                return nil, fmt.Errorf("Config: Error: Can't convert: %v of type: %T to Res.", x, x)
            }
            if noop {
                res.Meta().Noop = noop
            }
            if _, exists := lookup[kind]; !exists {
                lookup[kind] = make(map[string]*Vertex)
            }
            // XXX: should we export based on a @@ prefix, or a metaparam
            // like exported => true || exported => (host pattern)||(other pattern?)
            if !strings.HasPrefix(res.GetName(), "@@") { // not exported resource
                // XXX: we don't have a way of knowing if any of the
                // metaparams are undefined, and as a result to set the
                // defaults that we want! I hate the go yaml parser!!!
                v := graph.GetVertexMatch(res)
                if v == nil { // no match found
                    res.Init()
                    v = NewVertex(res)
                    graph.AddVertex(v) // call standalone in case not part of an edge
                }
                lookup[kind][res.GetName()] = v // used for constructing edges
                keep = append(keep, v)          // append

            } else if !noop { // do not export any resources if noop
                // store for addition to etcd storage...
                res.SetName(res.GetName()[2:]) //slice off @@
                res.setKind(kind)              // cheap init
                resources = append(resources, res)
            }
        }
    }
    // store in etcd
    if err := EtcdSetResources(embdEtcd, config.Hostname, resources); err != nil {
        return nil, fmt.Errorf("Config: Could not export resources: %v", err)
    }

    // lookup from etcd
    var hostnameFilter []string // empty to get from everyone
    kindFilter := []string{}
    for _, t := range config.Collector {
        // XXX: should we just drop these everywhere and have the kind strings be all lowercase?
        kind := FirstToUpper(t.Kind)
        kindFilter = append(kindFilter, kind)
    }
    // do all the graph look ups in one single step, so that if the etcd
    // database changes, we don't have a partial state of affairs...
    if len(kindFilter) > 0 { // if kindFilter is empty, don't need to do lookups!
        var err error
        resources, err = EtcdGetResources(embdEtcd, hostnameFilter, kindFilter)
        if err != nil {
            return nil, fmt.Errorf("Config: Could not collect resources: %v", err)
        }
    }
    for _, res := range resources {
        matched := false
        // see if we find a collect pattern that matches
        for _, t := range config.Collector {
            // XXX: should we just drop these everywhere and have the kind strings be all lowercase?
            kind := FirstToUpper(t.Kind)
            // use t.Kind and optionally t.Pattern to collect from etcd storage
            log.Printf("Collect: %v; Pattern: %v", kind, t.Pattern)

            // XXX: expand to more complex pattern matching here...
            if res.Kind() != kind {
                continue
            }

            if matched {
                // we've already matched this resource, should we match again?
                log.Printf("Config: Warning: Matching %v[%v] again!", kind, res.GetName())
            }
            matched = true

            // collect resources but add the noop metaparam
            if noop {
                res.Meta().Noop = noop
            }

            if t.Pattern != "" { // XXX: simplistic for now
                res.CollectPattern(t.Pattern) // res.Dirname = t.Pattern
            }

            log.Printf("Collect: %v[%v]: collected!", kind, res.GetName())

            // XXX: similar to other resource add code:
            if _, exists := lookup[kind]; !exists {
                lookup[kind] = make(map[string]*Vertex)
            }
            v := graph.GetVertexMatch(res)
            if v == nil { // no match found
                res.Init() // initialize go channels or things won't work!!!
                v = NewVertex(res)
                graph.AddVertex(v) // call standalone in case not part of an edge
            }
            lookup[kind][res.GetName()] = v // used for constructing edges
            keep = append(keep, v)          // append

            //break // let's see if another resource even matches
        }
    }

    // get rid of any vertices we shouldn't "keep" (that aren't in new graph)
    for _, v := range graph.GetVertices() {
        if !VertexContains(v, keep) {
            // wait for exit before starting new graph!
            v.SendEvent(eventExit, true, false)
            graph.DeleteVertex(v)
        }
    }

    for _, e := range config.Edges {
        if _, ok := lookup[FirstToUpper(e.From.Kind)]; !ok {
            return nil, fmt.Errorf("Can't find 'from' resource!")
        }
        if _, ok := lookup[FirstToUpper(e.To.Kind)]; !ok {
            return nil, fmt.Errorf("Can't find 'to' resource!")
        }
        if _, ok := lookup[FirstToUpper(e.From.Kind)][e.From.Name]; !ok {
            return nil, fmt.Errorf("Can't find 'from' name!")
        }
        if _, ok := lookup[FirstToUpper(e.To.Kind)][e.To.Name]; !ok {
            return nil, fmt.Errorf("Can't find 'to' name!")
        }
        graph.AddEdge(lookup[FirstToUpper(e.From.Kind)][e.From.Name], lookup[FirstToUpper(e.To.Kind)][e.To.Name], NewEdge(e.Name))
    }

    return graph, nil
}
|
||||
|
||||
// add edges to the vertex in a graph based on if it matches a uuid list
|
||||
func (g *Graph) addEdgesByMatchingUUIDS(v *Vertex, uuids []ResUUID) []bool {
|
||||
// search for edges and see what matches!
|
||||
var result []bool
|
||||
|
||||
// loop through each uuid, and see if it matches any vertex
|
||||
for _, uuid := range uuids {
|
||||
var found = false
|
||||
// uuid is a ResUUID object
|
||||
for _, vv := range g.GetVertices() { // search
|
||||
if v == vv { // skip self
|
||||
continue
|
||||
}
|
||||
if DEBUG {
|
||||
log.Printf("Compile: AutoEdge: Match: %v[%v] with UUID: %v[%v]", vv.Kind(), vv.GetName(), uuid.Kind(), uuid.GetName())
|
||||
}
|
||||
// we must match to an effective UUID for the resource,
|
||||
// that is to say, the name value of a res is a helpful
|
||||
// handle, but it is not necessarily a unique identity!
|
||||
// remember, resources can return multiple UUID's each!
|
||||
if UUIDExistsInUUIDs(uuid, vv.GetUUIDs()) {
|
||||
// add edge from: vv -> v
|
||||
if uuid.Reversed() {
|
||||
txt := fmt.Sprintf("AutoEdge: %v[%v] -> %v[%v]", vv.Kind(), vv.GetName(), v.Kind(), v.GetName())
|
||||
log.Printf("Compile: Adding %v", txt)
|
||||
g.AddEdge(vv, v, NewEdge(txt))
|
||||
} else { // edges go the "normal" way, eg: pkg resource
|
||||
txt := fmt.Sprintf("AutoEdge: %v[%v] -> %v[%v]", v.Kind(), v.GetName(), vv.Kind(), vv.GetName())
|
||||
log.Printf("Compile: Adding %v", txt)
|
||||
g.AddEdge(v, vv, NewEdge(txt))
|
||||
}
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
result = append(result, found)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// AutoEdges adds the automatic edges to the graph.
|
||||
func (g *Graph) AutoEdges() {
|
||||
log.Println("Compile: Adding AutoEdges...")
|
||||
for _, v := range g.GetVertices() { // for each vertexes autoedges
|
||||
if !v.Meta().AutoEdge { // is the metaparam true?
|
||||
continue
|
||||
}
|
||||
autoEdgeObj := v.AutoEdges()
|
||||
if autoEdgeObj == nil {
|
||||
log.Printf("%v[%v]: Config: No auto edges were found!", v.Kind(), v.GetName())
|
||||
continue // next vertex
|
||||
}
|
||||
|
||||
for { // while the autoEdgeObj has more uuids to add...
|
||||
uuids := autoEdgeObj.Next() // get some!
|
||||
if uuids == nil {
|
||||
log.Printf("%v[%v]: Config: The auto edge list is empty!", v.Kind(), v.GetName())
|
||||
break // inner loop
|
||||
}
|
||||
if DEBUG {
|
||||
log.Println("Compile: AutoEdge: UUIDS:")
|
||||
for i, u := range uuids {
|
||||
log.Printf("Compile: AutoEdge: UUID%d: %v", i, u)
|
||||
}
|
||||
}
|
||||
|
||||
// match and add edges
|
||||
result := g.addEdgesByMatchingUUIDS(v, uuids)
|
||||
|
||||
// report back, and find out if we should continue
|
||||
if !autoEdgeObj.Test(result) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AutoGrouper is the required interface to implement for an autogroup algorithm
|
||||
type AutoGrouper interface {
|
||||
// listed in the order these are typically called in...
|
||||
name() string // friendly identifier
|
||||
init(*Graph) error // only call once
|
||||
vertexNext() (*Vertex, *Vertex, error) // mostly algorithmic
|
||||
vertexCmp(*Vertex, *Vertex) error // can we merge these ?
|
||||
vertexMerge(*Vertex, *Vertex) (*Vertex, error) // vertex merge fn to use
|
||||
edgeMerge(*Edge, *Edge) *Edge // edge merge fn to use
|
||||
vertexTest(bool) (bool, error) // call until false
|
||||
}
|
||||
|
||||
// baseGrouper is the base type for implementing the AutoGrouper interface
|
||||
type baseGrouper struct {
|
||||
graph *Graph // store a pointer to the graph
|
||||
vertices []*Vertex // cached list of vertices
|
||||
i int
|
||||
j int
|
||||
done bool
|
||||
}
|
||||
|
||||
// name provides a friendly name for the logs to see
|
||||
func (ag *baseGrouper) name() string {
|
||||
return "baseGrouper"
|
||||
}
|
||||
|
||||
// init is called only once and before using other AutoGrouper interface methods
|
||||
// the name method is the only exception: call it any time without side effects!
|
||||
func (ag *baseGrouper) init(g *Graph) error {
|
||||
if ag.graph != nil {
|
||||
return fmt.Errorf("The init method has already been called!")
|
||||
}
|
||||
ag.graph = g // pointer
|
||||
ag.vertices = ag.graph.GetVerticesSorted() // cache in deterministic order!
|
||||
ag.i = 0
|
||||
ag.j = 0
|
||||
if len(ag.vertices) == 0 { // empty graph
|
||||
ag.done = true
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// vertexNext is a simple iterator that loops through vertex (pair) combinations
|
||||
// an intelligent algorithm would selectively offer only valid pairs of vertices
|
||||
// these should satisfy logical grouping requirements for the autogroup designs!
|
||||
// the desired algorithms can override, but keep this method as a base iterator!
|
||||
func (ag *baseGrouper) vertexNext() (v1, v2 *Vertex, err error) {
|
||||
// this does a for v... { for w... { return v, w }} but stepwise!
|
||||
l := len(ag.vertices)
|
||||
if ag.i < l {
|
||||
v1 = ag.vertices[ag.i]
|
||||
}
|
||||
if ag.j < l {
|
||||
v2 = ag.vertices[ag.j]
|
||||
}
|
||||
|
||||
// in case the vertex was deleted
|
||||
if !ag.graph.HasVertex(v1) {
|
||||
v1 = nil
|
||||
}
|
||||
if !ag.graph.HasVertex(v2) {
|
||||
v2 = nil
|
||||
}
|
||||
|
||||
// two nested loops...
|
||||
if ag.j < l {
|
||||
ag.j++
|
||||
}
|
||||
if ag.j == l {
|
||||
ag.j = 0
|
||||
if ag.i < l {
|
||||
ag.i++
|
||||
}
|
||||
if ag.i == l {
|
||||
ag.done = true
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (ag *baseGrouper) vertexCmp(v1, v2 *Vertex) error {
|
||||
if v1 == nil || v2 == nil {
|
||||
return fmt.Errorf("Vertex is nil!")
|
||||
}
|
||||
if v1 == v2 { // skip yourself
|
||||
return fmt.Errorf("Vertices are the same!")
|
||||
}
|
||||
if v1.Kind() != v2.Kind() { // we must group similar kinds
|
||||
// TODO: maybe future resources won't need this limitation?
|
||||
return fmt.Errorf("The two resources aren't the same kind!")
|
||||
}
|
||||
// someone doesn't want to group!
|
||||
if !v1.Meta().AutoGroup || !v2.Meta().AutoGroup {
|
||||
return fmt.Errorf("One of the autogroup flags is false!")
|
||||
}
|
||||
if v1.Res.IsGrouped() { // already grouped!
|
||||
return fmt.Errorf("Already grouped!")
|
||||
}
|
||||
if len(v2.Res.GetGroup()) > 0 { // already has children grouped!
|
||||
return fmt.Errorf("Already has groups!")
|
||||
}
|
||||
if !v1.Res.GroupCmp(v2.Res) { // resource groupcmp failed!
|
||||
return fmt.Errorf("The GroupCmp failed!")
|
||||
}
|
||||
return nil // success
|
||||
}
|
||||
|
||||
func (ag *baseGrouper) vertexMerge(v1, v2 *Vertex) (v *Vertex, err error) {
|
||||
// NOTE: it's important to use w.Res instead of w, b/c
|
||||
// the w by itself is the *Vertex obj, not the *Res obj
|
||||
// which is contained within it! They both satisfy the
|
||||
// Res interface, which is why both will compile! :(
|
||||
err = v1.Res.GroupRes(v2.Res) // GroupRes skips stupid groupings
|
||||
return // success or fail, and no need to merge the actual vertices!
|
||||
}
|
||||
|
||||
func (ag *baseGrouper) edgeMerge(e1, e2 *Edge) *Edge {
|
||||
return e1 // noop
|
||||
}
|
||||
|
||||
// vertexTest processes the results of the grouping for the algorithm to know
|
||||
// return an error if something went horribly wrong, and bool false to stop
|
||||
func (ag *baseGrouper) vertexTest(b bool) (bool, error) {
|
||||
// NOTE: this particular baseGrouper version doesn't track what happens
|
||||
// because since we iterate over every pair, we don't care which merge!
|
||||
if ag.done {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// TODO: this algorithm may not be correct in all cases. replace if needed!
|
||||
type nonReachabilityGrouper struct {
|
||||
baseGrouper // "inherit" what we want, and reimplement the rest
|
||||
}
|
||||
|
||||
func (ag *nonReachabilityGrouper) name() string {
|
||||
return "nonReachabilityGrouper"
|
||||
}
|
||||
|
||||
// this algorithm relies on the observation that if there's a path from a to b,
|
||||
// then they *can't* be merged (b/c of the existing dependency) so therefore we
|
||||
// merge anything that *doesn't* satisfy this condition or that of the reverse!
|
||||
func (ag *nonReachabilityGrouper) vertexNext() (v1, v2 *Vertex, err error) {
|
||||
for {
|
||||
v1, v2, err = ag.baseGrouper.vertexNext() // get all iterable pairs
|
||||
if err != nil {
|
||||
log.Fatalf("Error running autoGroup(vertexNext): %v", err)
|
||||
}
|
||||
|
||||
if v1 != v2 { // ignore self cmp early (perf optimization)
|
||||
// if NOT reachable, they're viable...
|
||||
out1 := ag.graph.Reachability(v1, v2)
|
||||
out2 := ag.graph.Reachability(v2, v1)
|
||||
if len(out1) == 0 && len(out2) == 0 {
|
||||
return // return v1 and v2, they're viable
|
||||
}
|
||||
}
|
||||
|
||||
// if we got here, it means we're skipping over this candidate!
|
||||
if ok, err := ag.baseGrouper.vertexTest(false); err != nil {
|
||||
log.Fatalf("Error running autoGroup(vertexTest): %v", err)
|
||||
} else if !ok {
|
||||
return nil, nil, nil // done!
|
||||
}
|
||||
|
||||
// the vertexTest passed, so loop and try with a new pair...
|
||||
}
|
||||
}
|
||||
|
||||
// autoGroup is the mechanical auto group "runner" that runs the interface spec
|
||||
func (g *Graph) autoGroup(ag AutoGrouper) chan string {
|
||||
strch := make(chan string) // output log messages here
|
||||
go func(strch chan string) {
|
||||
strch <- fmt.Sprintf("Compile: Grouping: Algorithm: %v...", ag.name())
|
||||
if err := ag.init(g); err != nil {
|
||||
log.Fatalf("Error running autoGroup(init): %v", err)
|
||||
}
|
||||
|
||||
for {
|
||||
var v, w *Vertex
|
||||
v, w, err := ag.vertexNext() // get pair to compare
|
||||
if err != nil {
|
||||
log.Fatalf("Error running autoGroup(vertexNext): %v", err)
|
||||
}
|
||||
merged := false
|
||||
// save names since they change during the runs
|
||||
vStr := fmt.Sprintf("%s", v) // valid even if it is nil
|
||||
wStr := fmt.Sprintf("%s", w)
|
||||
|
||||
if err := ag.vertexCmp(v, w); err != nil { // cmp ?
|
||||
if DEBUG {
|
||||
strch <- fmt.Sprintf("Compile: Grouping: !GroupCmp for: %s into %s", wStr, vStr)
|
||||
}
|
||||
|
||||
// remove grouped vertex and merge edges (res is safe)
|
||||
} else if err := g.VertexMerge(v, w, ag.vertexMerge, ag.edgeMerge); err != nil { // merge...
|
||||
strch <- fmt.Sprintf("Compile: Grouping: !VertexMerge for: %s into %s", wStr, vStr)
|
||||
|
||||
} else { // success!
|
||||
strch <- fmt.Sprintf("Compile: Grouping: Success for: %s into %s", wStr, vStr)
|
||||
merged = true // woo
|
||||
}
|
||||
|
||||
// did these get used?
|
||||
if ok, err := ag.vertexTest(merged); err != nil {
|
||||
log.Fatalf("Error running autoGroup(vertexTest): %v", err)
|
||||
} else if !ok {
|
||||
break // done!
|
||||
}
|
||||
}
|
||||
|
||||
close(strch)
|
||||
return
|
||||
}(strch) // call function
|
||||
return strch
|
||||
}
|
||||
|
||||
// AutoGroup runs the auto grouping on the graph and prints out log messages
|
||||
func (g *Graph) AutoGroup() {
|
||||
// receive log messages from channel...
|
||||
// this allows test cases to avoid printing them when they're unwanted!
|
||||
// TODO: this algorithm may not be correct in all cases. replace if needed!
|
||||
for str := range g.autoGroup(&nonReachabilityGrouper{}) {
|
||||
log.Println(str)
|
||||
}
|
||||
}
|
||||
configwatch.go
@@ -1,224 +0,0 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"gopkg.in/fsnotify.v1"
|
||||
//"github.com/go-fsnotify/fsnotify" // git master of "gopkg.in/fsnotify.v1"
|
||||
"log"
|
||||
"math"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// ConfigWatcher returns events on a channel anytime one of its files events.
|
||||
type ConfigWatcher struct {
|
||||
ch chan string
|
||||
wg sync.WaitGroup
|
||||
closechan chan struct{}
|
||||
}
|
||||
|
||||
// NewConfigWatcher creates a new ConfigWatcher struct.
|
||||
func NewConfigWatcher() *ConfigWatcher {
|
||||
return &ConfigWatcher{
|
||||
ch: make(chan string),
|
||||
closechan: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// The Add method adds a new file path to watch for events on.
|
||||
func (obj *ConfigWatcher) Add(file ...string) {
|
||||
if len(file) == 0 {
|
||||
return
|
||||
}
|
||||
if len(file) > 1 {
|
||||
for _, f := range file { // add all the files...
|
||||
obj.Add(f) // recurse
|
||||
}
|
||||
return
|
||||
}
|
||||
// otherwise, add the one file passed in...
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
ch := ConfigWatch(file[0])
|
||||
for {
|
||||
select {
|
||||
case <-ch:
|
||||
obj.ch <- file[0]
|
||||
continue
|
||||
case <-obj.closechan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Events returns a channel to listen on for file events. It closes when it is
|
||||
// emptied after the Close() method is called. You can test for closure with the
|
||||
// f, more := <-obj.Events() pattern.
|
||||
func (obj *ConfigWatcher) Events() chan string {
|
||||
return obj.ch
|
||||
}
|
||||
|
||||
// Close shuts down the ConfigWatcher object. It closes the Events channel after
|
||||
// all the currently pending events have been emptied.
|
||||
func (obj *ConfigWatcher) Close() {
|
||||
if obj.ch == nil {
|
||||
return
|
||||
}
|
||||
close(obj.closechan)
|
||||
obj.wg.Wait() // wait until everyone is done sending on obj.ch
|
||||
//obj.ch <- "" // send finished message
|
||||
close(obj.ch)
|
||||
obj.ch = nil
|
||||
}
|
||||
|
||||
// ConfigWatch writes on the channel everytime an event is seen for the path.
|
||||
// XXX: it would be great if we could reuse code between this and the file resource
|
||||
// XXX: patch this to submit it as part of go-fsnotify if they're interested...
|
||||
func ConfigWatch(file string) chan bool {
|
||||
ch := make(chan bool)
|
||||
go func() {
|
||||
var safename = path.Clean(file) // no trailing slash
|
||||
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
patharray := PathSplit(safename) // tokenize the path
|
||||
var index = len(patharray) // starting index
|
||||
var current string // current "watcher" location
|
||||
var deltaDepth int // depth delta between watcher and event
|
||||
var send = false // send event?
|
||||
|
||||
for {
|
||||
current = strings.Join(patharray[0:index], "/")
|
||||
if current == "" { // the empty string top is the root dir ("/")
|
||||
current = "/"
|
||||
}
|
||||
if DEBUG {
|
||||
log.Printf("Watching: %v", current) // attempting to watch...
|
||||
}
|
||||
// initialize in the loop so that we can reset on rm-ed handles
|
||||
err = watcher.Add(current)
|
||||
if err != nil {
|
||||
if err == syscall.ENOENT {
|
||||
index-- // usually not found, move up one dir
|
||||
} else if err == syscall.ENOSPC {
|
||||
// XXX: occasionally: no space left on device,
|
||||
// XXX: probably due to lack of inotify watches
|
||||
log.Printf("Out of inotify watches for config(%v)", file)
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
log.Printf("Unknown config(%v) error:", file)
|
||||
log.Fatal(err)
|
||||
}
|
||||
index = int(math.Max(1, float64(index)))
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case event := <-watcher.Events:
|
||||
// the deeper you go, the bigger the deltaDepth is...
|
||||
// this is the difference between what we're watching,
|
||||
// and the event... doesn't mean we can't watch deeper
|
||||
if current == event.Name {
|
||||
deltaDepth = 0 // i was watching what i was looking for
|
||||
|
||||
} else if HasPathPrefix(event.Name, current) {
|
||||
deltaDepth = len(PathSplit(current)) - len(PathSplit(event.Name)) // -1 or less
|
||||
|
||||
} else if HasPathPrefix(current, event.Name) {
|
||||
deltaDepth = len(PathSplit(event.Name)) - len(PathSplit(current)) // +1 or more
|
||||
|
||||
} else {
|
||||
// TODO different watchers get each others events!
|
||||
// https://github.com/go-fsnotify/fsnotify/issues/95
|
||||
// this happened with two values such as:
|
||||
// event.Name: /tmp/mgmt/f3 and current: /tmp/mgmt/f2
|
||||
continue
|
||||
}
|
||||
//log.Printf("The delta depth is: %v", deltaDepth)
|
||||
|
||||
// if we have what we wanted, awesome, send an event...
|
||||
if event.Name == safename {
|
||||
//log.Println("Event!")
|
||||
// TODO: filter out some of the events, is Write a sufficient minimum?
|
||||
if event.Op&fsnotify.Write == fsnotify.Write {
|
||||
send = true
|
||||
}
|
||||
|
||||
// file removed, move the watch upwards
|
||||
if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
|
||||
//log.Println("Removal!")
|
||||
watcher.Remove(current)
|
||||
index--
|
||||
}
|
||||
|
||||
// we must be a parent watcher, so descend in
|
||||
if deltaDepth < 0 {
|
||||
watcher.Remove(current)
|
||||
index++
|
||||
}
|
||||
|
||||
// if safename starts with event.Name, we're above, and no event should be sent
|
||||
} else if HasPathPrefix(safename, event.Name) {
|
||||
//log.Println("Above!")
|
||||
|
||||
if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
|
||||
log.Println("Removal!")
|
||||
watcher.Remove(current)
|
||||
index--
|
||||
}
|
||||
|
||||
if deltaDepth < 0 {
|
||||
log.Println("Parent!")
|
||||
if PathPrefixDelta(safename, event.Name) == 1 { // we're the parent dir
|
||||
//send = true
|
||||
}
|
||||
watcher.Remove(current)
|
||||
index++
|
||||
}
|
||||
|
||||
// if event.Name startswith safename, send event, we're already deeper
|
||||
} else if HasPathPrefix(event.Name, safename) {
|
||||
//log.Println("Event2!")
|
||||
//send = true
|
||||
}
|
||||
|
||||
case err := <-watcher.Errors:
|
||||
log.Printf("error: %v", err)
|
||||
log.Fatal(err)
|
||||
|
||||
}
|
||||
|
||||
// do our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
ch <- true
|
||||
}
|
||||
}
|
||||
//close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -15,35 +15,38 @@
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
// Package converger is a facility for reporting the converged state.
|
||||
package converger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
)
|
||||
|
||||
// TODO: we could make a new function that masks out the state of certain
|
||||
// UUID's, but at the moment the new Timer code has obsoleted the need...
|
||||
// UID's, but at the moment the new Timer code has obsoleted the need...
|
||||
|
||||
// Converger is the general interface for implementing a convergence watcher
|
||||
// Converger is the general interface for implementing a convergence watcher.
|
||||
type Converger interface { // TODO: need a better name
|
||||
Register() ConvergerUUID
|
||||
IsConverged(ConvergerUUID) bool // is the UUID converged ?
|
||||
SetConverged(ConvergerUUID, bool) error // set the converged state of the UUID
|
||||
Unregister(ConvergerUUID)
|
||||
Register() UID
|
||||
IsConverged(UID) bool // is the UID converged ?
|
||||
SetConverged(UID, bool) error // set the converged state of the UID
|
||||
Unregister(UID)
|
||||
Start()
|
||||
Pause()
|
||||
Loop(bool)
|
||||
ConvergedTimer(ConvergerUUID) <-chan time.Time
|
||||
ConvergedTimer(UID) <-chan time.Time
|
||||
Status() map[uint64]bool
|
||||
Timeout() int // returns the timeout that this was created with
|
||||
SetStateFn(func(bool) error) // sets the stateFn
|
||||
}
|
||||
|
||||
// ConvergerUUID is the interface resources can use to notify with if converged
|
||||
// you'll need to use part of the Converger interface to Register initially too
|
||||
type ConvergerUUID interface {
|
||||
// UID is the interface resources can use to notify with if converged. You'll
|
||||
// need to use part of the Converger interface to Register initially too.
|
||||
type UID interface {
|
||||
ID() uint64 // get Id
|
||||
Name() string // get a friendly name
|
||||
SetName(string)
|
||||
@@ -58,7 +61,7 @@ type ConvergerUUID interface {
|
||||
StopTimer() error
|
||||
}
|
||||
|
||||
// converger is an implementation of the Converger interface
|
||||
// converger is an implementation of the Converger interface.
|
||||
type converger struct {
|
||||
timeout int // must be zero (instant) or greater seconds to run
|
||||
stateFn func(bool) error // run on converged state changes with state bool
|
||||
@@ -70,17 +73,18 @@ type converger struct {
|
||||
status map[uint64]bool
|
||||
}
|
||||
|
||||
// convergerUUID is an implementation of the ConvergerUUID interface
|
||||
type convergerUUID struct {
|
||||
// cuid is an implementation of the UID interface.
|
||||
type cuid struct {
|
||||
converger Converger
|
||||
id uint64
|
||||
name string // user defined, friendly name
|
||||
mutex sync.Mutex
|
||||
timer chan struct{}
|
||||
running bool // is the above timer running?
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewConverger builds a new converger struct
|
||||
// NewConverger builds a new converger struct.
|
||||
func NewConverger(timeout int, stateFn func(bool) error) *converger {
|
||||
return &converger{
|
||||
timeout: timeout,
|
||||
@@ -92,13 +96,13 @@ func NewConverger(timeout int, stateFn func(bool) error) *converger {
|
||||
}
|
||||
}
|
||||
|
||||
// Register assigns a ConvergerUUID to the caller
|
||||
func (obj *converger) Register() ConvergerUUID {
|
||||
// Register assigns a UID to the caller.
|
||||
func (obj *converger) Register() UID {
|
||||
obj.mutex.Lock()
|
||||
defer obj.mutex.Unlock()
|
||||
obj.lastid++
|
||||
obj.status[obj.lastid] = false // initialize as not converged
|
||||
return &convergerUUID{
|
||||
return &cuid{
|
||||
converger: obj,
|
||||
id: obj.lastid,
|
||||
name: fmt.Sprintf("%d", obj.lastid), // some default
|
||||
@@ -107,30 +111,30 @@ func (obj *converger) Register() ConvergerUUID {
|
||||
}
|
||||
}
|
||||
|
||||
// IsConverged gets the converged status of a uuid
|
||||
func (obj *converger) IsConverged(uuid ConvergerUUID) bool {
|
||||
if !uuid.IsValid() {
|
||||
panic(fmt.Sprintf("Id of ConvergerUUID(%s) is nil!", uuid.Name()))
|
||||
// IsConverged gets the converged status of a uid.
|
||||
func (obj *converger) IsConverged(uid UID) bool {
|
||||
if !uid.IsValid() {
|
||||
panic(fmt.Sprintf("the ID of UID(%s) is nil", uid.Name()))
|
||||
}
|
||||
obj.mutex.RLock()
|
||||
isConverged, found := obj.status[uuid.ID()] // lookup
|
||||
isConverged, found := obj.status[uid.ID()] // lookup
|
||||
obj.mutex.RUnlock()
|
||||
if !found {
|
||||
panic("Id of ConvergerUUID is unregistered!")
|
||||
panic("the ID of UID is unregistered")
|
||||
}
|
||||
return isConverged
|
||||
}
|
||||
|
||||
// SetConverged updates the converger with the converged state of the UUID
|
||||
func (obj *converger) SetConverged(uuid ConvergerUUID, isConverged bool) error {
|
||||
if !uuid.IsValid() {
|
||||
return fmt.Errorf("Id of ConvergerUUID(%s) is nil!", uuid.Name())
|
||||
// SetConverged updates the converger with the converged state of the UID.
|
||||
func (obj *converger) SetConverged(uid UID, isConverged bool) error {
|
||||
if !uid.IsValid() {
|
||||
return fmt.Errorf("the ID of UID(%s) is nil", uid.Name())
|
||||
}
|
||||
obj.mutex.Lock()
|
||||
if _, found := obj.status[uuid.ID()]; !found {
|
||||
panic("Id of ConvergerUUID is unregistered!")
|
||||
if _, found := obj.status[uid.ID()]; !found {
|
||||
panic("the ID of UID is unregistered")
|
||||
}
|
||||
obj.status[uuid.ID()] = isConverged // set
|
||||
obj.status[uid.ID()] = isConverged // set
|
||||
obj.mutex.Unlock() // unlock *before* poke or deadlock!
|
||||
if isConverged != obj.converged { // only poke if it would be helpful
|
||||
// run in a go routine so that we never block... just queue up!
|
||||
@@ -140,7 +144,7 @@ func (obj *converger) SetConverged(uuid ConvergerUUID, isConverged bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// isConverged returns true if *every* registered uuid has converged
|
||||
// isConverged returns true if *every* registered uid has converged.
|
||||
func (obj *converger) isConverged() bool {
|
||||
obj.mutex.RLock() // take a read lock
|
||||
defer obj.mutex.RUnlock()
|
||||
@@ -152,42 +156,42 @@ func (obj *converger) isConverged() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Unregister dissociates the ConvergedUUID from the converged checking
|
||||
func (obj *converger) Unregister(uuid ConvergerUUID) {
|
||||
if !uuid.IsValid() {
|
||||
panic(fmt.Sprintf("Id of ConvergerUUID(%s) is nil!", uuid.Name()))
|
||||
// Unregister dissociates the ConvergedUID from the converged checking.
|
||||
func (obj *converger) Unregister(uid UID) {
|
||||
if !uid.IsValid() {
|
||||
panic(fmt.Sprintf("the ID of UID(%s) is nil", uid.Name()))
|
||||
}
|
||||
obj.mutex.Lock()
|
||||
uuid.StopTimer() // ignore any errors
|
||||
delete(obj.status, uuid.ID())
|
||||
uid.StopTimer() // ignore any errors
|
||||
delete(obj.status, uid.ID())
|
||||
obj.mutex.Unlock()
|
||||
uuid.InvalidateID()
|
||||
uid.InvalidateID()
|
||||
}
|
||||
|
||||
// Start causes a Converger object to start or resume running
|
||||
// Start causes a Converger object to start or resume running.
|
||||
func (obj *converger) Start() {
|
||||
obj.control <- true
|
||||
}
|
||||
|
||||
// Pause causes a Converger object to stop running temporarily
|
||||
// Pause causes a Converger object to stop running temporarily.
|
||||
func (obj *converger) Pause() { // FIXME: add a sync ACK on pause before return
|
||||
obj.control <- false
|
||||
}
|
||||
|
||||
// Loop is the main loop for a Converger object; it usually runs in a goroutine
|
||||
// TODO: we could eventually have each resource tell us as soon as it converges
|
||||
// and then keep track of the time delays here, to avoid callers needing select
|
||||
// Loop is the main loop for a Converger object. It usually runs in a goroutine.
|
||||
// TODO: we could eventually have each resource tell us as soon as it converges,
|
||||
// and then keep track of the time delays here, to avoid callers needing select.
|
||||
// NOTE: when we have very short timeouts, if we start before all the resources
|
||||
// have joined the map, then it might appears as if we converged before we did!
|
||||
// have joined the map, then it might appear as if we converged before we did!
|
||||
func (obj *converger) Loop(startPaused bool) {
|
||||
if obj.control == nil {
|
||||
panic("Converger not initialized correctly")
|
||||
panic("converger not initialized correctly")
|
||||
}
|
||||
if startPaused { // start paused without racing
|
||||
select {
|
||||
case e := <-obj.control:
|
||||
if !e {
|
||||
panic("Converger expected true!")
|
||||
panic("converger expected true")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -195,13 +199,13 @@ func (obj *converger) Loop(startPaused bool) {
|
||||
select {
|
||||
case e := <-obj.control: // expecting "false" which means pause!
|
||||
if e {
|
||||
panic("Converger expected false!")
|
||||
panic("converger expected false")
|
||||
}
|
||||
// now i'm paused...
|
||||
select {
|
||||
case e := <-obj.control:
|
||||
if !e {
|
||||
panic("Converger expected true!")
|
||||
panic("converger expected true")
|
||||
}
|
||||
// restart
|
||||
// kick once to refresh the check...
|
||||
@@ -240,20 +244,20 @@ func (obj *converger) Loop(startPaused bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// ConvergedTimer adds a timeout to a select call and blocks until then
|
||||
// ConvergedTimer adds a timeout to a select call and blocks until then.
|
||||
// TODO: this means we could eventually have per resource converged timeouts
|
||||
func (obj *converger) ConvergedTimer(uuid ConvergerUUID) <-chan time.Time {
|
||||
func (obj *converger) ConvergedTimer(uid UID) <-chan time.Time {
|
||||
// be clever: if i'm already converged, this timeout should block which
|
||||
// avoids unnecessary new signals being sent! this avoids fast loops if
|
||||
// we have a low timeout, or in particular a timeout == 0
|
||||
if uuid.IsConverged() {
|
||||
if uid.IsConverged() {
|
||||
// blocks the case statement in select forever!
|
||||
return TimeAfterOrBlock(-1)
|
||||
return util.TimeAfterOrBlock(-1)
|
||||
}
|
||||
return TimeAfterOrBlock(obj.timeout)
|
||||
return util.TimeAfterOrBlock(obj.timeout)
|
||||
}
|
||||
|
||||
// Status returns a map of the converged status of each UUID.
|
||||
// Status returns a map of the converged status of each UID.
|
||||
func (obj *converger) Status() map[uint64]bool {
|
||||
status := make(map[uint64]bool)
|
||||
obj.mutex.RLock() // take a read lock
|
||||
@@ -276,63 +280,65 @@ func (obj *converger) SetStateFn(stateFn func(bool) error) {
|
||||
obj.stateFn = stateFn
|
||||
}
|
||||
|
||||
// Id returns the unique id of this UUID object
|
||||
func (obj *convergerUUID) ID() uint64 {
|
||||
// ID returns the unique id of this UID object.
|
||||
func (obj *cuid) ID() uint64 {
|
||||
return obj.id
|
||||
}
|
||||
|
||||
// Name returns a user defined name for the specific convergerUUID.
|
||||
func (obj *convergerUUID) Name() string {
|
||||
// Name returns a user defined name for the specific cuid.
|
||||
func (obj *cuid) Name() string {
|
||||
return obj.name
|
||||
}
|
||||
|
||||
// SetName sets a user defined name for the specific convergerUUID.
|
||||
func (obj *convergerUUID) SetName(name string) {
|
||||
// SetName sets a user defined name for the specific cuid.
|
||||
func (obj *cuid) SetName(name string) {
|
||||
obj.name = name
|
||||
}
|
||||
|
||||
// IsValid tells us if the id is valid or has already been destroyed
|
||||
func (obj *convergerUUID) IsValid() bool {
|
||||
// IsValid tells us if the id is valid or has already been destroyed.
|
||||
func (obj *cuid) IsValid() bool {
|
||||
return obj.id != 0 // an id of 0 is invalid
|
||||
}
|
||||
|
||||
// InvalidateID marks the id as no longer valid
|
||||
func (obj *convergerUUID) InvalidateID() {
|
||||
// InvalidateID marks the id as no longer valid.
|
||||
func (obj *cuid) InvalidateID() {
|
||||
obj.id = 0 // an id of 0 is invalid
|
||||
}
|
||||
|
||||
// IsConverged is a helper function to the regular IsConverged method
|
||||
func (obj *convergerUUID) IsConverged() bool {
|
||||
// IsConverged is a helper function to the regular IsConverged method.
|
||||
func (obj *cuid) IsConverged() bool {
|
||||
return obj.converger.IsConverged(obj)
|
||||
}
|
||||
|
||||
// SetConverged is a helper function to the regular SetConverged notification
|
||||
func (obj *convergerUUID) SetConverged(isConverged bool) error {
|
||||
// SetConverged is a helper function to the regular SetConverged notification.
|
||||
func (obj *cuid) SetConverged(isConverged bool) error {
|
||||
return obj.converger.SetConverged(obj, isConverged)
|
||||
}
|
||||
|
||||
// Unregister is a helper function to unregister myself
|
||||
func (obj *convergerUUID) Unregister() {
|
||||
// Unregister is a helper function to unregister myself.
|
||||
func (obj *cuid) Unregister() {
|
||||
obj.converger.Unregister(obj)
|
||||
}
|
||||
|
||||
// ConvergedTimer is a helper around the regular ConvergedTimer method
|
||||
func (obj *convergerUUID) ConvergedTimer() <-chan time.Time {
|
||||
// ConvergedTimer is a helper around the regular ConvergedTimer method.
|
||||
func (obj *cuid) ConvergedTimer() <-chan time.Time {
|
||||
return obj.converger.ConvergedTimer(obj)
|
||||
}
|
||||
|
||||
// StartTimer runs an invisible timer that automatically converges on timeout.
|
||||
func (obj *convergerUUID) StartTimer() (func() error, error) {
|
||||
func (obj *cuid) StartTimer() (func() error, error) {
|
||||
obj.mutex.Lock()
|
||||
if !obj.running {
|
||||
obj.timer = make(chan struct{})
|
||||
obj.running = true
|
||||
} else {
|
||||
obj.mutex.Unlock()
|
||||
return obj.StopTimer, fmt.Errorf("Timer already started!")
|
||||
return obj.StopTimer, fmt.Errorf("timer already started")
|
||||
}
|
||||
obj.mutex.Unlock()
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case _, ok := <-obj.timer: // reset signal channel
|
||||
@@ -356,24 +362,25 @@ func (obj *convergerUUID) StartTimer() (func() error, error) {
|
||||
}
|
||||
|
||||
// ResetTimer resets the counter to zero if using a StartTimer internally.
|
||||
func (obj *convergerUUID) ResetTimer() error {
|
||||
func (obj *cuid) ResetTimer() error {
|
||||
obj.mutex.Lock()
|
||||
defer obj.mutex.Unlock()
|
||||
if obj.running {
|
||||
obj.timer <- struct{}{} // send the reset message
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Timer hasn't been started!")
|
||||
return fmt.Errorf("timer hasn't been started")
|
||||
}
|
||||
|
||||
// StopTimer stops the running timer permanently until a StartTimer is run.
|
||||
func (obj *convergerUUID) StopTimer() error {
|
||||
func (obj *cuid) StopTimer() error {
|
||||
obj.mutex.Lock()
|
||||
defer obj.mutex.Unlock()
|
||||
if !obj.running {
|
||||
return fmt.Errorf("Timer isn't running!")
|
||||
return fmt.Errorf("timer isn't running")
|
||||
}
|
||||
close(obj.timer)
|
||||
obj.wg.Wait()
|
||||
obj.running = false
|
||||
return nil
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -15,12 +15,5 @@
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package main provides the main entrypoint for using the `mgmt` software.
|
||||
package main
|
||||
|
||||
import (
|
||||
//"testing"
|
||||
)
|
||||
|
||||
//func TestT1(t *testing.T) {
|
||||
|
||||
//}
|
||||
docs/.gitignore
@@ -0,0 +1,2 @@
|
||||
mgmt-documentation.pdf
|
||||
_build
|
||||
docs/Makefile
@@ -0,0 +1,20 @@
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
SPHINXPROJ = mgmt
|
||||
SOURCEDIR = .
|
||||
BUILDDIR = _build
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
.PHONY: help Makefile
|
||||
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
docs/conf.py
@@ -0,0 +1,158 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# mgmt documentation build configuration file, created by
|
||||
# sphinx-quickstart on Wed Feb 15 21:34:09 2017.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
from recommonmark.parser import CommonMarkParser
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = []
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
|
||||
source_parsers = {
|
||||
'.md': CommonMarkParser,
|
||||
}
|
||||
|
||||
source_suffix = ['.rst', '.md']
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'mgmt'
|
||||
copyright = u'2013-2017+ James Shubin and the project contributors'
|
||||
author = u'James Shubin'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = u''
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = u''
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'venv']
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
#html_theme = 'alabaster'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
|
||||
# -- Options for HTMLHelp output ------------------------------------------
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'mgmtdoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#
|
||||
# 'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#
|
||||
# 'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'mgmt.tex', u'mgmt Documentation',
|
||||
u'James Shubin', 'manual'),
|
||||
]
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 'mgmt', u'mgmt Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'mgmt', u'mgmt Documentation',
|
||||
author, 'mgmt', 'A next generation config management prototype!',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
docs/documentation.md
@@ -0,0 +1,646 @@
|
||||
# mgmt
|
||||
|
||||
Available from:
|
||||
[https://github.com/purpleidea/mgmt/](https://github.com/purpleidea/mgmt/)
|
||||
|
||||
This documentation is available in: [Markdown](https://github.com/purpleidea/mgmt/blob/master/docs/documentation.md) or [PDF](https://pdfdoc-purpleidea.rhcloud.com/pdf/https://github.com/purpleidea/mgmt/blob/master/docs/documentation.md) format.
|
||||
|
||||
## Overview
|
||||
|
||||
The `mgmt` tool is a next generation config management prototype. It's not yet
|
||||
ready for production, but we hope to get there soon. Get involved today!
|
||||
|
||||
## Project Description
|
||||
|
||||
The mgmt tool is a distributed, event-driven config management tool that
supports parallel execution and librarification, so it can be used as the
management foundation in and for new and existing software.
|
||||
|
||||
For more information, you may like to read some blog posts from the author:
|
||||
|
||||
* [Next generation config mgmt](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
|
||||
* [Automatic edges in mgmt](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
|
||||
* [Automatic grouping in mgmt](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
|
||||
* [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
|
||||
* [Remote execution in mgmt](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/)
|
||||
* [Send/Recv in mgmt](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/)
|
||||
* [Metaparameters in mgmt](https://ttboj.wordpress.com/2017/03/01/metaparameters-in-mgmt/)
|
||||
|
||||
There is also an [introductory video](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) available.
|
||||
Older videos and other material [is available](https://github.com/purpleidea/mgmt/#on-the-web).
|
||||
|
||||
## Setup
|
||||
|
||||
During this prototype phase, the tool can be run out of the source directory.
|
||||
You'll probably want to use ```./run.sh run --yaml examples/graph1.yaml``` to
|
||||
get started. Beware that this _can_ cause data loss. Understand what you're
|
||||
doing first, or perform these actions in a virtual environment such as the one
|
||||
provided by [Oh-My-Vagrant](https://github.com/purpleidea/oh-my-vagrant).
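
To give you a sense of what such a graph file contains, here is a minimal
sketch in the style of the bundled examples. The exact fields are defined by
each resource implementation, so treat the names below as illustrative:

```yaml
---
graph: mygraph
comment: a simple example graph
resources:
  noop:
  - name: noop1
  file:
  - name: file1
    path: "/tmp/mgmt/hello"
    content: |
      hello world from mgmt
    state: exists
edges:
- name: e1
  from:
    kind: noop
    name: noop1
  to:
    kind: file
    name: file1
```
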
## Features
|
||||
|
||||
This section details the numerous features of mgmt and some caveats you might
|
||||
need to be aware of.
|
||||
|
||||
### Autoedges
|
||||
|
||||
Automatic edges, or AutoEdges, are the mechanism by which mgmt will
automatically create dependencies between resources for you. For example,
|
||||
since mgmt can discover which files are installed by a package it will
|
||||
automatically ensure that any file resource you declare that matches a
|
||||
file installed by your package resource will only be processed after the
|
||||
package is installed.
|
||||
|
||||
#### Controlling autoedges
|
||||
|
||||
Though autoedges are likely to be very helpful and save you from having to
declare all dependencies explicitly, there are cases where this behaviour is
undesirable.
|
||||
|
||||
Some distributions allow package installations to automatically start the
|
||||
service they ship. This can be problematic in the case of packages like MySQL
|
||||
as there are configuration options that need to be set before MySQL is ever
|
||||
started for the first time (or you'll need to wipe the data directory). In
|
||||
order to handle this situation you can disable autoedges per resource and
|
||||
explicitly declare that you want `my.cnf` to be written to disk before the
|
||||
installation of the `mysql-server` package.
|
||||
|
||||
You can disable autoedges for a resource by setting the `autoedge` key on
|
||||
the meta attributes of that resource to `false`.
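
As a sketch (assuming the YAML graph format of the bundled examples, with meta
attributes nested under a `meta` key), the MySQL scenario above might look
something like this:

```yaml
resources:
  pkg:
  - name: mysql-server
    state: installed
  file:
  - name: my.cnf
    path: "/etc/my.cnf"
    content: |
      [mysqld]
      skip-networking
    meta:
      autoedge: false    # don't auto-order this file after the package
edges:
- name: cnf-before-pkg   # explicit ordering: write my.cnf first
  from:
    kind: file
    name: my.cnf
  to:
    kind: pkg
    name: mysql-server
```
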
#### Blog post
|
||||
|
||||
You can read the introductory blog post about this topic here:
|
||||
[https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/)
|
||||
|
||||
### Autogrouping
|
||||
|
||||
Automatic grouping, or AutoGroup, is the mechanism by which mgmt will
automatically group multiple resource vertices into a single one. This is
|
||||
particularly useful for grouping multiple package resources into a single
|
||||
resource, since the multiple installations can happen together in a single
|
||||
transaction, which saves a lot of time because package resources typically have
|
||||
a large fixed cost to running (downloading and verifying the package repo) and
|
||||
if they are grouped they share this fixed cost. This grouping feature can be
|
||||
used for other use cases too.
|
||||
|
||||
You can disable autogrouping for a resource by setting the `autogroup` key on
|
||||
the meta attributes of that resource to `false`.
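
As a sketch in the same illustrative YAML style, the first two package
resources below would normally be grouped into a single transaction, while the
third opts out via its meta attributes:

```yaml
resources:
  pkg:
  - name: cowsay
    state: installed    # grouped with "sl" into one PackageKit transaction
  - name: sl
    state: installed
  - name: powertop
    state: installed
    meta:
      autogroup: false  # handle this one in its own transaction
```
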
#### Blog post
|
||||
|
||||
You can read the introductory blog post about this topic here:
|
||||
[https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/)
|
||||
|
||||
### Automatic clustering
|
||||
|
||||
Automatic clustering is a feature by which mgmt automatically builds, scales,
|
||||
and manages the embedded etcd cluster which is compiled into mgmt itself. It is
|
||||
quite helpful for rapidly bootstrapping clusters and avoiding the extra work of
setting up etcd.
|
||||
|
||||
If you prefer to avoid this feature, you can always opt to use an existing etcd
|
||||
cluster that is managed separately from mgmt by pointing your mgmt agents at it
|
||||
with the `--seeds` variable.
|
||||
|
||||
#### Blog post
|
||||
|
||||
You can read the introductory blog post about this topic here:
|
||||
[https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
|
||||
|
||||
### Remote ("agent-less") mode
|
||||
|
||||
Remote mode is a special mode that lets you kick off mgmt runs on one or more
|
||||
remote machines which are only accessible via SSH. In this mode the initiating
|
||||
host connects over SSH, copies over the `mgmt` binary, opens an SSH tunnel, and
|
||||
runs the remote program while simultaneously passing the etcd traffic back
|
||||
through the tunnel so that the initiator's etcd cluster can be used to exchange
|
||||
resource data.
|
||||
|
||||
The interesting benefit of this architecture is that hosts which can't connect
to each other directly can use the initiator to relay the important traffic
between them. Once the cluster has converged, all the remote programs can shut
down, leaving no residual agent.
|
||||
|
||||
This mode can also be useful for bootstrapping a new host where you'd like to
|
||||
have the service run continuously and as part of an mgmt cluster normally.
|
||||
|
||||
In particular, when combined with the `--converged-timeout` parameter, the
|
||||
entire set of running mgmt agents will need to all simultaneously converge for
|
||||
the group to exit. This is particularly useful for bootstrapping new clusters
|
||||
which need to exchange information that is only available at run time.
|
||||
|
||||
#### Blog post
|
||||
|
||||
You can read the introductory blog post about this topic here:
|
||||
[https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/)
|
||||
|
||||
### Puppet support
|
||||
|
||||
You can supply a Puppet manifest instead of creating the (YAML) graph manually.
|
||||
Puppet must be installed and in `mgmt`'s search path. You also need the
|
||||
[ffrank-mgmtgraph Puppet module](https://forge.puppet.com/ffrank/mgmtgraph).
|
||||
|
||||
Invoke `mgmt` with the `--puppet` switch, which supports 3 variants:
|
||||
|
||||
1. Request the configuration from the Puppet Master (like `puppet agent` does)
|
||||
|
||||
mgmt run --puppet agent
|
||||
|
||||
2. Compile a local manifest file (like `puppet apply`)
|
||||
|
||||
mgmt run --puppet /path/to/my/manifest.pp
|
||||
|
||||
3. Compile an ad hoc manifest from the commandline (like `puppet apply -e`)
|
||||
|
||||
mgmt run --puppet 'file { "/etc/ntp.conf": ensure => file }'
|
||||
|
||||
For more details and caveats see [Puppet.md](Puppet.md).
|
||||
|
||||
#### Blog post
|
||||
|
||||
An introductory post on the Puppet support is on
|
||||
[Felix's blog](http://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/).
|
||||
|
||||
## Resources
|
||||
|
||||
This section lists all the built-in resources and their properties. The
|
||||
resource primitives in `mgmt` are typically more powerful than resources in
|
||||
other configuration management systems because they can be event-based, which
|
||||
lets them respond in real-time to converge to the desired state. This property
|
||||
allows you to build more complex resources that you probably hadn't considered
|
||||
in the past.
|
||||
|
||||
In addition to the resource-specific properties, there are resource properties
|
||||
(otherwise known as parameters) which can apply to every resource. These are
|
||||
called [meta parameters](#meta-parameters) and are listed separately. Certain
|
||||
meta parameters aren't very useful when combined with certain resources, but
|
||||
in general, it should be fairly obvious, such as when combining the `noop` meta
|
||||
parameter with the [Noop](#Noop) resource.
|
||||
|
||||
* [Augeas](#Augeas): Manipulate files using augeas.
|
||||
* [Exec](#Exec): Execute shell commands on the system.
|
||||
* [File](#File): Manage files and directories.
|
||||
* [Hostname](#Hostname): Manages the hostname on the system.
|
||||
* [KV](#KV): Set a key value pair in our shared world database.
|
||||
* [Msg](#Msg): Send log messages.
|
||||
* [Noop](#Noop): A simple resource that does nothing.
|
||||
* [Nspawn](#Nspawn): Manage systemd-machined nspawn containers.
|
||||
* [Password](#Password): Create random password strings.
|
||||
* [Pkg](#Pkg): Manage system packages with PackageKit.
|
||||
* [Svc](#Svc): Manage system systemd services.
|
||||
* [Timer](#Timer): Manage timers which generate recurring events.
|
||||
* [Virt](#Virt): Manage virtual machines with libvirt.
|
||||
|
||||
|
||||
### Augeas
|
||||
|
||||
The augeas resource uses [augeas](http://augeas.net/) commands to manipulate
|
||||
files.
|
||||
|
||||
### Exec
|
||||
|
||||
The exec resource can execute commands on your system.
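
A hypothetical sketch in the illustrative YAML style; the `cmd` and `shell`
keys are assumptions based on the bundled examples, so check the exec resource
source for the exact field names:

```yaml
resources:
  exec:
  - name: exec1
    cmd: echo hello > /tmp/mgmt/exec1   # command to run (assumed key name)
    shell: /bin/bash                    # shell to run it with (assumed key name)
```
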
### File
|
||||
|
||||
The file resource manages files and directories. In `mgmt`, directories are
|
||||
identified by a trailing slash in their path name. Files have no such slash.
|
||||
|
||||
It has the following properties:
|
||||
|
||||
- `path`: file path (directories have a trailing slash here)
|
||||
- `content`: raw file content
|
||||
- `state`: either `exists` (the default value) or `absent`
|
||||
- `mode`: octal unix file permissions
|
||||
- `owner`: username or uid for the file owner
|
||||
- `group`: group name or gid for the file group
|
||||
|
||||
#### Path
|
||||
|
||||
The path property specifies the file or directory that we are managing.
|
||||
|
||||
#### Content
|
||||
|
||||
The content property is a string that specifies the desired file contents.
|
||||
|
||||
#### Source
|
||||
|
||||
The source property points to a source file or directory path that we wish to
|
||||
copy over and use as the desired contents for our resource.
|
||||
|
||||
#### State
|
||||
|
||||
The state property describes the action we'd like to apply for the resource. The
|
||||
possible values are: `exists` and `absent`.
|
||||
|
||||
#### Recurse
|
||||
|
||||
The recurse property limits whether file resource operations should recurse into
|
||||
and monitor directory contents with a depth greater than one.
|
||||
|
||||
#### Force
|
||||
|
||||
The force property is required if we want the file resource to be able to change
|
||||
a file into a directory or vice-versa. If such a change is needed, but the force
|
||||
property is not set to `true`, then this file resource will error.
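
Putting the properties above together, here is a sketch of a file and a
directory in the illustrative YAML style; note the trailing slash for the
directory, and that the exact representation of `mode` may differ:

```yaml
resources:
  file:
  - name: motd
    path: "/etc/motd"           # a file: no trailing slash
    content: |
      welcome to this host
    state: exists
    mode: "0644"                # octal unix permissions
    owner: root
    group: root
  - name: dir1
    path: "/tmp/mgmt/dir1/"     # a directory: trailing slash
    state: exists
```
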
### Hostname
|
||||
|
||||
The hostname resource manages static, transient/dynamic and pretty hostnames
|
||||
on the system and watches them for changes.
|
||||
|
||||
#### static_hostname
|
||||
The static hostname is the one configured in /etc/hostname or a similar
|
||||
file.
|
||||
It is chosen by the local user. It is not always in sync with the current
|
||||
host name as returned by the gethostname() system call.
|
||||
|
||||
#### transient_hostname
|
||||
The transient / dynamic hostname is the one configured via the kernel's
|
||||
sethostname() system call.
|
||||
It can be different from the static hostname in case DHCP or mDNS have been
|
||||
configured to change the name based on network information.
|
||||
|
||||
#### pretty_hostname
|
||||
The pretty hostname is a free-form UTF8 host name for presentation to the user.
|
||||
|
||||
#### hostname
|
||||
Hostname is the fallback value for all 3 fields above. If only `hostname` is
specified, it will set all 3 fields to this value.
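
A sketch of the hostname resource in the illustrative YAML style, using the
field names from the headings above:

```yaml
resources:
  hostname:
  - name: hostname1
    hostname: server1.example.com   # sets the static, transient and pretty names
    # alternatively, set static_hostname, transient_hostname or pretty_hostname
    # individually instead of using the hostname fallback
```
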
### KV
|
||||
|
||||
The KV resource sets a key and value pair in the global world database. This is
|
||||
quite useful for setting a flag after a number of resources have run. It will
|
||||
ignore database updates to the value that are greater in compare order than the
requested value if the `SkipLessThan` parameter is set to true. If we receive a
|
||||
refresh, then the stored value will be reset to the requested value even if the
|
||||
stored value is greater.
|
||||
|
||||
#### Key
|
||||
The string key used to store the value.
|
||||
|
||||
#### Value
|
||||
The string value to set. This can also be set via Send/Recv.
|
||||
|
||||
#### SkipLessThan
|
||||
If this parameter is set to `true`, then it will ignore updating the value as
|
||||
long as the database versions are greater than the requested value. The compare
|
||||
operation used is based on the `SkipCmpStyle` parameter.
|
||||
|
||||
#### SkipCmpStyle
|
||||
By default this converts the string values to integers and compares them as you
|
||||
would expect.
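
To make the default comparison behaviour concrete, here's a small standalone Go sketch of an integer-style compare. This only illustrates the semantics described above; it is not the engine's actual code:

```golang
package main

import (
	"fmt"
	"strconv"
)

// skipUpdate reports whether a stored value should be left alone because it
// compares greater than the requested value (the SkipLessThan behaviour).
func skipUpdate(stored, requested string) (bool, error) {
	s, err := strconv.Atoi(stored)
	if err != nil {
		return false, err
	}
	r, err := strconv.Atoi(requested)
	if err != nil {
		return false, err
	}
	return s > r, nil // skip if the stored value is already greater
}

func main() {
	skip, _ := skipUpdate("42", "13")
	fmt.Println(skip) // true: the stored "42" outranks the requested "13"
}
```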
|
||||
|
||||
### Msg
|
||||
|
||||
The msg resource sends messages to the main log, or an external service such
|
||||
as systemd's journal.
|
||||
|
||||
### Noop
|
||||
|
||||
The noop resource does absolutely nothing. It does have some utility in testing
|
||||
`mgmt` and also as a placeholder in the resource graph.
|
||||
|
||||
### Nspawn
|
||||
|
||||
The nspawn resource is used to manage systemd-machined style containers.
|
||||
|
||||
### Password
|
||||
|
||||
The password resource can generate a random string to be used as a password. It
|
||||
will re-generate the password if it receives a refresh notification.
|
||||
|
||||
### Pkg
|
||||
|
||||
The pkg resource is used to manage system packages. This resource works on many
|
||||
different distributions because it uses the underlying packagekit facility which
|
||||
supports different backends for different environments. This ensures that we
|
||||
have great Debian (deb/dpkg) and Fedora (rpm/dnf) support simultaneously.
|
||||
|
||||
### Svc
|
||||
|
||||
The service resource is still very much a work in progress. Please help us by improving it!
|
||||
|
||||
### Timer
|
||||
|
||||
This resource needs better documentation. Please help us by improving it!
|
||||
|
||||
### Virt
|
||||
|
||||
The virt resource can manage virtual machines via libvirt.
|
||||
|
||||
## Usage and frequently asked questions
|
||||
(Send your questions as a patch to this FAQ! I'll review it, merge it, and
|
||||
respond by commit with the answer.)
|
||||
|
||||
### Why did you start this project?
|
||||
|
||||
I wanted a next generation config management solution that didn't have all of
|
||||
the design flaws or limitations that the current generation of tools do, and no
|
||||
tool existed!
|
||||
|
||||
### Why did you use etcd? What about consul?
|
||||
|
||||
Etcd and consul are both written in golang, which made them the top two
|
||||
contenders for my prototype. Ultimately a choice had to be made, and etcd was
|
||||
chosen, but it was also somewhat arbitrary. If there is available interest,
|
||||
good reasoning, *and* patches, then we would consider either switching or
|
||||
supporting both, but this is not a high priority at this time.
|
||||
|
||||
### Can I use an existing etcd cluster instead of the automatic embedded servers?
|
||||
|
||||
Yes, it's possible to use an existing etcd cluster instead of the automatic,
|
||||
elastic embedded etcd servers. To do so, simply point to the cluster with the
|
||||
`--seeds` variable, the same way you would if you were seeding a new member to
|
||||
an existing mgmt cluster.
|
||||
|
||||
The downside to this approach is that you won't benefit from the automatic
|
||||
elastic nature of the embedded etcd servers, and that you're responsible if you
|
||||
accidentally break your etcd cluster, or if you use an unsupported version.
|
||||
|
||||
### What does the error message about an inconsistent dataDir mean?
|
||||
|
||||
If you get an error message similar to:
|
||||
|
||||
```
|
||||
Etcd: Connect: CtxError...
|
||||
Etcd: CtxError: Reason: CtxDelayErr(5s): No endpoints available yet!
|
||||
Etcd: Connect: Endpoints: []
|
||||
Etcd: The dataDir (/var/lib/mgmt/etcd) might be inconsistent or corrupt.
|
||||
```
|
||||
|
||||
This happens when there are a series of fatal connect errors in a row. This can
|
||||
happen when you start `mgmt` using a dataDir that doesn't correspond to the
|
||||
current cluster view. As a result, the embedded etcd server never finishes
|
||||
starting up, and so a default endpoint never gets added. The solution
|
||||
is to either reconcile the mistake, or, if there is no important data saved, you
|
||||
can remove the etcd dataDir. This is typically `/var/lib/mgmt/etcd/member/`.
|
||||
|
||||
### Why do resources have both a `Compare` method and an `IFF` (on the UID) method?
|
||||
|
||||
The `Compare()` methods are for determining if two resources are effectively the
|
||||
same, which is used to make graph change deltas efficient. This is when we want
|
||||
to change from the current running graph to a new graph, but preserve the common
|
||||
vertices. Since we want to make this process efficient, we only update the parts
|
||||
that are different, and leave everything else alone. This `Compare()` method can
|
||||
tell us if two resources are the same.
|
||||
|
||||
The `IFF()` method is part of the whole UID system, which is for discerning if a
|
||||
resource meets the requirements another expects for an automatic edge. This is
|
||||
because the automatic edge system assumes a unified UID pattern to test for
|
||||
equality. In the future it might be helpful or sane to merge the two similar
|
||||
comparison functions, although for now they are separate because they
|
||||
actually answer different questions.
|
||||
|
||||
### Did you know that there is a band named `MGMT`?
|
||||
|
||||
I didn't realize this when naming the project, and it is accidental. After much
|
||||
anguishing, I chose the name because it was short and I thought it was
|
||||
appropriately descriptive. If you need a less ambiguous search term or phrase,
|
||||
you can try using `mgmtconfig` or `mgmt config`.
|
||||
|
||||
### You didn't answer my question, or I have a question!
|
||||
|
||||
It's best to ask on [IRC](https://webchat.freenode.net/?channels=#mgmtconfig)
|
||||
to see if someone can help you. Once we get a big enough community going, we'll
|
||||
add a mailing list. If you don't get any response from the above, you can
|
||||
contact me through my [technical blog](https://ttboj.wordpress.com/contact/)
|
||||
and I'll do my best to help. If you have a good question, please add it as a
|
||||
patch to this documentation. I'll merge your question, and add a patch with the
|
||||
answer!
|
||||
|
||||
## Reference
|
||||
Please note that there are a number of undocumented options. For more
|
||||
information on these options, please view the source at:
|
||||
[https://github.com/purpleidea/mgmt/](https://github.com/purpleidea/mgmt/).
|
||||
If you feel that a well used option needs documenting here, please patch it!
|
||||
|
||||
### Overview of reference
|
||||
* [Meta parameters](#meta-parameters): List of available resource meta parameters.
|
||||
* [Graph definition file](#graph-definition-file): Main graph definition file.
|
||||
* [Command line](#command-line): Command line parameters.
|
||||
* [Compilation options](#compilation-options): Compilation options.
|
||||
|
||||
### Meta parameters
|
||||
These meta parameters are special parameters (or properties) which can apply to
|
||||
any resource. The usefulness of doing so will depend on the particular meta
|
||||
parameter and resource combination.
|
||||
|
||||
#### AutoEdge
|
||||
Boolean. Should we generate auto edges for this resource?
|
||||
|
||||
#### AutoGroup
|
||||
Boolean. Should we attempt to automatically group this resource with others?
|
||||
|
||||
#### Noop
|
||||
Boolean. Should the Apply portion of the CheckApply method of the resource
|
||||
make any changes? Noop is a contraction of no-operation.
|
||||
|
||||
#### Retry
|
||||
Integer. The number of times to retry running the resource on error. Use -1 for
|
||||
infinite. This currently applies for both the Watch operation (which can fail)
|
||||
and for the CheckApply operation. While they could have separate values, I've
|
||||
decided to use the same ones for both until there's a proper reason to want to
|
||||
do something differently for the Watch errors.
|
||||
|
||||
#### Delay
|
||||
Integer. Number of milliseconds to wait between retries. The same value is
|
||||
shared between the Watch and CheckApply retries. While
|
||||
they could have separate values, I've decided to use the same ones for both
|
||||
until there's a proper reason to want to do something differently for the Watch
|
||||
errors.
|
||||
|
||||
#### Poll
|
||||
Integer. Number of seconds to wait between `CheckApply` checks. If this is
|
||||
greater than zero, then the standard event based `Watch` mechanism for this
|
||||
resource is replaced with a simple polling mechanism. In general, this is not
|
||||
recommended, unless you have a very good reason for doing so.
|
||||
|
||||
Please keep in mind that if you have a resource which changes every `I` seconds,
|
||||
and you poll it every `J` seconds, and you've asked for a converged timeout of
|
||||
`K` seconds, and `I <= J <= K`, then your graph will likely never converge.
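
Here's a tiny standalone Go sketch that just makes the inequality above concrete with example numbers (purely illustrative):

```golang
package main

import "fmt"

func main() {
	// the resource changes every I seconds, we poll every J seconds,
	// and we asked for a converged timeout of K seconds
	I, J, K := 10, 15, 30

	// every poll (J) sees at least one change (I <= J), and the quiet
	// window needed to converge (K) is at least as long as a poll (J <= K),
	// so the converged timeout can never be reached
	fmt.Println("graph will likely never converge:", I <= J && J <= K) // true
}
```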
|
||||
|
||||
When polling, the system detects that a resource is not converged if its
|
||||
`CheckApply` method returns false. This allows a resource which changes every
|
||||
`I` seconds, and which is polled every `J` seconds, and with a converged timeout
|
||||
of `K` seconds to still converge when `J <= K`, as long as `I > J || I > K`,
|
||||
which is another way of saying that if the resource finally settles down to give
|
||||
the graph enough time, it can probably converge.
|
||||
|
||||
#### Limit
|
||||
Float. Maximum rate of `CheckApply` runs started per second. Useful to keep
|
||||
an especially _eventful_ process from causing excessive checks to run. This
|
||||
defaults to `+Infinity` which adds no limiting. If you change this value, you
|
||||
will also need to change the `Burst` value to a non-zero value. Please see the
|
||||
[rate](https://godoc.org/golang.org/x/time/rate) package for more information.
|
||||
|
||||
#### Burst
|
||||
Integer. Burst is the maximum number of runs which can happen without invoking
|
||||
the rate limiter as designated by the `Limit` value. If the `Limit` is not set
|
||||
to `+Infinity`, this must be a non-zero value. Please see the
|
||||
[rate](https://godoc.org/golang.org/x/time/rate) package for more information.
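
The following standalone Go sketch uses the linked rate package directly to show how `Limit` and `Burst` interact. It only demonstrates the token bucket semantics; it is not engine code:

```golang
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// A Limit of 0.5 runs per second with a Burst of 3: up to three runs
	// may start immediately, after which roughly one starts every two seconds.
	limiter := rate.NewLimiter(rate.Limit(0.5), 3)
	for i := 0; i < 5; i++ {
		fmt.Printf("run %d allowed now: %v\n", i, limiter.Allow())
	}

	// With rate.Inf (the default +Infinity limit) no limiting happens at all.
	unlimited := rate.NewLimiter(rate.Inf, 0)
	fmt.Println("unlimited allowed:", unlimited.Allow())
}
```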
|
||||
|
||||
#### Sema
|
||||
List of string ids. Sema is a P/V style counting semaphore which can be used to
|
||||
limit parallelism during the CheckApply phase of resource execution. Each
|
||||
resource can have `N` different semaphores which share a graph global namespace.
|
||||
Each semaphore has a maximum count associated with it. The default value of the
|
||||
size is 1 (one) if size is unspecified. Each string id is the unique id of the
|
||||
semaphore. If the id contains a trailing colon (:) followed by a positive
|
||||
integer, then that value is the max size for that semaphore. Valid semaphore
|
||||
ids include: `some_id`, `hello:42`, `not:smart:4` and `:13`. It is expected
|
||||
that the last bare example be only used by the engine to add a global semaphore.
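
To make the id format concrete, here's a standalone Go sketch that parses a semaphore id into a name and size, defaulting the size to 1. This illustrates the format described above; it is not the engine's parser:

```golang
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSemaID splits a semaphore id like "hello:42" into its name and size.
// Ids without a trailing ":<positive integer>" get the default size of 1.
func parseSemaID(id string) (name string, size int) {
	if i := strings.LastIndex(id, ":"); i != -1 {
		if n, err := strconv.Atoi(id[i+1:]); err == nil && n > 0 {
			return id[:i], n
		}
	}
	return id, 1
}

func main() {
	// the ":13" form has an empty name; the engine uses it for the global semaphore
	for _, id := range []string{"some_id", "hello:42", "not:smart:4", ":13"} {
		name, size := parseSemaID(id)
		fmt.Printf("%q -> name=%q size=%d\n", id, name, size)
	}
}
```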
|
||||
|
||||
### Graph definition file
|
||||
graph.yaml is the compiled graph definition file. The format is currently
|
||||
undocumented, but by looking through the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples)
|
||||
you can probably figure out most of it, as it's fairly intuitive.
|
||||
|
||||
### Command line
|
||||
The main interface to the `mgmt` tool is the command line. For the most recent
|
||||
documentation, please run `mgmt --help`.
|
||||
|
||||
#### `--yaml <graph.yaml>`
|
||||
Point to a graph file to run.
|
||||
|
||||
#### `--converged-timeout <seconds>`
|
||||
Exit if the machine has converged for approximately this many seconds.
|
||||
|
||||
#### `--max-runtime <seconds>`
|
||||
Exit when the agent has run for approximately this many seconds. This is not
|
||||
generally recommended, but may be useful for users who know what they're doing.
|
||||
|
||||
#### `--noop`
|
||||
Globally force all resources into no-op mode. This also disables the export to
|
||||
etcd functionality. It does not disable resource collection; however, all
|
||||
resources that are collected will have their individual noop settings set.
|
||||
|
||||
#### `--sema <size>`
|
||||
Globally add a counting semaphore of this size to each resource in the graph.
|
||||
The semaphore will be given an id of `:size`. In other words, if you specify a
|
||||
size of 42, you can expect a semaphore named `:42`. It is expected that
|
||||
consumers of the semaphore metaparameter always include a prefix to avoid a
|
||||
collision with this globally defined semaphore. The size value must be greater
|
||||
than zero at this time. The traditional non-parallel execution found in config
|
||||
management tools such as `Puppet` can be obtained with `--sema 1`.
|
||||
|
||||
#### `--remote <graph.yaml>`
|
||||
Point to a graph file to run on the remote host specified within. This parameter
|
||||
can be used multiple times if you'd like to remotely run on multiple hosts in
|
||||
parallel.
|
||||
|
||||
#### `--allow-interactive`
|
||||
Allow interactive prompting for SSH passwords if there is no authentication
|
||||
method that works.
|
||||
|
||||
#### `--ssh-priv-id-rsa`
|
||||
Specify the path for finding SSH keys. This defaults to `~/.ssh/id_rsa`. To
|
||||
never use this method of authentication, set this to the empty string.
|
||||
|
||||
#### `--cconns`
|
||||
The maximum number of concurrent remote ssh connections to run. This defaults
|
||||
to `0`, which means unlimited.
|
||||
|
||||
#### `--no-caching`
|
||||
Don't allow remote caching of the remote execution binary. This will require
|
||||
the binary to be copied over for every remote execution, but it limits the
|
||||
likelihood that there is leftover information from the configuration process.
|
||||
|
||||
#### `--prefix <path>`
|
||||
Specify a path to a custom working directory prefix. This directory will get
|
||||
created if it does not exist. This usually defaults to `/var/lib/mgmt/`. This
|
||||
can't be combined with the `--tmp-prefix` option. It can be combined with the
|
||||
`--allow-tmp-prefix` option.
|
||||
|
||||
#### `--tmp-prefix`
|
||||
If this option is specified, a temporary prefix will be used instead of the
|
||||
default prefix. This can't be combined with the `--prefix` option.
|
||||
|
||||
#### `--allow-tmp-prefix`
|
||||
If this option is specified, we will attempt to fall back to a temporary prefix
|
||||
if the primary prefix couldn't be created. This is useful for avoiding failures
|
||||
in environments where the primary prefix may or may not be available, but you'd
|
||||
like to try. The canonical example is when running `mgmt` with `--remote` there
|
||||
might be a cached copy of the binary in the primary prefix, but if no binary is
available there, we can continue working in a temporary directory to avoid failure.
|
||||
|
||||
### Compilation options
|
||||
|
||||
You can control some compilation variables by using environment variables.
|
||||
|
||||
#### Disable libvirt support
|
||||
|
||||
If you wish to compile mgmt without libvirt, you can use the following command:
|
||||
|
||||
```
|
||||
GOTAGS=novirt make build
|
||||
```
|
||||
|
||||
#### Disable augeas support
|
||||
|
||||
If you wish to compile mgmt without augeas support, you can use the following command:
|
||||
|
||||
```
|
||||
GOTAGS=noaugeas make build
|
||||
```
|
||||
|
||||
#### Combining compile-time flags
|
||||
|
||||
You can combine multiple tags by using a space-separated list:
|
||||
|
||||
```
|
||||
GOTAGS="noaugeas novirt" make build
|
||||
```
|
||||
|
||||
## Examples
|
||||
For example configurations, please consult the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples) directory in the git
|
||||
source repository. It is available from:
|
||||
|
||||
[https://github.com/purpleidea/mgmt/tree/master/examples](https://github.com/purpleidea/mgmt/tree/master/examples)
|
||||
|
||||
### Systemd:
|
||||
See [`misc/mgmt.service`](misc/mgmt.service) for a sample systemd unit file.
|
||||
This unit file is part of the RPM.
|
||||
|
||||
To specify your custom options for `mgmt` on a systemd distro:
|
||||
|
||||
```bash
|
||||
sudo mkdir -p /etc/systemd/system/mgmt.service.d/
|
||||
|
||||
sudo tee /etc/systemd/system/mgmt.service.d/env.conf > /dev/null <<EOF
|
||||
# Environment variables:
|
||||
MGMT_SEEDS=http://127.0.0.1:2379
|
||||
MGMT_CONVERGED_TIMEOUT=-1
|
||||
MGMT_MAX_RUNTIME=0
|
||||
|
||||
# Other CLI options if necessary.
|
||||
#OPTS="--max-runtime=0"
|
||||
EOF
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
This is a project that I started in my free time in 2013. Development is driven
|
||||
by all of our collective patches! Dive right in, and start hacking!
|
||||
Please contact me if you'd like to invite me to speak about this at your event.
|
||||
|
||||
You can follow along [on my technical blog](https://ttboj.wordpress.com/).
|
||||
|
||||
To report any bugs, please file a ticket at: [https://github.com/purpleidea/mgmt/issues](https://github.com/purpleidea/mgmt/issues).
|
||||
|
||||
## Authors
|
||||
|
||||
Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
|
||||
Please see the
|
||||
[AUTHORS](https://github.com/purpleidea/mgmt/tree/master/AUTHORS) file
|
||||
for more information.
|
||||
|
||||
* [github](https://github.com/purpleidea/)
|
||||
* [@purpleidea](https://twitter.com/#!/purpleidea)
|
||||
* [https://ttboj.wordpress.com/](https://ttboj.wordpress.com/)
|
||||
17
docs/index.rst
Normal file
@@ -0,0 +1,17 @@
|
||||
.. mgmt documentation master file, created by
|
||||
sphinx-quickstart on Wed Feb 15 21:34:09 2017.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to mgmt's documentation!
|
||||
================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Contents:
|
||||
|
||||
documentation
|
||||
quick-start-guide
|
||||
resource-guide
|
||||
prometheus
|
||||
puppet-guide
|
||||
66
docs/prometheus.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# Prometheus support
|
||||
|
||||
Mgmt comes with built-in prometheus support. It is disabled by default, and
|
||||
can be enabled with the `--prometheus` command line switch.
|
||||
|
||||
By default, the prometheus instance will listen on [`127.0.0.1:9233`][pd]. You
|
||||
can change this setting by using the `--prometheus-listen` cli option:
|
||||
|
||||
For example, to have the mgmt prometheus instance bind to 0.0.0.0:45001, use:
|
||||
`./mgmt r --prometheus --prometheus-listen :45001`
|
||||
|
||||
## Metrics
|
||||
|
||||
Mgmt exposes three kinds of metrics: _go_ metrics, _etcd_ metrics and _mgmt_
|
||||
metrics.
|
||||
|
||||
### go metrics
|
||||
|
||||
We use the [prometheus go_collector][pgc] to expose go metrics. Those metrics
|
||||
are mainly useful for debugging and perf testing.
|
||||
|
||||
### etcd metrics
|
||||
|
||||
mgmt exposes etcd metrics. Read more in the [upstream documentation][etcdm].
|
||||
|
||||
### mgmt metrics
|
||||
|
||||
Here is a list of the metrics we provide:
|
||||
|
||||
- `mgmt_resources_total`: The number of resources that mgmt is managing
|
||||
- `mgmt_checkapply_total`: The number of CheckApply runs that mgmt has executed
|
||||
- `mgmt_failures_total`: The total number of resource failures
|
||||
- `mgmt_failures_current`: The current number of failed resources
|
||||
- `mgmt_graph_start_time_seconds`: Start time of the current graph since unix epoch in seconds
|
||||
|
||||
For each metric, you will get some extra labels:
|
||||
|
||||
- `kind`: The kind of mgmt resource
|
||||
|
||||
For `mgmt_checkapply_total`, those extra labels are set:
|
||||
|
||||
- `eventful`: "true" or "false", if the CheckApply triggered some changes
|
||||
- `errorful`: "true" or "false", if the CheckApply reported an error
|
||||
- `apply`: "true" or "false", if the CheckApply ran in apply or noop mode
|
||||
|
||||
## Alerting
|
||||
|
||||
You can use prometheus to alert you upon changes or failures. We do not provide
|
||||
such templates yet, but we plan to provide some examples in this repository.
|
||||
Patches welcome!
|
||||
|
||||
## Grafana
|
||||
|
||||
We do not have grafana dashboards yet. Patches welcome!
|
||||
|
||||
## External resources
|
||||
|
||||
- [prometheus website](https://prometheus.io/)
|
||||
- [prometheus documentation](https://prometheus.io/docs/introduction/overview/)
|
||||
- [prometheus best practices regarding metrics
|
||||
naming](https://prometheus.io/docs/practices/naming/)
|
||||
- [grafana website](http://grafana.org/)
|
||||
|
||||
[pgc]: https://github.com/prometheus/client_golang/blob/master/prometheus/go_collector.go
|
||||
[etcdm]: https://coreos.com/etcd/docs/latest/metrics.html
|
||||
[pd]: https://github.com/prometheus/prometheus/wiki/Default-port-allocation
|
||||
166
docs/puppet-guide.md
Normal file
@@ -0,0 +1,166 @@
|
||||
# Puppet guide
|
||||
|
||||
`mgmt` can use Puppet as its source for the configuration graph.
|
||||
This document goes into detail on how this works, and lists
|
||||
some pitfalls and limitations.
|
||||
|
||||
For basic instructions on how to use the Puppet support, see
|
||||
the [main documentation](documentation.md#puppet-support).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You need Puppet installed on your system. It is not important how you
|
||||
get it. On the most common Linux distributions, you can use packages
|
||||
from the OS maintainer, or upstream Puppet repositories. An alternative
|
||||
that will also work on OSX is the `puppet` Ruby gem. It also has the
|
||||
advantage that you can install any desired version in your home directory
|
||||
or any other location.
|
||||
|
||||
Any release of Puppet's 3.x and 4.x series should be suitable for use with
|
||||
`mgmt`. Most importantly, make sure to install the `ffrank-mgmtgraph` Puppet
|
||||
module (referred to below as "the translator module").
|
||||
|
||||
```
|
||||
puppet module install ffrank-mgmtgraph
|
||||
```
|
||||
|
||||
Please note that the module is not required on your Puppet master (if you
|
||||
use a master/agent setup). It's needed on the machine that runs `mgmt`.
|
||||
You can install the module on the master anyway, so that it gets distributed
|
||||
to your agents through Puppet's `pluginsync` mechanism.
|
||||
|
||||
### Testing the Puppet side
|
||||
|
||||
The following command should run successfully and print a YAML hash on your
|
||||
terminal:
|
||||
|
||||
```
|
||||
puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": ensure => present }'
|
||||
```
|
||||
|
||||
You can use this CLI to test any manifests before handing them straight
|
||||
to `mgmt`.
|
||||
|
||||
## Writing a suitable manifest
|
||||
|
||||
### Unsupported attributes
|
||||
|
||||
`mgmt` inherited its resource module from Puppet, so by and large, it's quite
|
||||
possible to express `mgmt` graphs in terms of Puppet manifests. However,
|
||||
there isn't (and likely never will be) full feature parity between the
|
||||
respective resource types. In consequence, a manifest can have semantics that
|
||||
cannot be transferred to `mgmt`.
|
||||
|
||||
For example, at the time of writing this, the `file` type in `mgmt` had no
|
||||
notion of permissions (the file `mode`) yet. This led to the following
|
||||
warning (among others that will be discussed below):
|
||||
|
||||
```
|
||||
$ puppet mgmtgraph print --code 'file { "/tmp/foo": mode => "0600" }'
|
||||
Warning: cannot translate: File[/tmp/foo] { mode => "600" } (attribute is ignored)
|
||||
```
|
||||
|
||||
This is a heads-up for the user, because the resulting `mgmt` graph will
|
||||
in fact not pass this information to the `/tmp/foo` file resource, and
|
||||
`mgmt` will ignore this file's permissions. Including such attributes in
|
||||
manifests that are written expressly for `mgmt` is not sensible and should
|
||||
be avoided.
|
||||
|
||||
### Unsupported resources
|
||||
|
||||
Puppet has a fairly large number of
|
||||
[built-in types](https://docs.puppet.com/puppet/latest/reference/type.html),
|
||||
and countless more are available through
|
||||
[modules](https://forge.puppet.com/). It's unlikely that all of them will
|
||||
eventually receive native counterparts in `mgmt`.
|
||||
|
||||
When encountering an unknown resource, the translator module will replace
|
||||
it with an `exec` resource in its output. This resource will run the equivalent
|
||||
of a `puppet resource` command to make Puppet apply the original resource
|
||||
itself. This has quite abysmal performance, because processing such a
|
||||
resource requires the forking of at least one Puppet process (two if it
|
||||
is found to be out of sync). This comes with considerable overhead.
|
||||
On most systems, starting up any Puppet command takes several seconds.
|
||||
Compared to the split second that the actual work usually takes,
|
||||
this overhead can amount to several orders of magnitude.
|
||||
|
||||
Avoid Puppet types that `mgmt` does not implement (yet).
|
||||
|
||||
### Avoiding common warnings
|
||||
|
||||
Many resource parameters in Puppet take default values. For the most part,
|
||||
the translator module just ignores them. However, there are cases in which
|
||||
Puppet will default to convenient behavior that `mgmt` cannot quite replicate.
|
||||
For example, translating a plain `file` resource will lead to a warning message:
|
||||
|
||||
```
|
||||
$ puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": }'
|
||||
Warning: File[/tmp/mgmt-test] uses the 'puppet' file bucket, which mgmt cannot do. There will be no backup copies!
|
||||
```
|
||||
|
||||
The reason is that, by default, Puppet assumes the following parameter value
|
||||
(among others):
|
||||
|
||||
```puppet
|
||||
file { "/tmp/mgmt-test":
|
||||
backup => 'puppet',
|
||||
}
|
||||
```
|
||||
|
||||
To avoid this, specify the parameter explicitly:
|
||||
|
||||
```
|
||||
$ puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": backup => false }'
|
||||
```
|
||||
|
||||
This is tedious in a more complex manifest. A good simplification is the
|
||||
following [resource default](https://docs.puppet.com/puppet/latest/reference/lang_defaults.html)
|
||||
anywhere on the top scope of your manifest:
|
||||
|
||||
```puppet
|
||||
File { backup => false }
|
||||
```
|
||||
|
||||
If you encounter similar warnings from other types and/or parameters,
|
||||
use the same approach to silence them if possible.
|
||||
|
||||
## Configuring Puppet
|
||||
|
||||
Since `mgmt` uses an actual Puppet CLI behind the scenes, you might
|
||||
need to tweak some of Puppet's runtime options in order to make it
|
||||
do what you want. Reasons for this could be among the following:
|
||||
|
||||
* You use the `--puppet agent` variant and need to configure
|
||||
`server`, `certname` and other master/agent-related options.
|
||||
* You don't want runtime information to end up in the `vardir`
|
||||
that is used by your regular `puppet agent`.
|
||||
* You install specific Puppet modules for `mgmt` in a non-standard
|
||||
location.
|
||||
|
||||
`mgmt` exposes just one option to control all of these settings:
`--puppet-conf`. It allows
|
||||
you to specify which `puppet.conf` file should be used during
|
||||
translation.
|
||||
|
||||
```
|
||||
mgmt run --puppet /opt/my-manifest.pp --puppet-conf /etc/mgmt/puppet.conf
|
||||
```
|
||||
|
||||
Within this file, you can just specify any needed options in the
|
||||
`[main]` section:
|
||||
|
||||
```
|
||||
[main]
|
||||
server=mgmt-master.example.net
|
||||
vardir=/var/lib/mgmt/puppet
|
||||
```
|
||||
|
||||
## Caveats
|
||||
|
||||
Please see the [README](https://github.com/ffrank/puppet-mgmtgraph/blob/master/README.md)
|
||||
of the translator module for the current state of supported and unsupported
|
||||
language features.
|
||||
|
||||
You should probably make sure to always use the latest release of
|
||||
both `ffrank-mgmtgraph` and `ffrank-yamlresource` (the latter is
|
||||
getting pulled in as a dependency of the former).
|
||||
93
docs/quick-start-guide.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# Quick start guide
|
||||
|
||||
## Introduction
|
||||
This guide is intended for developers. Once `mgmt` is minimally viable, we'll
|
||||
publish a quick start guide for users too. In the meantime, please contribute!
|
||||
If you're brand new to `mgmt`, it's probably a good idea to start by reading the
|
||||
[introductory article](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
|
||||
or to watch an [introductory video](https://github.com/purpleidea/mgmt/#on-the-web).
|
||||
Once you're familiar with the general idea, please start hacking...
|
||||
|
||||
## Vagrant
|
||||
If you would like to avoid doing the following steps manually, we have prepared
|
||||
a [Vagrant](https://www.vagrantup.com/) environment for your convenience. From
|
||||
the project directory, run a `vagrant up`, and then a `vagrant status`. From
|
||||
there, you can `vagrant ssh` into the `mgmt` machine. The MOTD will explain the
|
||||
rest.
|
||||
|
||||
## Dependencies
|
||||
Software projects have a few different kinds of dependencies. There are _build_
|
||||
dependencies, _runtime_ dependencies, and additionally, a few extra dependencies
|
||||
required for running the _test_ suite.
|
||||
|
||||
### Build
|
||||
* `golang` 1.6 or higher (required, available in most distros)
|
||||
* golang libraries (required, available with `go get ./...`) a partial list includes:
|
||||
```
|
||||
github.com/coreos/etcd/client
|
||||
gopkg.in/yaml.v2
|
||||
gopkg.in/fsnotify.v1
|
||||
github.com/urfave/cli
|
||||
github.com/coreos/go-systemd/dbus
|
||||
github.com/coreos/go-systemd/util
|
||||
github.com/libvirt/libvirt-go
|
||||
```
|
||||
* `stringer` (optional), available as a package on some platforms, otherwise via `go get`
|
||||
```
|
||||
golang.org/x/tools/cmd/stringer
|
||||
```
|
||||
* `pandoc` (optional), for building a pdf of the documentation
|
||||
|
||||
### Runtime
|
||||
A relatively modern GNU/Linux system should be able to run `mgmt` without any
|
||||
problems. Since `mgmt` runs as a single statically compiled binary, all of the
|
||||
library dependencies are included. It is expected that certain advanced
|
||||
resources require host specific facilities to work. These requirements are
|
||||
listed below:
|
||||
|
||||
| Resource | Dependency | Version |
|
||||
|----------|-------------------|---------|
|
||||
| file | inotify | ? |
|
||||
| hostname | systemd-hostnamed | ? |
|
||||
| nspawn | systemd-nspawn | ? |
|
||||
| pkg | packagekitd | ? |
|
||||
| svc | systemd | ? |
|
||||
| virt | libvirtd | ? |
|
||||
|
||||
For building a visual representation of the graph, `graphviz` is required.
|
||||
|
||||
### Testing
|
||||
* golint `github.com/golang/lint/golint`
|
||||
|
||||
## Quick start
|
||||
* Make sure you have golang version 1.6 or greater installed.
|
||||
* If you do not have a GOPATH yet, create one and export it:
|
||||
```
|
||||
mkdir $HOME/gopath
|
||||
export GOPATH=$HOME/gopath
|
||||
```
|
||||
* You might also want to add the GOPATH to your `~/.bashrc` or `~/.profile`.
|
||||
* For more information you can read the [GOPATH documentation](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable).
|
||||
* Next download the mgmt code base, and switch to that directory:
|
||||
```
|
||||
mkdir -p $GOPATH/src/github.com/purpleidea/
|
||||
cd $GOPATH/src/github.com/purpleidea/
|
||||
git clone --recursive https://github.com/purpleidea/mgmt/
|
||||
cd $GOPATH/src/github.com/purpleidea/mgmt
|
||||
```
|
||||
* Run `make deps` to install system and golang dependencies. Take a look at `misc/make-deps.sh` for details.
|
||||
* Run `make build` to get a freshly built `mgmt` binary.
|
||||
* Run `time ./mgmt run --yaml examples/graph0.yaml --converged-timeout=5 --tmp-prefix` to try out a very simple example!
|
||||
* To run continuously in the default mode of operation, omit the `--converged-timeout` option.
|
||||
* Have fun hacking on our future technology!
|
||||
|
||||
## Examples
|
||||
Please look in the [examples/](../examples/) folder for some examples!
|
||||
|
||||
## Installation
|
||||
Installation of `mgmt` from distribution packages currently needs improvement.
|
||||
At the moment we have:
|
||||
* [COPR](https://copr.fedoraproject.org/coprs/purpleidea/mgmt/)
|
||||
* [Arch](https://aur.archlinux.org/packages/mgmt/)
|
||||
|
||||
Please contribute more! We'd especially like to see a Debian package!
|
||||
576
docs/resource-guide.md
Normal file
@@ -0,0 +1,576 @@
|
||||
# Resource guide
|
||||
|
||||
## Overview
|
||||
|
||||
The `mgmt` tool has built-in resource primitives which make up the building
|
||||
blocks of any configuration. Each instance of a resource is mapped to a single
|
||||
vertex in the resource [graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph).
|
||||
This guide is meant to instruct developers on how to write a brand new resource.
|
||||
Since `mgmt` and the core resources are written in golang, some prior golang
|
||||
knowledge is assumed.
|
||||
|
||||
## Theory
|
||||
|
||||
Resources in `mgmt` are similar to resources in other systems in that they are
|
||||
[idempotent](https://en.wikipedia.org/wiki/Idempotence). Our resources are
|
||||
uniquely different in that they can detect when their state has changed, and as
|
||||
a result can run to revert or repair this change instantly. For some background
|
||||
on this design, please read the
|
||||
[original article](https://ttboj.wordpress.com/2016/01/18/next-generation-configuration-mgmt/)
|
||||
on the subject.
|
||||
|
||||
## Resource API
|
||||
|
||||
To implement a resource in `mgmt` it must satisfy the
|
||||
[`Res`](https://github.com/purpleidea/mgmt/blob/master/resources/resources.go)
|
||||
interface. What follows are each of the method signatures and a description of
|
||||
each.
|
||||
|
||||
### Default
|
||||
```golang
|
||||
Default() Res
|
||||
```
|
||||
|
||||
This returns a populated resource struct as a `Res`. It shouldn't populate any
|
||||
values which already have the correct default as the golang zero value. In
|
||||
general it is preferable if the zero values make for the correct defaults.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
// Default returns some sensible defaults for this resource.
|
||||
func (obj *FooRes) Default() Res {
|
||||
return &FooRes{
|
||||
Answer: 42, // sometimes, defaults shouldn't be the zero value
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Validate
|
||||
```golang
|
||||
Validate() error
|
||||
```
|
||||
|
||||
This method is used to validate if the populated resource struct is a valid
|
||||
representation of the resource kind. If it does not conform to the resource
|
||||
specifications, it should generate an error. If you notice that this method is
|
||||
quite large, it might be an indication that you should reconsider the parameter
|
||||
list and interface to this resource. This method is called _before_ `Init`.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
// Validate reports any problems with the struct definition.
|
||||
func (obj *FooRes) Validate() error {
|
||||
if obj.Answer != 42 { // validate whatever you want
|
||||
return fmt.Errorf("expected an answer of 42")
|
||||
}
|
||||
return obj.BaseRes.Validate() // remember to call the base method!
|
||||
}
|
||||
```
|
||||
|
||||
### Init
|
||||
```golang
|
||||
Init() error
|
||||
```
|
||||
|
||||
This is called to initialize the resource. If something goes wrong, it should
|
||||
return an error. It should set the resource `kind`, do any resource specific
|
||||
work, and finish by calling the `Init` method of the base resource.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
// Init initializes the Foo resource.
|
||||
func (obj *FooRes) Init() error {
|
||||
obj.BaseRes.Kind = "foo" // must be the lower case resource kind
|
||||
// run the resource specific initialization, and error if anything fails
|
||||
if some_error {
|
||||
return err // something went wrong!
|
||||
}
|
||||
return obj.BaseRes.Init() // call the base resource init
|
||||
}
|
||||
```
|
||||
|
||||
This method is always called after `Validate` has run successfully, with the
|
||||
exception that we can't prevent a malicious or buggy `libmgmt` user from skipping
|
||||
this. In other words, you should expect `Validate` to have run first, but you
|
||||
shouldn't allow `Init` to dangerously `rm -rf /$the_world` if your code only
|
||||
checks `$the_world` in `Validate`. Remember to always program safely!
|
||||
|
||||
### Close
|
||||
```golang
|
||||
Close() error
|
||||
```
|
||||
|
||||
This is called to cleanup after the resource. It is usually not necessary, but
|
||||
can be useful if you'd like to properly close a persistent connection that you
|
||||
opened in the `Init` method and were using throughout the resource.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
// Close runs some cleanup code for this resource.
|
||||
func (obj *FooRes) Close() error {
|
||||
err := obj.conn.Close() // close some internal connection
|
||||
|
||||
// call base close, b/c we're overriding
|
||||
if e := obj.BaseRes.Close(); err == nil {
|
||||
err = e
|
||||
} else if e != nil {
|
||||
err = multierr.Append(err, e) // list of errors
|
||||
}
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
You should probably check the return errors of your internal methods, and pass
|
||||
on an error if something went wrong. Remember to always call the base `Close`
|
||||
method! If you plan to return early if you hit an internal error, then at least
|
||||
call it with a defer!
|
||||
|
||||
### CheckApply
|
||||
```golang
|
||||
CheckApply(apply bool) (checkOK bool, err error)
|
||||
```
|
||||
|
||||
`CheckApply` is where the real _work_ is done. Under normal circumstances, this
|
||||
function should check if the state of this resource is correct, and if so, it
|
||||
should return: `(true, nil)`. If the `apply` variable is set to `true`, then
|
||||
this means that we should then proceed to run the changes required to bring the
|
||||
resource into the correct state. If the `apply` variable is set to `false`, then
|
||||
the resource is operating in _noop_ mode and _no operations_ should be executed!
|
||||
|
||||
After having executed the necessary operations to bring the resource back into
|
||||
the desired state, or after having detected that the state was incorrect, but
|
||||
that changes can't be made because `apply` is `false`, you should then return
|
||||
`(false, nil)`.
|
||||
|
||||
You must cause the resource to converge during a single execution of this
|
||||
function. If you cannot, then you must return an error! The exception to this
|
||||
rule is that if an external force changes the state of the resource while it is
|
||||
being remedied, it is possible to return from this function even though the
|
||||
resource isn't now converged. This is not a bug, as the resource's `Watch`
|
||||
facility will detect the change, ultimately resulting in a subsequent call to
|
||||
`CheckApply`.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
// CheckApply does the idempotent work of checking and applying resource state.
|
||||
func (obj *FooRes) CheckApply(apply bool) (bool, error) {
|
||||
// check the state
|
||||
if state_is_okay { return true, nil } // done early! :)
|
||||
// state was bad
|
||||
if !apply { return false, nil } // don't apply; !stateok, nil
|
||||
// do the apply!
|
||||
if any_error { return false, err } // anytime there's an err!
return false, nil // after success applying
|
||||
}
|
||||
```
|
||||
|
||||
The `CheckApply` function is called by the `mgmt` engine when it believes a call
|
||||
is necessary. Under certain conditions when a `Watch` call does not invalidate
|
||||
the state of the resource, and no refresh call was sent, its execution might be
|
||||
skipped. This is an engine optimization, and not a bug. It is mentioned here in
|
||||
the documentation in case you are confused as to why a debug message you've
|
||||
added to the code isn't always printed.
|
||||
|
||||
#### Refresh notifications
|
||||
Some resources may choose to support receiving refresh notifications. In general
|
||||
these should be avoided if possible, but nevertheless, they do make sense in
|
||||
certain situations. Resources that support these need to verify if one was sent
|
||||
during the CheckApply phase of execution. This is accomplished by calling the
|
||||
`Refresh() bool` method of the resource, and inspecting the return value. This
|
||||
is only necessary if you plan to perform a refresh action. Refresh actions
|
||||
should still respect the `apply` variable, and no system changes should be made
|
||||
if it is `false`. Refresh notifications are generated by any resource when an
|
||||
action is applied by that resource and are transmitted through graph edges which
|
||||
have enabled their propagation. Resources that currently perform some refresh
|
||||
action include `svc`, `timer`, and `password`.
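
Here's a minimal sketch of what checking for a refresh might look like inside `CheckApply`, in the same loose style as the other examples (the actual refresh action is resource specific):

```golang
// somewhere inside CheckApply, after deciding that a refresh matters here
if obj.Refresh() { // did we get a refresh notification?
	if !apply {
		return false, nil // a refresh is pending, but we're in noop mode
	}
	// perform the resource specific refresh action here,
	// e.g. restart a service or regenerate a password...
}
```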
|
||||
|
||||
#### Paired execution
|
||||
For many resources it is not uncommon to see `CheckApply` run twice in rapid
|
||||
succession. This is usually not a pathological occurrence, but rather a healthy
|
||||
pattern which is a consequence of the event system. When the state of the
|
||||
resource is incorrect, `CheckApply` will run to remedy the state. In response to
|
||||
having just changed the state, it is usually the case that this repair will
|
||||
trigger the `Watch` code! In response, a second `CheckApply` is triggered, which
|
||||
will likely find the state to now be correct.
|
||||
|
||||
#### Summary
|
||||
* Anytime an error occurs during `CheckApply`, you should return `(false, err)`.
|
||||
* If the state is correct and no changes are needed, return `(true, nil)`.
|
||||
* You should only make changes to the system if `apply` is set to `true`.
|
||||
* After checking the state and possibly applying the fix, return `(false, nil)`.
|
||||
* Returning `(true, err)` is a programming error and will cause a `Fatal`.
|
||||
|
||||
### Watch
|
||||
```golang
|
||||
Watch() error
|
||||
```
|
||||
|
||||
`Watch` is a main loop that runs and sends messages when it detects that the
|
||||
state of the resource might have changed. To send a message you should write to
|
||||
the input event channel using the `Event` helper method. The Watch function
|
||||
should run continuously until a shutdown message is received. If at any time
|
||||
something goes wrong, you should return an error, and the `mgmt` engine will
|
||||
handle possibly restarting the main loop based on the `retry` meta parameters.
|
||||
|
||||
It is better to send an event notification which turns out to be spurious, than
|
||||
to miss a possible event. Resources which can miss events are incorrect and need
|
||||
to be re-engineered so that this isn't the case. If you have an idea for a
|
||||
resource which would fit this criteria, but you can't find a solution, please
|
||||
contact the `mgmt` maintainers so that this problem can be investigated and a
|
||||
possible system level engineering fix can be found.
|
||||
|
||||
You may have trouble deciding how much resource state checking should happen in
|
||||
the `Watch` loop versus deferring it all to the `CheckApply` method. You may
|
||||
want to put some simple fast path checking in `Watch` to avoid generating
|
||||
obviously spurious events, but in general it's best to keep the `Watch` method
|
||||
as simple as possible. Contact the `mgmt` maintainers if you're not sure.
|
||||
|
||||
If the resource is activated in `polling` mode, the `Watch` method will not get
|
||||
executed. As a result, the resource must still work even if the main loop is not
|
||||
running.
|
||||
|
||||
#### Select
|
||||
The lifetime of most resources' `Watch` method should be spent in an infinite
|
||||
loop that is bounded by a `select` call. The `select` call is the point where
|
||||
our method hands back control to the engine (and the kernel) so that we can
|
||||
sleep until something of interest wakes us up. In this loop we must process
|
||||
events from the engine via the `<-obj.Events()` call, and receive events for our
|
||||
resource itself!
|
||||
|
||||
#### Events
|
||||
If we receive an internal event from the `<-obj.Events()` method, we can read it
|
||||
with the ReadEvent helper function. This function tells us if we should shutdown
|
||||
our resource, and if we should generate an event. When we want to send an event,
|
||||
we use the `Event` helper function. It is also important to mark the resource
|
||||
state as `dirty` if we believe it might have changed. We do this with the
|
||||
`StateOK(false)` function.
|
||||
|
||||
#### Startup
|
||||
Once the `Watch` function has finished starting up successfully, it is important
|
||||
to generate one event to notify the `mgmt` engine that we're now listening
|
||||
successfully, so that it can run an initial `CheckApply` to ensure we're safely
|
||||
tracking a healthy state and that we didn't miss anything when `Watch` was down
|
||||
or from before `mgmt` was running. It does this by calling the `Running` method.
|
||||
|
||||
#### Converged
|
||||
The engine might be asked to shutdown when the entire state of the system has
|
||||
not seen any changes for some duration of time. The engine can determine this
|
||||
automatically, but each resource can block this if it is absolutely necessary.
|
||||
To do this, the `Watch` method should get the `ConvergerUID` handle that has
|
||||
been prepared for it by the engine. This is done by calling the `ConvergerUID`
|
||||
method on the resource object. The result can be used to set the converged
|
||||
status with `SetConverged`, and to notify when the particular timeout has been
|
||||
reached by waiting on `ConvergedTimer`.
|
||||
|
||||
Instead of interacting with the `ConvergerUID` through these two methods, we can
|
||||
instead use the `StartTimer` and `ResetTimer` methods which accomplish the same
|
||||
thing, but provide a `select`-free interface for different coding situations.
|
||||
|
||||
This particular facility is most likely not required for most resources. It may
|
||||
prove to be useful if a resource wants to start off a long operation, but avoid
|
||||
sending out erroneous `Event` messages to keep things alive until it finishes.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
// Watch is the listener and main loop for this resource.
|
||||
func (obj *FooRes) Watch() error {
|
||||
// setup the Foo resource
|
||||
var err error
|
||||
if obj.foo, err = OpenFoo(); err != nil {
|
||||
return err // we couldn't startup
|
||||
}
|
||||
defer obj.foo.CloseFoo() // shut down our connection to foo when we're done
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.Running(); err != nil {
|
||||
return err // bubble up a NACK...
|
||||
}
|
||||
|
||||
var send = false // send event?
|
||||
var exit *error
|
||||
for {
|
||||
select {
|
||||
case event := <-obj.Events():
|
||||
// we avoid sending events on unpause
|
||||
if exit, send = obj.ReadEvent(event); exit != nil {
|
||||
return *exit // exit
|
||||
}
|
||||
|
||||
// the actual events!
|
||||
case event := <-obj.foo.Events:
|
||||
if is_an_event {
|
||||
send = true // used below
|
||||
obj.StateOK(false) // dirty
|
||||
}
|
||||
|
||||
// event errors
|
||||
case err := <-obj.foo.Errors:
|
||||
return err // will cause a retry or permanent failure
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
obj.Event() // send the event!
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Summary
|
||||
* Remember to call the appropriate `converger` methods throughout the resource.
|
||||
* Remember to call `Running` when the `Watch` is running successfully.
|
||||
* Remember to process internal events and shutdown promptly if asked to.
|
||||
* Ensure the design of your resource is well thought out.
|
||||
* Have a look at the existing resources for a rough idea of how this all works.
|
||||
|
||||
### Compare
|
||||
```golang
|
||||
Compare(Res) bool
|
||||
```
|
||||
|
||||
Each resource must have a `Compare` method. This takes as input another resource
|
||||
and must return whether they are identical or not. This is used for identifying
|
||||
if an existing resource can be used in place of a new one with a similar set of
|
||||
parameters. In particular, when switching from one graph to a new (possibly
|
||||
identical) graph, this avoids recomputing the state for resources which don't
|
||||
change or that are sufficiently similar that they don't need to be swapped out.
|
||||
|
||||
In general if all the resource properties are identical, then they usually don't
|
||||
need to be changed. On occasion, not all of them need to be compared, in
|
||||
particular if they store some generated state, or if they aren't significant in
|
||||
some way.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
// Compare two resources and return if they are equivalent.
|
||||
func (obj *FooRes) Compare(r Res) bool {
|
||||
// we can only compare FooRes to others of the same resource kind
|
||||
res, ok := r.(*FooRes)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if !obj.BaseRes.Compare(res) { // call base Compare
|
||||
return false
|
||||
}
|
||||
if obj.Name != res.Name {
|
||||
return false
|
||||
}
|
||||
|
||||
if obj.whatever != res.whatever {
|
||||
return false
|
||||
}
|
||||
if obj.Flag != res.Flag {
|
||||
return false
|
||||
}
|
||||
|
||||
return true // they must match!
|
||||
}
|
||||
```
|
||||
|
||||
### UIDs
|
||||
```golang
|
||||
UIDs() []ResUID
|
||||
```
|
||||
|
||||
The `UIDs` method returns a list of `ResUID` interfaces that represent the
|
||||
particular resource uniquely. This is used with the AutoEdges API to determine
|
||||
if another resource can match a dependency to this one.
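
Here's a rough sketch of the usual pattern, in the loose style of the other examples. The `FooUID` struct and the exact helper and field names are assumptions; consult an existing resource such as `noop` or `pkg` for the canonical version:

```golang
// FooUID is a hypothetical UID struct for FooRes.
type FooUID struct {
	BaseUID        // common name/kind identification
	name    string // the foo specific part of the identity
}

// UIDs returns the list of UIDs that uniquely identify this resource.
func (obj *FooRes) UIDs() []ResUID {
	x := &FooUID{
		BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()}, // assumed helper names
		name:    obj.Name,
	}
	return []ResUID{x}
}
```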
|
||||
|
||||
### AutoEdges
|
||||
```golang
|
||||
AutoEdges() (AutoEdge, error)
|
||||
```
|
||||
|
||||
This returns a struct that implements the `AutoEdge` interface. This struct
|
||||
is used to match other resources that might be relevant dependencies for this
|
||||
resource.
|
||||
|
||||
### CollectPattern
|
||||
```golang
|
||||
CollectPattern() string
|
||||
```
|
||||
|
||||
This is currently a stub and will be updated once the DSL is further along.
|
||||
|
||||
### UnmarshalYAML
|
||||
```golang
|
||||
UnmarshalYAML(unmarshal func(interface{}) error) error // optional
|
||||
```
|
||||
|
||||
This is optional, but recommended for any resource that will have a YAML
|
||||
accessible struct, and an entry in the `GraphConfig` struct. It is not required
|
||||
because to do so would mean that third-party or custom resources (such as those
|
||||
someone writes to use with `libmgmt`) would have to implement this needlessly.
|
||||
|
||||
The signature intentionally matches what is required to satisfy the `go-yaml`
|
||||
[Unmarshaler](https://godoc.org/gopkg.in/yaml.v2#Unmarshaler) interface.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
// UnmarshalYAML is the custom unmarshal handler for this struct.
|
||||
// It is primarily useful for setting the defaults.
|
||||
func (obj *FooRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
type rawRes FooRes // indirection to avoid infinite recursion
|
||||
|
||||
def := obj.Default() // get the default
|
||||
res, ok := def.(*FooRes) // put in the right format
|
||||
if !ok {
|
||||
return fmt.Errorf("could not convert to FooRes")
|
||||
}
|
||||
raw := rawRes(*res) // convert; the defaults go here
|
||||
|
||||
if err := unmarshal(&raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*obj = FooRes(raw) // restore from indirection with type conversion!
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
## Further considerations
|
||||
There is some additional information that any resource writer will need to know.
|
||||
Each issue is listed separately below!
|
||||
|
||||
### Resource struct
|
||||
Each resource will implement methods as pointer receivers on a resource struct.
|
||||
The resource struct must include an anonymous reference to the `BaseRes` struct.
|
||||
The naming convention for resources is that they end with a `Res` suffix. If
|
||||
you'd like your resource to be accessible by the `YAML` graph API (GAPI), then
|
||||
you'll need to include the appropriate YAML fields as shown below.
|
||||
|
||||
#### Example
|
||||
```golang
|
||||
type FooRes struct {
|
||||
BaseRes `yaml:",inline"` // base properties
|
||||
|
||||
Whatever string `yaml:"whatever"` // you pick!
|
||||
Bar int // no yaml, used as public output value for send/recv
|
||||
Baz bool `yaml:"baz"` // something else
|
||||
|
||||
something string // some private field
|
||||
}
|
||||
```
|
||||
|
||||
### YAML
|
||||
In addition to labelling your resource struct with YAML fields, you must also
|
||||
add an entry to the internal `GraphConfig` struct. It is a fairly
straightforward one-line patch.
|
||||
|
||||
```golang
|
||||
type GraphConfig struct {
|
||||
// [snip...]
|
||||
Resources struct {
|
||||
Noop []*resources.NoopRes `yaml:"noop"`
|
||||
File []*resources.FileRes `yaml:"file"`
|
||||
// [snip...]
|
||||
Foo []*resources.FooRes `yaml:"foo"` // tada :)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
It's also recommended that you add the [UnmarshalYAML](#unmarshalyaml) method to
|
||||
your resources so that unspecified values are given sane defaults.
|
||||
|
||||
### Gob registration
|
||||
All resources must be registered with the `golang` _gob_ module so that they can
|
||||
be encoded and decoded. Make sure to include the following code snippet for this
|
||||
to work.
|
||||
|
||||
```golang
|
||||
import "encoding/gob"
|
||||
func init() { // special golang method that runs once
|
||||
gob.Register(&FooRes{}) // substitute your resource here
|
||||
}
|
||||
```
|
||||
|
||||
## Automatic edges
|
||||
Automatic edges in `mgmt` are well described in [this article](https://ttboj.wordpress.com/2016/03/14/automatic-edges-in-mgmt/).
|
||||
The best example of this technique can be seen in the `svc` resource.
|
||||
Unfortunately no further documentation about this subject has been written. To
|
||||
expand this section, please send a patch! Please contact us if you'd like to
|
||||
work on a resource that uses this feature, or to add it to an existing one!
|
||||
|
||||
## Automatic grouping
|
||||
Automatic grouping in `mgmt` is well described in [this article](https://ttboj.wordpress.com/2016/03/30/automatic-grouping-in-mgmt/).
|
||||
The best example of this technique can be seen in the `pkg` resource.
|
||||
Unfortunately no further documentation about this subject has been written. To
|
||||
expand this section, please send a patch! Please contact us if you'd like to
|
||||
work on a resource that uses this feature, or to add it to an existing one!
|
||||
|
||||
|
||||
## Send/Recv
|
||||
In `mgmt` there is a novel concept called _Send/Recv_. For some background,
|
||||
please [read the introductory article](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/).
|
||||
When using this feature, the engine will automatically send the user specified
|
||||
value to the intended destination without requiring any resource specific code.
|
||||
Any time that one of the destination values is changed, the engine automatically
|
||||
marks the resource state as `dirty`. To detect if a particular value was
|
||||
received, and if it changed (during this invocation of CheckApply) from the
|
||||
previous value, you can query the Recv parameter. It will contain a `map` of all
|
||||
the keys which can be received on, and the value has a `Changed` property which
|
||||
will indicate whether the value was updated on this particular `CheckApply`
|
||||
invocation. The type of the sending key must match that of the receiving one.
|
||||
This can _only_ be done inside of the `CheckApply` function!
|
||||
|
||||
```golang
// inside CheckApply, probably near the top
if val, exists := obj.Recv["SomeKey"]; exists {
	log.Printf("SomeKey was sent to us from: %s.%s", val.Res, val.Key)
	if val.Changed {
		log.Printf("SomeKey was just updated!")
		// you may want to invalidate some local cache
	}
}
```

Astute readers will note that there isn't anything that prevents a user from
sending an identically typed value to some arbitrary (public) key that the
resource author hadn't considered! While this is true, resources should probably
work within this problem space anyways. The rule of thumb is that any public
parameter which is normally used in a resource can be used safely.

One subtle scenario: if a resource creates a local cache or stores a computation
that depends on the value of a public parameter, and that cache requires
invalidation should the public parameter change, then you must detect that
change and invalidate the cache when it occurs. This *must* be processed before
there is any possibility of failure in CheckApply, because if we fail (and
possibly run again) the subsequent send->recv transfer might not have a new
value to copy, and therefore we won't see the notification of change. In other
words, process these change notifications promptly if they must not be lost, as
is the case for cache invalidation. A short sketch of this pattern follows.
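
A minimal, hedged sketch of that pattern, extending the `Recv` example above;
the `Content` key and the `fileCache` field are hypothetical and stand in for
whatever your resource actually caches:

```golang
// inside CheckApply, before any step that can fail and force a re-run
if val, exists := obj.Recv["Content"]; exists && val.Changed {
	// the received Content value changed on this invocation, so anything
	// derived from the old value is stale; invalidate it right away
	obj.fileCache = nil // hypothetical cached state, rebuilt lazily later
}
```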

Remember, `Send/Recv` only changes your resource code if you cache state.

## Composite resources

Composite resources are resources which embed one or more existing resources.
This is useful to prevent code duplication in higher level resource scenarios.
The best example of this technique can be seen in the `nspawn` resource, which
partially embeds a `svc` resource, but without its `Watch`.
Unfortunately no further documentation about this subject has been written. To
expand this section, please send a patch! Please contact us if you'd like to
work on a resource that uses this feature, or to add it to an existing one!

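As a rough, hedged sketch of the general shape (the field names, the `State`
value, and the `Init` details are illustrative and not copied from the real
`nspawn` code):

```golang
// FooRes is a hypothetical composite resource which reuses a svc resource.
type FooRes struct {
	BaseRes `yaml:",inline"`
	// ... other foo specific fields ...

	svc *SvcRes // helper resource, built internally in Init
}

// Init builds the internal svc resource so that its logic can be reused.
func (obj *FooRes) Init() error {
	obj.svc = &SvcRes{
		BaseRes: BaseRes{Name: obj.GetName()},
		State:   "running", // illustrative value
	}
	if err := obj.svc.Init(); err != nil {
		return err
	}
	return obj.BaseRes.Init() // call base init, b/c we're overriding
}

// CheckApply does the foo specific work, then delegates the rest to svc.
// Note that we deliberately do not reuse the svc Watch here.
func (obj *FooRes) CheckApply(apply bool) (bool, error) {
	// ... foo specific checks and changes would go here ...
	// a real resource would combine this result with its own checkOK value
	return obj.svc.CheckApply(apply)
}
```
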
## Frequently asked questions

(Send your questions as a patch to this FAQ! I'll review it, merge it, and
respond by commit with the answer.)

### Can I write resources in a different language?

Currently `golang` is the only supported language for built-in resources. We
might consider allowing external resources to be imported in the future. This
will likely require a language that can expose a C-like API, such as `python` or
`ruby`. Custom `golang` resources are already possible when using mgmt as a lib.
Higher level resource collections will be possible once the `mgmt` DSL is ready.

### What new resource primitives need writing?

There are still many ideas for new resources that haven't been written yet. If
you'd like to contribute one, please contact us and tell us about your idea!

### Where can I find more information about mgmt?

Additional blog posts, videos and other material [are available!](https://github.com/purpleidea/mgmt/#on-the-web).

## Suggestions

If you have any ideas for API changes or other improvements to resource writing,
please let us know! We're still pre 1.0 and pre 0.1 and happy to break the API
in order to get it right!

412
etcd/methods.go
Normal file
@@ -0,0 +1,412 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
etcd "github.com/coreos/etcd/clientv3"
|
||||
rpctypes "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
etcdtypes "github.com/coreos/etcd/pkg/types"
|
||||
context "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// TODO: Could all these Etcd*(obj *EmbdEtcd, ...) functions which deal with the
|
||||
// interface between etcd paths and behaviour be grouped into a single struct ?
|
||||
|
||||
// Nominate nominates a particular client to be a server (peer).
|
||||
func Nominate(obj *EmbdEtcd, hostname string, urls etcdtypes.URLs) error {
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: Nominate(%v): %v", hostname, urls.String())
|
||||
defer log.Printf("Trace: Etcd: Nominate(%v): Finished!", hostname)
|
||||
}
|
||||
// nominate someone to be a server
|
||||
nominate := fmt.Sprintf("/%s/nominated/%s", NS, hostname)
|
||||
ops := []etcd.Op{} // list of ops in this txn
|
||||
if urls != nil {
|
||||
ops = append(ops, etcd.OpPut(nominate, urls.String())) // TODO: add a TTL? (etcd.WithLease)
|
||||
|
||||
} else { // delete message if set to erase
|
||||
ops = append(ops, etcd.OpDelete(nominate))
|
||||
}
|
||||
|
||||
if _, err := obj.Txn(nil, ops, nil); err != nil {
|
||||
return fmt.Errorf("nominate failed") // exit in progress?
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Nominated returns a urls map of nominated etcd server volunteers.
|
||||
// NOTE: I know 'nominees' might be more correct, but it is less consistent here.
|
||||
func Nominated(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
||||
path := fmt.Sprintf("/%s/nominated/", NS)
|
||||
keyMap, err := obj.Get(path, etcd.WithPrefix()) // map[string]string, bool
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("nominated isn't available: %v", err)
|
||||
}
|
||||
nominated := make(etcdtypes.URLsMap)
|
||||
for key, val := range keyMap { // loop through directory of nominated
|
||||
if !strings.HasPrefix(key, path) {
|
||||
continue
|
||||
}
|
||||
name := key[len(path):] // get name of nominee
|
||||
if val == "" { // skip "erased" values
|
||||
continue
|
||||
}
|
||||
urls, err := etcdtypes.NewURLs(strings.Split(val, ","))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("nominated data format error: %v", err)
|
||||
}
|
||||
nominated[name] = urls // add to map
|
||||
if obj.flags.Debug {
|
||||
log.Printf("Etcd: Nominated(%v): %v", name, val)
|
||||
}
|
||||
}
|
||||
return nominated, nil
|
||||
}
|
||||
|
||||
// Volunteer offers yourself up to be a server if needed.
|
||||
func Volunteer(obj *EmbdEtcd, urls etcdtypes.URLs) error {
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: Volunteer(%v): %v", obj.hostname, urls.String())
|
||||
defer log.Printf("Trace: Etcd: Volunteer(%v): Finished!", obj.hostname)
|
||||
}
|
||||
// volunteer to be a server
|
||||
volunteer := fmt.Sprintf("/%s/volunteers/%s", NS, obj.hostname)
|
||||
ops := []etcd.Op{} // list of ops in this txn
|
||||
if urls != nil {
|
||||
// XXX: adding a TTL is crucial! (i think)
|
||||
ops = append(ops, etcd.OpPut(volunteer, urls.String())) // value is usually a peer "serverURL"
|
||||
|
||||
} else { // delete message if set to erase
|
||||
ops = append(ops, etcd.OpDelete(volunteer))
|
||||
}
|
||||
|
||||
if _, err := obj.Txn(nil, ops, nil); err != nil {
|
||||
return fmt.Errorf("volunteering failed") // exit in progress?
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Volunteers returns a urls map of available etcd server volunteers.
|
||||
func Volunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: Volunteers()")
|
||||
defer log.Printf("Trace: Etcd: Volunteers(): Finished!")
|
||||
}
|
||||
path := fmt.Sprintf("/%s/volunteers/", NS)
|
||||
keyMap, err := obj.Get(path, etcd.WithPrefix())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("volunteers aren't available: %v", err)
|
||||
}
|
||||
volunteers := make(etcdtypes.URLsMap)
|
||||
for key, val := range keyMap { // loop through directory of volunteers
|
||||
if !strings.HasPrefix(key, path) {
|
||||
continue
|
||||
}
|
||||
name := key[len(path):] // get name of volunteer
|
||||
if val == "" { // skip "erased" values
|
||||
continue
|
||||
}
|
||||
urls, err := etcdtypes.NewURLs(strings.Split(val, ","))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("volunteers data format error: %v", err)
|
||||
}
|
||||
volunteers[name] = urls // add to map
|
||||
if obj.flags.Debug {
|
||||
log.Printf("Etcd: Volunteer(%v): %v", name, val)
|
||||
}
|
||||
}
|
||||
return volunteers, nil
|
||||
}
|
||||
|
||||
// AdvertiseEndpoints advertises the list of available client endpoints.
|
||||
func AdvertiseEndpoints(obj *EmbdEtcd, urls etcdtypes.URLs) error {
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: AdvertiseEndpoints(%v): %v", obj.hostname, urls.String())
|
||||
defer log.Printf("Trace: Etcd: AdvertiseEndpoints(%v): Finished!", obj.hostname)
|
||||
}
|
||||
// advertise endpoints
|
||||
endpoints := fmt.Sprintf("/%s/endpoints/%s", NS, obj.hostname)
|
||||
ops := []etcd.Op{} // list of ops in this txn
|
||||
if urls != nil {
|
||||
// TODO: add a TTL? (etcd.WithLease)
|
||||
ops = append(ops, etcd.OpPut(endpoints, urls.String())) // value is usually a "clientURL"
|
||||
|
||||
} else { // delete message if set to erase
|
||||
ops = append(ops, etcd.OpDelete(endpoints))
|
||||
}
|
||||
|
||||
if _, err := obj.Txn(nil, ops, nil); err != nil {
|
||||
return fmt.Errorf("endpoint advertising failed") // exit in progress?
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Endpoints returns a urls map of available etcd server endpoints.
|
||||
func Endpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: Endpoints()")
|
||||
defer log.Printf("Trace: Etcd: Endpoints(): Finished!")
|
||||
}
|
||||
path := fmt.Sprintf("/%s/endpoints/", NS)
|
||||
keyMap, err := obj.Get(path, etcd.WithPrefix())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("endpoints aren't available: %v", err)
|
||||
}
|
||||
endpoints := make(etcdtypes.URLsMap)
|
||||
for key, val := range keyMap { // loop through directory of endpoints
|
||||
if !strings.HasPrefix(key, path) {
|
||||
continue
|
||||
}
|
||||
name := key[len(path):] // get name of volunteer
|
||||
if val == "" { // skip "erased" values
|
||||
continue
|
||||
}
|
||||
urls, err := etcdtypes.NewURLs(strings.Split(val, ","))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("endpoints data format error: %v", err)
|
||||
}
|
||||
endpoints[name] = urls // add to map
|
||||
if obj.flags.Debug {
|
||||
log.Printf("Etcd: Endpoint(%v): %v", name, val)
|
||||
}
|
||||
}
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
// SetHostnameConverged sets whether a specific hostname is converged.
|
||||
func SetHostnameConverged(obj *EmbdEtcd, hostname string, isConverged bool) error {
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: SetHostnameConverged(%s): %v", hostname, isConverged)
|
||||
defer log.Printf("Trace: Etcd: SetHostnameConverged(%v): Finished!", hostname)
|
||||
}
|
||||
converged := fmt.Sprintf("/%s/converged/%s", NS, hostname)
|
||||
op := []etcd.Op{etcd.OpPut(converged, fmt.Sprintf("%t", isConverged))}
|
||||
if _, err := obj.Txn(nil, op, nil); err != nil { // TODO: do we need a skipConv flag here too?
|
||||
return fmt.Errorf("set converged failed") // exit in progress?
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HostnameConverged returns a map of every hostname's converged state.
|
||||
func HostnameConverged(obj *EmbdEtcd) (map[string]bool, error) {
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: HostnameConverged()")
|
||||
defer log.Printf("Trace: Etcd: HostnameConverged(): Finished!")
|
||||
}
|
||||
path := fmt.Sprintf("/%s/converged/", NS)
|
||||
keyMap, err := obj.ComplexGet(path, true, etcd.WithPrefix()) // don't un-converge
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converged values aren't available: %v", err)
|
||||
}
|
||||
converged := make(map[string]bool)
|
||||
for key, val := range keyMap { // loop through directory...
|
||||
if !strings.HasPrefix(key, path) {
|
||||
continue
|
||||
}
|
||||
name := key[len(path):] // get name of key
|
||||
if val == "" { // skip "erased" values
|
||||
continue
|
||||
}
|
||||
b, err := strconv.ParseBool(val)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converged data format error: %v", err)
|
||||
}
|
||||
converged[name] = b // add to map
|
||||
}
|
||||
return converged, nil
|
||||
}
|
||||
|
||||
// AddHostnameConvergedWatcher adds a watcher with a callback that runs on
|
||||
// hostname state changes.
|
||||
func AddHostnameConvergedWatcher(obj *EmbdEtcd, callbackFn func(map[string]bool) error) (func(), error) {
|
||||
path := fmt.Sprintf("/%s/converged/", NS)
|
||||
internalCbFn := func(re *RE) error {
|
||||
// TODO: get the value from the response, and apply delta...
|
||||
// for now, just run a get operation which is easier to code!
|
||||
m, err := HostnameConverged(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callbackFn(m) // call my function
|
||||
}
|
||||
return obj.AddWatcher(path, internalCbFn, true, true, etcd.WithPrefix()) // no block and no converger reset
|
||||
}
|
||||
|
||||
// SetClusterSize sets the ideal target cluster size of etcd peers.
|
||||
func SetClusterSize(obj *EmbdEtcd, value uint16) error {
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: SetClusterSize(): %v", value)
|
||||
defer log.Printf("Trace: Etcd: SetClusterSize(): Finished!")
|
||||
}
|
||||
key := fmt.Sprintf("/%s/idealClusterSize", NS)
|
||||
|
||||
if err := obj.Set(key, strconv.FormatUint(uint64(value), 10)); err != nil {
|
||||
return fmt.Errorf("function SetClusterSize failed: %v", err) // exit in progress?
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetClusterSize gets the ideal target cluster size of etcd peers.
|
||||
func GetClusterSize(obj *EmbdEtcd) (uint16, error) {
|
||||
key := fmt.Sprintf("/%s/idealClusterSize", NS)
|
||||
keyMap, err := obj.Get(key)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("function GetClusterSize failed: %v", err)
|
||||
}
|
||||
|
||||
val, exists := keyMap[key]
|
||||
if !exists || val == "" {
|
||||
return 0, fmt.Errorf("function GetClusterSize failed: value is missing or empty")
|
||||
}
|
||||
|
||||
v, err := strconv.ParseUint(val, 10, 16)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("function GetClusterSize failed: %v", err)
|
||||
}
|
||||
return uint16(v), nil
|
||||
}
|
||||
|
||||
// MemberAdd adds a member to the cluster.
|
||||
func MemberAdd(obj *EmbdEtcd, peerURLs etcdtypes.URLs) (*etcd.MemberAddResponse, error) {
|
||||
//obj.Connect(false) // TODO: ?
|
||||
ctx := context.Background()
|
||||
var response *etcd.MemberAddResponse
|
||||
var err error
|
||||
for {
|
||||
if obj.exiting { // the exit signal has been sent!
|
||||
return nil, fmt.Errorf("exiting etcd")
|
||||
}
|
||||
obj.rLock.RLock()
|
||||
response, err = obj.client.MemberAdd(ctx, peerURLs.StringSlice())
|
||||
obj.rLock.RUnlock()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if ctx, err = obj.CtxError(ctx, err); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// MemberRemove removes a member by mID and returns if it worked, and also
|
||||
// if there was an error. This is because it might have run without error, but
|
||||
// the member wasn't found, for example.
|
||||
func MemberRemove(obj *EmbdEtcd, mID uint64) (bool, error) {
|
||||
//obj.Connect(false) // TODO: ?
|
||||
ctx := context.Background()
|
||||
for {
|
||||
if obj.exiting { // the exit signal has been sent!
|
||||
return false, fmt.Errorf("exiting etcd")
|
||||
}
|
||||
obj.rLock.RLock()
|
||||
_, err := obj.client.MemberRemove(ctx, mID)
|
||||
obj.rLock.RUnlock()
|
||||
if err == nil {
|
||||
break
|
||||
} else if err == rpctypes.ErrMemberNotFound {
|
||||
// if we get this, member already shut itself down :)
|
||||
return false, nil
|
||||
}
|
||||
if ctx, err = obj.CtxError(ctx, err); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Members returns information on cluster membership.
|
||||
// The member IDs are the keys, because an empty name means unstarted!
|
||||
// TODO: consider queueing this through the main loop with CtxError(ctx, err)
|
||||
func Members(obj *EmbdEtcd) (map[uint64]string, error) {
|
||||
//obj.Connect(false) // TODO: ?
|
||||
ctx := context.Background()
|
||||
var response *etcd.MemberListResponse
|
||||
var err error
|
||||
for {
|
||||
if obj.exiting { // the exit signal has been sent!
|
||||
return nil, fmt.Errorf("exiting etcd")
|
||||
}
|
||||
obj.rLock.RLock()
|
||||
if obj.flags.Trace {
|
||||
log.Printf("Trace: Etcd: Members(): Endpoints are: %v", obj.client.Endpoints())
|
||||
}
|
||||
response, err = obj.client.MemberList(ctx)
|
||||
obj.rLock.RUnlock()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if ctx, err = obj.CtxError(ctx, err); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
members := make(map[uint64]string)
|
||||
for _, x := range response.Members {
|
||||
members[x.ID] = x.Name // x.Name will be "" if unstarted!
|
||||
}
|
||||
return members, nil
|
||||
}
|
||||
|
||||
// Leader returns the current leader of the etcd server cluster.
|
||||
func Leader(obj *EmbdEtcd) (string, error) {
|
||||
//obj.Connect(false) // TODO: ?
|
||||
var err error
|
||||
membersMap := make(map[uint64]string)
|
||||
if membersMap, err = Members(obj); err != nil {
|
||||
return "", err
|
||||
}
|
||||
addresses := obj.LocalhostClientURLs() // heuristic, but probably correct
|
||||
if len(addresses) == 0 {
|
||||
// probably a programming error...
|
||||
return "", fmt.Errorf("programming error")
|
||||
}
|
||||
endpoint := addresses[0].Host // FIXME: arbitrarily picked the first one
|
||||
|
||||
// part two
|
||||
ctx := context.Background()
|
||||
var response *etcd.StatusResponse
|
||||
for {
|
||||
if obj.exiting { // the exit signal has been sent!
|
||||
return "", fmt.Errorf("exiting etcd")
|
||||
}
|
||||
|
||||
obj.rLock.RLock()
|
||||
response, err = obj.client.Maintenance.Status(ctx, endpoint)
|
||||
obj.rLock.RUnlock()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if ctx, err = obj.CtxError(ctx, err); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// isLeader: response.Header.MemberId == response.Leader
|
||||
for id, name := range membersMap {
|
||||
if id == response.Leader {
|
||||
return name, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("members map is not current") // not found
|
||||
}
|
||||
182
etcd/resources.go
Normal file
@@ -0,0 +1,182 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
|
||||
etcd "github.com/coreos/etcd/clientv3"
|
||||
)
|
||||
|
||||
// WatchResources returns a channel that outputs events when exported resources
|
||||
// change.
|
||||
// TODO: Filter our watch (on the server side if possible) based on the
|
||||
// collection prefixes and filters that we care about...
|
||||
func WatchResources(obj *EmbdEtcd) chan error {
|
||||
ch := make(chan error, 1) // buffer it so we can measure it
|
||||
path := fmt.Sprintf("/%s/exported/", NS)
|
||||
callback := func(re *RE) error {
|
||||
// TODO: is this even needed? it used to happen on conn errors
|
||||
log.Printf("Etcd: Watch: Path: %v", path) // event
|
||||
if re == nil || re.response.Canceled {
|
||||
return fmt.Errorf("watch is empty") // will cause a CtxError+retry
|
||||
}
|
||||
// we normally need to check if anything changed since the last
|
||||
// event, since a set (export) with no changes still causes the
|
||||
// watcher to trigger and this would cause an infinite loop. we
|
||||
// don't need to do this check anymore because we do the export
|
||||
// transactionally, and only if a change is needed. since it is
|
||||
// atomic, all the changes arrive together which avoids dupes!!
|
||||
if len(ch) == 0 { // send event only if one isn't pending
|
||||
// this check avoids multiple events all queueing up and then
|
||||
// being released continuously long after the changes stopped
|
||||
// do not block!
|
||||
ch <- nil // event
|
||||
}
|
||||
return nil
|
||||
}
|
||||
_, _ = obj.AddWatcher(path, callback, true, false, etcd.WithPrefix()) // no need to check errors
|
||||
return ch
|
||||
}
|
||||
|
||||
// SetResources exports all of the resources which we pass in to etcd.
|
||||
func SetResources(obj *EmbdEtcd, hostname string, resourceList []resources.Res) error {
|
||||
// key structure is /$NS/exported/$hostname/resources/$uid = $data
|
||||
|
||||
var kindFilter []string // empty to get from everyone
|
||||
hostnameFilter := []string{hostname}
|
||||
// this is not a race because we should only be reading keys which we
|
||||
// set, and there should not be any contention with other hosts here!
|
||||
originals, err := GetResources(obj, hostnameFilter, kindFilter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(originals) == 0 && len(resourceList) == 0 { // special case of no add or del
|
||||
return nil
|
||||
}
|
||||
|
||||
ifs := []etcd.Cmp{} // list matching the desired state
|
||||
ops := []etcd.Op{} // list of ops in this transaction
|
||||
for _, res := range resourceList {
|
||||
if res.GetKind() == "" {
|
||||
log.Fatalf("Etcd: SetResources: Error: Empty kind: %v", res.GetName())
|
||||
}
|
||||
uid := fmt.Sprintf("%s/%s", res.GetKind(), res.GetName())
|
||||
path := fmt.Sprintf("/%s/exported/%s/resources/%s", NS, hostname, uid)
|
||||
if data, err := resources.ResToB64(res); err == nil {
|
||||
ifs = append(ifs, etcd.Compare(etcd.Value(path), "=", data)) // desired state
|
||||
ops = append(ops, etcd.OpPut(path, data))
|
||||
} else {
|
||||
return fmt.Errorf("can't convert to B64: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
match := func(res resources.Res, resourceList []resources.Res) bool { // helper lambda
|
||||
for _, x := range resourceList {
|
||||
if res.GetKind() == x.GetKind() && res.GetName() == x.GetName() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
hasDeletes := false
|
||||
// delete old, now unused resources here...
|
||||
for _, res := range originals {
|
||||
if res.GetKind() == "" {
|
||||
log.Fatalf("Etcd: SetResources: Error: Empty kind: %v", res.GetName())
|
||||
}
|
||||
uid := fmt.Sprintf("%s/%s", res.GetKind(), res.GetName())
|
||||
path := fmt.Sprintf("/%s/exported/%s/resources/%s", NS, hostname, uid)
|
||||
|
||||
if match(res, resourceList) { // if we match, no need to delete!
|
||||
continue
|
||||
}
|
||||
|
||||
ops = append(ops, etcd.OpDelete(path))
|
||||
|
||||
hasDeletes = true
|
||||
}
|
||||
|
||||
// if everything is already correct, do nothing, otherwise, run the ops!
|
||||
// it's important to do this in one transaction, and atomically, because
|
||||
// this way, we only generate one watch event, and only when it's needed
|
||||
if hasDeletes { // always run, ifs don't matter
|
||||
_, err = obj.Txn(nil, ops, nil) // TODO: does this run? it should!
|
||||
} else {
|
||||
_, err = obj.Txn(ifs, nil, ops) // TODO: do we need to look at response?
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetResources collects all of the resources which match a filter from etcd.
|
||||
// If the kindFilter or hostnameFilter is empty, then it assumes no filtering...
|
||||
// TODO: Expand this with a more powerful filter based on what we eventually
|
||||
// support in our collect DSL. Ideally a server side filter like WithFilter()
|
||||
// We could do this if the pattern was /$NS/exported/$kind/$hostname/$uid = $data.
|
||||
func GetResources(obj *EmbdEtcd, hostnameFilter, kindFilter []string) ([]resources.Res, error) {
|
||||
// key structure is /$NS/exported/$hostname/resources/$uid = $data
|
||||
path := fmt.Sprintf("/%s/exported/", NS)
|
||||
resourceList := []resources.Res{}
|
||||
keyMap, err := obj.Get(path, etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortAscend))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get resources: %v", err)
|
||||
}
|
||||
for key, val := range keyMap {
|
||||
if !strings.HasPrefix(key, path) { // sanity check
|
||||
continue
|
||||
}
|
||||
|
||||
str := strings.Split(key[len(path):], "/")
|
||||
if len(str) != 4 {
|
||||
return nil, fmt.Errorf("unexpected chunk count")
|
||||
}
|
||||
hostname, r, kind, name := str[0], str[1], str[2], str[3]
|
||||
if r != "resources" {
|
||||
return nil, fmt.Errorf("unexpected chunk pattern")
|
||||
}
|
||||
if kind == "" {
|
||||
return nil, fmt.Errorf("unexpected kind chunk")
|
||||
}
|
||||
|
||||
// FIXME: ideally this would be a server side filter instead!
|
||||
if len(hostnameFilter) > 0 && !util.StrInList(hostname, hostnameFilter) {
|
||||
continue
|
||||
}
|
||||
|
||||
// FIXME: ideally this would be a server side filter instead!
|
||||
if len(kindFilter) > 0 && !util.StrInList(kind, kindFilter) {
|
||||
continue
|
||||
}
|
||||
|
||||
if obj, err := resources.B64ToRes(val); err == nil {
|
||||
obj.SetKind(kind) // cheap init
|
||||
log.Printf("Etcd: Get: (Hostname, Kind, Name): (%s, %s, %s)", hostname, kind, name)
|
||||
resourceList = append(resourceList, obj)
|
||||
} else {
|
||||
return nil, fmt.Errorf("can't convert from B64: %v", err)
|
||||
}
|
||||
}
|
||||
return resourceList, nil
|
||||
}
|
||||
105
etcd/str.go
Normal file
@@ -0,0 +1,105 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
etcd "github.com/coreos/etcd/clientv3"
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ErrNotExist is returned when GetStr can not find the requested key.
|
||||
// TODO: https://dave.cheney.net/2016/04/07/constant-errors
|
||||
var ErrNotExist = errors.New("errNotExist")
|
||||
|
||||
// WatchStr returns a channel which spits out events on key activity.
|
||||
// FIXME: It should close the channel when it's done, and spit out errors when
|
||||
// something goes wrong.
|
||||
func WatchStr(obj *EmbdEtcd, key string) chan error {
|
||||
// new key structure is /$NS/strings/$key = $data
|
||||
path := fmt.Sprintf("/%s/strings/%s", NS, key)
|
||||
ch := make(chan error, 1)
|
||||
// FIXME: fix our API so that we get a close event on shutdown.
|
||||
callback := func(re *RE) error {
|
||||
// TODO: is this even needed? it used to happen on conn errors
|
||||
//log.Printf("Etcd: Watch: Path: %v", path) // event
|
||||
if re == nil || re.response.Canceled {
|
||||
return fmt.Errorf("watch is empty") // will cause a CtxError+retry
|
||||
}
|
||||
if len(ch) == 0 { // send event only if one isn't pending
|
||||
ch <- nil // event
|
||||
}
|
||||
return nil
|
||||
}
|
||||
_, _ = obj.AddWatcher(path, callback, true, false, etcd.WithPrefix()) // no need to check errors
|
||||
return ch
|
||||
}
|
||||
|
||||
// GetStr collects the string which matches a global namespace in etcd.
|
||||
func GetStr(obj *EmbdEtcd, key string) (string, error) {
|
||||
// new key structure is /$NS/strings/$key = $data
|
||||
path := fmt.Sprintf("/%s/strings/%s", NS, key)
|
||||
keyMap, err := obj.Get(path, etcd.WithPrefix())
|
||||
if err != nil {
|
||||
return "", errwrap.Wrapf(err, "could not get strings in: %s", key)
|
||||
}
|
||||
|
||||
if len(keyMap) == 0 {
|
||||
return "", ErrNotExist
|
||||
}
|
||||
|
||||
if count := len(keyMap); count != 1 {
|
||||
return "", fmt.Errorf("returned %d entries", count)
|
||||
}
|
||||
|
||||
val, exists := keyMap[path]
|
||||
if !exists {
|
||||
return "", fmt.Errorf("path `%s` is missing", path)
|
||||
}
|
||||
|
||||
//log.Printf("Etcd: GetStr(%s): %s", key, val)
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// SetStr sets a key and hostname pair to a certain value. If the value is
|
||||
// nil, then it deletes the key. Otherwise the value should point to a string.
|
||||
// TODO: TTL or delete disconnect?
|
||||
func SetStr(obj *EmbdEtcd, key string, data *string) error {
|
||||
// key structure is /$NS/strings/$key = $data
|
||||
path := fmt.Sprintf("/%s/strings/%s", NS, key)
|
||||
ifs := []etcd.Cmp{} // list matching the desired state
|
||||
ops := []etcd.Op{} // list of ops in this transaction (then)
|
||||
els := []etcd.Op{} // list of ops in this transaction (else)
|
||||
if data == nil { // perform a delete
|
||||
// TODO: use https://github.com/coreos/etcd/pull/7417 if merged
|
||||
//ifs = append(ifs, etcd.KeyExists(path))
|
||||
ifs = append(ifs, etcd.Compare(etcd.Version(path), ">", 0))
|
||||
ops = append(ops, etcd.OpDelete(path))
|
||||
} else {
|
||||
data := *data // get the real value
|
||||
ifs = append(ifs, etcd.Compare(etcd.Value(path), "=", data)) // desired state
|
||||
els = append(els, etcd.OpPut(path, data))
|
||||
}
|
||||
|
||||
// it's important to do this in one transaction, and atomically, because
|
||||
// this way, we only generate one watch event, and only when it's needed
|
||||
_, err := obj.Txn(ifs, ops, els) // TODO: do we need to look at response?
|
||||
return errwrap.Wrapf(err, "could not set strings in: %s", key)
|
||||
}
|
||||
115
etcd/strmap.go
Normal file
@@ -0,0 +1,115 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
|
||||
etcd "github.com/coreos/etcd/clientv3"
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// WatchStrMap returns a channel which spits out events on key activity.
|
||||
// FIXME: It should close the channel when it's done, and spit out errors when
|
||||
// something goes wrong.
|
||||
func WatchStrMap(obj *EmbdEtcd, key string) chan error {
|
||||
// new key structure is /$NS/strings/$key/$hostname = $data
|
||||
path := fmt.Sprintf("/%s/strings/%s", NS, key)
|
||||
ch := make(chan error, 1)
|
||||
// FIXME: fix our API so that we get a close event on shutdown.
|
||||
callback := func(re *RE) error {
|
||||
// TODO: is this even needed? it used to happen on conn errors
|
||||
//log.Printf("Etcd: Watch: Path: %v", path) // event
|
||||
if re == nil || re.response.Canceled {
|
||||
return fmt.Errorf("watch is empty") // will cause a CtxError+retry
|
||||
}
|
||||
if len(ch) == 0 { // send event only if one isn't pending
|
||||
ch <- nil // event
|
||||
}
|
||||
return nil
|
||||
}
|
||||
_, _ = obj.AddWatcher(path, callback, true, false, etcd.WithPrefix()) // no need to check errors
|
||||
return ch
|
||||
}
|
||||
|
||||
// GetStrMap collects all of the strings which match a namespace in etcd.
|
||||
func GetStrMap(obj *EmbdEtcd, hostnameFilter []string, key string) (map[string]string, error) {
|
||||
// old key structure is /$NS/strings/$hostname/$key = $data
|
||||
// new key structure is /$NS/strings/$key/$hostname = $data
|
||||
// FIXME: if we have the $key as the last token (old key structure), we
|
||||
// can allow the key to contain the slash char, otherwise we need to
|
||||
// verify that one isn't present in the input string.
|
||||
path := fmt.Sprintf("/%s/strings/%s", NS, key)
|
||||
keyMap, err := obj.Get(path, etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortAscend))
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "could not get strings in: %s", key)
|
||||
}
|
||||
result := make(map[string]string)
|
||||
for key, val := range keyMap {
|
||||
if !strings.HasPrefix(key, path) { // sanity check
|
||||
continue
|
||||
}
|
||||
|
||||
str := strings.Split(key[len(path):], "/")
|
||||
if len(str) != 2 {
|
||||
return nil, fmt.Errorf("unexpected chunk count of %d", len(str))
|
||||
}
|
||||
_, hostname := str[0], str[1]
|
||||
|
||||
if hostname == "" {
|
||||
return nil, fmt.Errorf("unexpected chunk length of %d", len(hostname))
|
||||
}
|
||||
|
||||
// FIXME: ideally this would be a server side filter instead!
|
||||
if len(hostnameFilter) > 0 && !util.StrInList(hostname, hostnameFilter) {
|
||||
continue
|
||||
}
|
||||
//log.Printf("Etcd: GetStr(%s): (Hostname, Data): (%s, %s)", key, hostname, val)
|
||||
result[hostname] = val
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SetStrMap sets a key and hostname pair to a certain value. If the value is
|
||||
// nil, then it deletes the key. Otherwise the value should point to a string.
|
||||
// TODO: TTL or delete disconnect?
|
||||
func SetStrMap(obj *EmbdEtcd, hostname, key string, data *string) error {
|
||||
// key structure is /$NS/strings/$key/$hostname = $data
|
||||
path := fmt.Sprintf("/%s/strings/%s/%s", NS, key, hostname)
|
||||
ifs := []etcd.Cmp{} // list matching the desired state
|
||||
ops := []etcd.Op{} // list of ops in this transaction (then)
|
||||
els := []etcd.Op{} // list of ops in this transaction (else)
|
||||
if data == nil { // perform a delete
|
||||
// TODO: use https://github.com/coreos/etcd/pull/7417 if merged
|
||||
//ifs = append(ifs, etcd.KeyExists(path))
|
||||
ifs = append(ifs, etcd.Compare(etcd.Version(path), ">", 0))
|
||||
ops = append(ops, etcd.OpDelete(path))
|
||||
} else {
|
||||
data := *data // get the real value
|
||||
ifs = append(ifs, etcd.Compare(etcd.Value(path), "=", data)) // desired state
|
||||
els = append(els, etcd.OpPut(path, data))
|
||||
}
|
||||
|
||||
// it's important to do this in one transaction, and atomically, because
|
||||
// this way, we only generate one watch event, and only when it's needed
|
||||
_, err := obj.Txn(ifs, ops, els) // TODO: do we need to look at response?
|
||||
return errwrap.Wrapf(err, "could not set strings in: %s", key)
|
||||
}
|
||||
95
etcd/world.go
Normal file
@@ -0,0 +1,95 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
)
|
||||
|
||||
// World is an etcd backed implementation of the World interface.
|
||||
type World struct {
|
||||
Hostname string // uuid for the consumer of these
|
||||
EmbdEtcd *EmbdEtcd
|
||||
}
|
||||
|
||||
// ResWatch returns a channel which spits out events on possible exported
|
||||
// resource changes.
|
||||
func (obj *World) ResWatch() chan error {
|
||||
return WatchResources(obj.EmbdEtcd)
|
||||
}
|
||||
|
||||
// ResExport exports a list of resources under our hostname namespace.
|
||||
// Subsequent calls replace the previously set collection atomically.
|
||||
func (obj *World) ResExport(resourceList []resources.Res) error {
|
||||
return SetResources(obj.EmbdEtcd, obj.Hostname, resourceList)
|
||||
}
|
||||
|
||||
// ResCollect gets the collection of exported resources which match the filter.
|
||||
// It does this atomically so that a call always returns a complete collection.
|
||||
func (obj *World) ResCollect(hostnameFilter, kindFilter []string) ([]resources.Res, error) {
|
||||
// XXX: should we be restricted to retrieving resources that were
|
||||
// exported with a tag that allows or restricts our hostname? We could
|
||||
// enforce that here if the underlying API supported it... Add this?
|
||||
return GetResources(obj.EmbdEtcd, hostnameFilter, kindFilter)
|
||||
}
|
||||
|
||||
// StrWatch returns a channel which spits out events on possible string changes.
|
||||
func (obj *World) StrWatch(namespace string) chan error {
|
||||
return WatchStr(obj.EmbdEtcd, namespace)
|
||||
}
|
||||
|
||||
// StrIsNotExist returns whether the error from StrGet is a key missing error.
|
||||
func (obj *World) StrIsNotExist(err error) bool {
|
||||
return err == ErrNotExist
|
||||
}
|
||||
|
||||
// StrGet returns the value for the given namespace.
|
||||
func (obj *World) StrGet(namespace string) (string, error) {
|
||||
return GetStr(obj.EmbdEtcd, namespace)
|
||||
}
|
||||
|
||||
// StrSet sets the namespace value to a particular string.
|
||||
func (obj *World) StrSet(namespace, value string) error {
|
||||
return SetStr(obj.EmbdEtcd, namespace, &value)
|
||||
}
|
||||
|
||||
// StrDel deletes the value in a particular namespace.
|
||||
func (obj *World) StrDel(namespace string) error {
|
||||
return SetStr(obj.EmbdEtcd, namespace, nil)
|
||||
}
|
||||
|
||||
// StrMapWatch returns a channel which spits out events on possible string changes.
|
||||
func (obj *World) StrMapWatch(namespace string) chan error {
|
||||
return WatchStrMap(obj.EmbdEtcd, namespace)
|
||||
}
|
||||
|
||||
// StrMapGet returns a map of hostnames to values in the given namespace.
|
||||
func (obj *World) StrMapGet(namespace string) (map[string]string, error) {
|
||||
return GetStrMap(obj.EmbdEtcd, []string{}, namespace)
|
||||
}
|
||||
|
||||
// StrMapSet sets the namespace value to a particular string under the identity
|
||||
// of its own hostname.
|
||||
func (obj *World) StrMapSet(namespace, value string) error {
|
||||
return SetStrMap(obj.EmbdEtcd, obj.Hostname, namespace, &value)
|
||||
}
|
||||
|
||||
// StrMapDel deletes the value in a particular namespace.
|
||||
func (obj *World) StrMapDel(namespace string) error {
|
||||
return SetStrMap(obj.EmbdEtcd, obj.Hostname, namespace, nil)
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -15,30 +15,38 @@
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
// Package event provides some primitives that are used for message passing.
|
||||
package event
|
||||
|
||||
//go:generate stringer -type=eventName -output=eventname_stringer.go
|
||||
type eventName int
|
||||
|
||||
const (
|
||||
eventNil eventName = iota
|
||||
eventExit
|
||||
eventStart
|
||||
eventPause
|
||||
eventPoke
|
||||
eventBackPoke
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Resp is a channel to be used for boolean responses.
|
||||
type Resp chan bool
|
||||
//go:generate stringer -type=Kind -output=kind_stringer.go
|
||||
|
||||
// Kind represents the type of event being passed.
|
||||
type Kind int
|
||||
|
||||
// The different event kinds are used in different contexts.
|
||||
const (
|
||||
EventNil Kind = iota
|
||||
EventExit
|
||||
EventStart
|
||||
EventPause
|
||||
EventPoke
|
||||
EventBackPoke
|
||||
)
|
||||
|
||||
// Resp is a channel to be used for boolean responses. A nil represents an ACK,
|
||||
// and a non-nil represents a NACK (false). This also lets us use custom errors.
|
||||
type Resp chan error
|
||||
|
||||
// Event is the main struct that stores event information and responses.
|
||||
type Event struct {
|
||||
Name eventName
|
||||
Kind Kind
|
||||
Resp Resp // channel to send an ack response on, nil to skip
|
||||
//Wg *sync.WaitGroup // receiver barrier to Wait() for everyone else on
|
||||
Msg string // some words for fun
|
||||
Activity bool // did something interesting happen?
|
||||
Err error // store an error in our event
|
||||
}
|
||||
|
||||
// ACK sends a single acknowledgement on the channel if one was requested.
|
||||
@@ -55,28 +63,43 @@ func (event *Event) NACK() {
|
||||
}
|
||||
}
|
||||
|
||||
// ACKNACK sends a custom ACK or NACK message on the channel if one was requested.
|
||||
func (event *Event) ACKNACK(err error) {
|
||||
if event.Resp != nil { // if they've requested a NACK
|
||||
event.Resp.ACKNACK(err)
|
||||
}
|
||||
}
|
||||
|
||||
// NewResp is just a helper to return the right type of response channel.
|
||||
func NewResp() Resp {
|
||||
resp := make(chan bool)
|
||||
resp := make(chan error)
|
||||
return resp
|
||||
}
|
||||
|
||||
// ACK sends a true value to resp.
|
||||
func (resp Resp) ACK() {
|
||||
if resp != nil {
|
||||
resp <- true
|
||||
resp <- nil // TODO: close instead?
|
||||
}
|
||||
}
|
||||
|
||||
// NACK sends a false value to resp.
|
||||
func (resp Resp) NACK() {
|
||||
if resp != nil {
|
||||
resp <- false
|
||||
resp <- fmt.Errorf("NACK")
|
||||
}
|
||||
}
|
||||
|
||||
// ACKNACK sends a custom ACK or NACK. The ACK value is always nil, the NACK can
|
||||
// be any non-nil error value.
|
||||
func (resp Resp) ACKNACK(err error) {
|
||||
if resp != nil {
|
||||
resp <- err
|
||||
}
|
||||
}
|
||||
|
||||
// Wait waits for any response from a Resp channel and returns it.
|
||||
func (resp Resp) Wait() bool {
|
||||
func (resp Resp) Wait() error {
|
||||
return <-resp
|
||||
}
|
||||
|
||||
@@ -84,13 +107,13 @@ func (resp Resp) Wait() bool {
|
||||
func (resp Resp) ACKWait() {
|
||||
for {
|
||||
// wait until true value
|
||||
if resp.Wait() {
|
||||
if resp.Wait() == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetActivity returns the activity value.
|
||||
func (event *Event) GetActivity() bool {
|
||||
return event.Activity
|
||||
// Error returns the stored error value.
|
||||
func (event *Event) Error() error {
|
||||
return event.Err
|
||||
}
|
||||
11
examples/augeas1.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
augeas:
|
||||
- name: sshd_config
|
||||
lens: Sshd.lns
|
||||
file: "/etc/ssh/sshd_config"
|
||||
sets:
|
||||
- path: X11Forwarding
|
||||
value: false
|
||||
edges:
|
||||
@@ -5,11 +5,13 @@ resources:
|
||||
- name: drbd-utils
|
||||
meta:
|
||||
autoedge: true
|
||||
noop: true
|
||||
state: installed
|
||||
file:
|
||||
- name: file1
|
||||
meta:
|
||||
autoedge: true
|
||||
noop: true
|
||||
path: "/etc/drbd.conf"
|
||||
content: |
|
||||
# this is an mgmt test
|
||||
@@ -17,13 +19,14 @@ resources:
|
||||
- name: file2
|
||||
meta:
|
||||
autoedge: true
|
||||
noop: true
|
||||
path: "/etc/drbd.d/"
|
||||
content: |
|
||||
i am a directory
|
||||
source: /dev/null
|
||||
state: exists
|
||||
svc:
|
||||
- name: drbd
|
||||
meta:
|
||||
autoedge: true
|
||||
noop: true
|
||||
state: stopped
|
||||
edges: []
|
||||
|
||||
9
examples/deep-dirs.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1
|
||||
path: "/tmp/mgmt/a/b/c/f1"
|
||||
content: |
|
||||
i am f1
|
||||
state: exists
|
||||
@@ -2,15 +2,10 @@
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1a
|
||||
path: "/tmp/mgmtA/f1a"
|
||||
- name: "@@filea"
|
||||
path: "/tmp/mgmtA/fA"
|
||||
content: |
|
||||
i am f1
|
||||
state: exists
|
||||
- name: "@@file2a"
|
||||
path: "/tmp/mgmtA/f2a"
|
||||
content: |
|
||||
i am f2, exported from host A
|
||||
i am fA, exported from host A
|
||||
state: exists
|
||||
collect:
|
||||
- kind: file
|
||||
|
||||
@@ -2,15 +2,10 @@
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1b
|
||||
path: "/tmp/mgmtB/f1b"
|
||||
- name: "@@fileb"
|
||||
path: "/tmp/mgmtB/fB"
|
||||
content: |
|
||||
i am f1
|
||||
state: exists
|
||||
- name: "@@file2b"
|
||||
path: "/tmp/mgmtB/f2b"
|
||||
content: |
|
||||
i am f2, exported from host B
|
||||
i am fB, exported from host B
|
||||
state: exists
|
||||
collect:
|
||||
- kind: file
|
||||
|
||||
@@ -2,15 +2,10 @@
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1c
|
||||
path: "/tmp/mgmtC/f1c"
|
||||
- name: "@@filec"
|
||||
path: "/tmp/mgmtC/fC"
|
||||
content: |
|
||||
i am f1
|
||||
state: exists
|
||||
- name: "@@file2c"
|
||||
path: "/tmp/mgmtC/f2c"
|
||||
content: |
|
||||
i am f2, exported from host C
|
||||
i am fC, exported from host C
|
||||
state: exists
|
||||
collect:
|
||||
- kind: file
|
||||
|
||||
@@ -2,15 +2,10 @@
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1d
|
||||
path: "/tmp/mgmtD/f1d"
|
||||
- name: "@@filed"
|
||||
path: "/tmp/mgmtD/fD"
|
||||
content: |
|
||||
i am f1
|
||||
state: exists
|
||||
- name: "@@file2d"
|
||||
path: "/tmp/mgmtD/f2d"
|
||||
content: |
|
||||
i am f2, exported from host D
|
||||
i am fD, exported from host D
|
||||
state: exists
|
||||
collect:
|
||||
- kind: file
|
||||
|
||||
13
examples/etcd1e.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: "@@filee"
|
||||
path: "/tmp/mgmtE/fE"
|
||||
content: |
|
||||
i am fE, exported from host E
|
||||
state: exists
|
||||
collect:
|
||||
- kind: file
|
||||
pattern: "/tmp/mgmtE/"
|
||||
edges: []
|
||||
67
examples/exec3-sema.yaml
Normal file
@@ -0,0 +1,67 @@
|
||||
---
|
||||
graph: parallel
|
||||
resources:
|
||||
exec:
|
||||
- name: pkg10
|
||||
meta:
|
||||
sema: ['mylock:1', 'otherlock:42']
|
||||
cmd: sleep 10s
|
||||
shell: ''
|
||||
timeout: 0
|
||||
watchcmd: ''
|
||||
watchshell: ''
|
||||
ifcmd: ''
|
||||
ifshell: ''
|
||||
pollint: 0
|
||||
state: present
|
||||
- name: svc10
|
||||
meta:
|
||||
sema: ['mylock:1']
|
||||
cmd: sleep 10s
|
||||
shell: ''
|
||||
timeout: 0
|
||||
watchcmd: ''
|
||||
watchshell: ''
|
||||
ifcmd: ''
|
||||
ifshell: ''
|
||||
pollint: 0
|
||||
state: present
|
||||
- name: exec10
|
||||
meta:
|
||||
sema: ['mylock:1']
|
||||
cmd: sleep 10s
|
||||
shell: ''
|
||||
timeout: 0
|
||||
watchcmd: ''
|
||||
watchshell: ''
|
||||
ifcmd: ''
|
||||
ifshell: ''
|
||||
pollint: 0
|
||||
state: present
|
||||
- name: pkg15
|
||||
meta:
|
||||
sema: ['mylock:1', 'otherlock:42']
|
||||
cmd: sleep 15s
|
||||
shell: ''
|
||||
timeout: 0
|
||||
watchcmd: ''
|
||||
watchshell: ''
|
||||
ifcmd: ''
|
||||
ifshell: ''
|
||||
pollint: 0
|
||||
state: present
|
||||
edges:
|
||||
- name: e1
|
||||
from:
|
||||
kind: exec
|
||||
name: pkg10
|
||||
to:
|
||||
kind: exec
|
||||
name: svc10
|
||||
- name: e2
|
||||
from:
|
||||
kind: exec
|
||||
name: svc10
|
||||
to:
|
||||
kind: exec
|
||||
name: exec10
|
||||
10
examples/file0.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file0
|
||||
path: "/tmp/mgmt/f1"
|
||||
content: |
|
||||
i am f0
|
||||
state: exists
|
||||
edges: []
|
||||
13
examples/file2.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
noop:
|
||||
- name: noop1
|
||||
file:
|
||||
- name: file1
|
||||
path: "/tmp/mgmt/hello/"
|
||||
source: "/var/lib/mgmt/files/some_dir/"
|
||||
recurse: true
|
||||
force: true
|
||||
state: exists
|
||||
edges: []
|
||||
13
examples/file3.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1
|
||||
meta:
|
||||
limit: .inf
|
||||
burst: 0
|
||||
path: "/tmp/mgmt/hello"
|
||||
content: |
|
||||
i am a file
|
||||
state: exists
|
||||
edges: []
|
||||
10
examples/file4.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1
|
||||
path: "/tmp/mgmt/hello"
|
||||
content: |
|
||||
i am a file
|
||||
state: exists
|
||||
edges: []
|
||||
7
examples/hostname.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
hostname:
|
||||
- name: Hostname Watcher @ TestHost
|
||||
hostname: test.hostname.example.com
|
||||
edges: []
|
||||
8
examples/kv1.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
kv:
|
||||
- name: kv1
|
||||
key: "hello"
|
||||
value: "world"
|
||||
edges: []
|
||||
7
examples/kv2.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
kv:
|
||||
- name: kv1
|
||||
key: "iamdeleted"
|
||||
edges: []
|
||||
9
examples/kv3.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
kv:
|
||||
- name: kv1
|
||||
key: "stage"
|
||||
value: "3"
|
||||
skiplessthan: true
|
||||
edges: []
|
||||
31
examples/kv4.yaml
Normal file
@@ -0,0 +1,31 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
kv:
|
||||
- name: kv1
|
||||
key: "stage"
|
||||
value: "1"
|
||||
skiplessthan: true
|
||||
- name: kv2
|
||||
key: "stage"
|
||||
value: "2"
|
||||
skiplessthan: true
|
||||
- name: kv3
|
||||
key: "stage"
|
||||
value: "3"
|
||||
skiplessthan: true
|
||||
edges:
|
||||
- name: e1
|
||||
from:
|
||||
kind: kv
|
||||
name: kv1
|
||||
to:
|
||||
kind: kv
|
||||
name: kv2
|
||||
- name: e2
|
||||
from:
|
||||
kind: kv
|
||||
name: kv2
|
||||
to:
|
||||
kind: kv
|
||||
name: kv3
|
||||
246
examples/lib/exec-send-recv.go
Normal file
@@ -0,0 +1,246 @@
|
||||
// libmgmt example of send->recv
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/gapi"
|
||||
mgmt "github.com/purpleidea/mgmt/lib"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
)
|
||||
|
||||
// MyGAPI implements the main GAPI interface.
|
||||
type MyGAPI struct {
|
||||
Name string // graph name
|
||||
Interval uint // refresh interval, 0 to never refresh
|
||||
|
||||
data gapi.Data
|
||||
initialized bool
|
||||
closeChan chan struct{}
|
||||
wg sync.WaitGroup // sync group for tunnel go routines
|
||||
}
|
||||
|
||||
// NewMyGAPI creates a new MyGAPI struct and calls Init().
|
||||
func NewMyGAPI(data gapi.Data, name string, interval uint) (*MyGAPI, error) {
|
||||
obj := &MyGAPI{
|
||||
Name: name,
|
||||
Interval: interval,
|
||||
}
|
||||
return obj, obj.Init(data)
|
||||
}
|
||||
|
||||
// Init initializes the MyGAPI struct.
|
||||
func (obj *MyGAPI) Init(data gapi.Data) error {
|
||||
if obj.initialized {
|
||||
return fmt.Errorf("already initialized")
|
||||
}
|
||||
if obj.Name == "" {
|
||||
return fmt.Errorf("the graph name must be specified")
|
||||
}
|
||||
obj.data = data // store for later
|
||||
obj.closeChan = make(chan struct{})
|
||||
obj.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Graph returns a current Graph.
|
||||
func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
|
||||
if !obj.initialized {
|
||||
return nil, fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
|
||||
g, err := pgraph.NewGraph(obj.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FIXME: these are being specified temporarily until it's the default!
|
||||
metaparams := resources.DefaultMetaParams
|
||||
|
||||
exec1 := &resources.ExecRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "exec1",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
Cmd: "echo hello world && echo goodbye world 1>&2", // to stdout && stderr
|
||||
Shell: "/bin/bash",
|
||||
}
|
||||
g.AddVertex(exec1)
|
||||
|
||||
output := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "output",
|
||||
MetaParams: metaparams,
|
||||
// send->recv!
|
||||
Recv: map[string]*resources.Send{
|
||||
"Content": {Res: exec1, Key: "Output"},
|
||||
},
|
||||
},
|
||||
Path: "/tmp/mgmt/output",
|
||||
State: "present",
|
||||
}
|
||||
g.AddVertex(output)
|
||||
g.AddEdge(exec1, output, &resources.Edge{Name: "e0"})
|
||||
|
||||
stdout := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "stdout",
|
||||
MetaParams: metaparams,
|
||||
// send->recv!
|
||||
Recv: map[string]*resources.Send{
|
||||
"Content": {Res: exec1, Key: "Stdout"},
|
||||
},
|
||||
},
|
||||
Path: "/tmp/mgmt/stdout",
|
||||
State: "present",
|
||||
}
|
||||
g.AddVertex(stdout)
|
||||
g.AddEdge(exec1, stdout, &resources.Edge{Name: "e1"})
|
||||
|
||||
stderr := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "stderr",
|
||||
MetaParams: metaparams,
|
||||
// send->recv!
|
||||
Recv: map[string]*resources.Send{
|
||||
"Content": {Res: exec1, Key: "Stderr"},
|
||||
},
|
||||
},
|
||||
Path: "/tmp/mgmt/stderr",
|
||||
State: "present",
|
||||
}
|
||||
g.AddVertex(stderr)
|
||||
g.AddEdge(exec1, stderr, &resources.Edge{Name: "e2"})
|
||||
|
||||
//g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.World, obj.data.Noop)
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Next returns nil errors every time there could be a new graph.
|
||||
func (obj *MyGAPI) Next() chan gapi.Next {
|
||||
ch := make(chan gapi.Next)
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
defer close(ch) // this will run before the obj.wg.Done()
|
||||
if !obj.initialized {
|
||||
next := gapi.Next{
|
||||
Err: fmt.Errorf("libmgmt: MyGAPI is not initialized"),
|
||||
Exit: true, // exit, b/c programming error?
|
||||
}
|
||||
ch <- next
|
||||
return
|
||||
}
|
||||
startChan := make(chan struct{}) // start signal
|
||||
close(startChan) // kick it off!
|
||||
|
||||
ticker := make(<-chan time.Time)
|
||||
if obj.data.NoStreamWatch || obj.Interval <= 0 {
|
||||
ticker = nil
|
||||
} else {
|
||||
// arbitrarily change graph every interval seconds
|
||||
t := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||
defer t.Stop()
|
||||
ticker = t.C
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-startChan: // kick the loop once at start
|
||||
startChan = nil // disable
|
||||
// pass
|
||||
case <-ticker:
|
||||
// pass
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("libmgmt: Generating new graph...")
|
||||
select {
|
||||
case ch <- gapi.Next{}: // trigger a run
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Close shuts down the MyGAPI.
|
||||
func (obj *MyGAPI) Close() error {
|
||||
if !obj.initialized {
|
||||
return fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
close(obj.closeChan)
|
||||
obj.wg.Wait()
|
||||
obj.initialized = false // closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run runs an embedded mgmt server.
|
||||
func Run() error {
|
||||
|
||||
obj := &mgmt.Main{}
|
||||
obj.Program = "libmgmt" // TODO: set on compilation
|
||||
obj.Version = "0.0.1" // TODO: set on compilation
|
||||
obj.TmpPrefix = true // disable for easy debugging
|
||||
//prefix := "/tmp/testprefix/"
|
||||
//obj.Prefix = &prefix // enable for easy debugging
|
||||
obj.IdealClusterSize = -1
|
||||
obj.ConvergedTimeout = -1
|
||||
obj.Noop = false // FIXME: careful!
|
||||
|
||||
obj.GAPI = &MyGAPI{ // graph API
|
||||
Name: "libmgmt", // TODO: set on compilation
|
||||
Interval: 60 * 10, // arbitrarily change graph every 10 minutes
|
||||
}
|
||||
|
||||
if err := obj.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig == os.Interrupt {
|
||||
log.Println("Interrupted by ^C")
|
||||
obj.Exit(nil)
|
||||
return
|
||||
}
|
||||
log.Println("Interrupted by signal")
|
||||
obj.Exit(fmt.Errorf("killed by %v", sig))
|
||||
return
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if err := obj.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Printf("Hello!")
|
||||
if err := Run(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
log.Printf("Goodbye!")
|
||||
}
|
||||
255
examples/lib/libmgmt-subgraph0.go
Normal file
@@ -0,0 +1,255 @@
|
||||
// libmgmt example of flattened subgraph
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/gapi"
|
||||
mgmt "github.com/purpleidea/mgmt/lib"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// MyGAPI implements the main GAPI interface.
|
||||
type MyGAPI struct {
|
||||
Name string // graph name
|
||||
Interval uint // refresh interval, 0 to never refresh
|
||||
|
||||
data gapi.Data
|
||||
initialized bool
|
||||
closeChan chan struct{}
|
||||
wg sync.WaitGroup // sync group for tunnel go routines
|
||||
}
|
||||
|
||||
// NewMyGAPI creates a new MyGAPI struct and calls Init().
|
||||
func NewMyGAPI(data gapi.Data, name string, interval uint) (*MyGAPI, error) {
|
||||
obj := &MyGAPI{
|
||||
Name: name,
|
||||
Interval: interval,
|
||||
}
|
||||
return obj, obj.Init(data)
|
||||
}
|
||||
|
||||
// Init initializes the MyGAPI struct.
|
||||
func (obj *MyGAPI) Init(data gapi.Data) error {
|
||||
if obj.initialized {
|
||||
return fmt.Errorf("already initialized")
|
||||
}
|
||||
if obj.Name == "" {
|
||||
return fmt.Errorf("the graph name must be specified")
|
||||
}
|
||||
obj.data = data // store for later
|
||||
obj.closeChan = make(chan struct{})
|
||||
obj.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
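// subGraph builds the small subgraph (a file resource and a noop resource) that gets grafted into the main graph.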
func (obj *MyGAPI) subGraph() (*pgraph.Graph, error) {
|
||||
g, err := pgraph.NewGraph(obj.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metaparams := resources.DefaultMetaParams
|
||||
|
||||
f1 := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "file1",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
Path: "/tmp/mgmt/sub1",
|
||||
|
||||
State: "present",
|
||||
}
|
||||
g.AddVertex(f1)
|
||||
|
||||
n1 := &resources.NoopRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "noop1",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
}
|
||||
g.AddVertex(n1)
|
||||
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Graph returns a current Graph.
|
||||
func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
|
||||
if !obj.initialized {
|
||||
return nil, fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
|
||||
g, err := pgraph.NewGraph(obj.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FIXME: these are being specified temporarily until it's the default!
|
||||
metaparams := resources.DefaultMetaParams
|
||||
|
||||
content := "I created a subgraph!\n"
|
||||
f0 := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "README",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
Path: "/tmp/mgmt/README",
|
||||
Content: &content,
|
||||
State: "present",
|
||||
}
|
||||
g.AddVertex(f0)
|
||||
|
||||
subGraph, err := obj.subGraph()
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "running subGraph() failed")
|
||||
}
|
||||
|
||||
edgeGenFn := func(v1, v2 pgraph.Vertex) pgraph.Edge {
|
||||
edge := &resources.Edge{
|
||||
Name: fmt.Sprintf("edge: %s->%s", v1, v2),
|
||||
}
|
||||
|
||||
// if we want to do something specific based on input
|
||||
_, v2IsFile := v2.(*resources.FileRes)
|
||||
if v1 == f0 && v2IsFile {
|
||||
edge.Notify = true
|
||||
}
|
||||
|
||||
return edge
|
||||
}
|
||||
g.AddEdgeVertexGraph(f0, subGraph, edgeGenFn)
|
||||
|
||||
//g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.World, obj.data.Noop)
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Next returns nil errors every time there could be a new graph.
|
||||
func (obj *MyGAPI) Next() chan gapi.Next {
|
||||
ch := make(chan gapi.Next)
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
defer close(ch) // this will run before the obj.wg.Done()
|
||||
if !obj.initialized {
|
||||
next := gapi.Next{
|
||||
Err: fmt.Errorf("libmgmt: MyGAPI is not initialized"),
|
||||
Exit: true, // exit, b/c programming error?
|
||||
}
|
||||
ch <- next
|
||||
return
|
||||
}
|
||||
startChan := make(chan struct{}) // start signal
|
||||
close(startChan) // kick it off!
|
||||
|
||||
ticker := make(<-chan time.Time)
|
||||
if obj.data.NoStreamWatch || obj.Interval <= 0 {
|
||||
ticker = nil
|
||||
} else {
|
||||
// arbitrarily change graph every interval seconds
|
||||
t := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||
defer t.Stop()
|
||||
ticker = t.C
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-startChan: // kick the loop once at start
|
||||
startChan = nil // disable
|
||||
// pass
|
||||
case <-ticker:
|
||||
// pass
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("libmgmt: Generating new graph...")
|
||||
select {
|
||||
case ch <- gapi.Next{}: // trigger a run
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Close shuts down the MyGAPI.
|
||||
func (obj *MyGAPI) Close() error {
|
||||
if !obj.initialized {
|
||||
return fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
close(obj.closeChan)
|
||||
obj.wg.Wait()
|
||||
obj.initialized = false // closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run runs an embedded mgmt server.
|
||||
func Run() error {
|
||||
|
||||
obj := &mgmt.Main{}
|
||||
obj.Program = "libmgmt" // TODO: set on compilation
|
||||
obj.Version = "0.0.1" // TODO: set on compilation
|
||||
obj.TmpPrefix = true // disable for easy debugging
|
||||
//prefix := "/tmp/testprefix/"
|
||||
//obj.Prefix = &prefix // enable for easy debugging
|
||||
obj.IdealClusterSize = -1
|
||||
obj.ConvergedTimeout = -1
|
||||
obj.Noop = false // FIXME: careful!
|
||||
|
||||
obj.GAPI = &MyGAPI{ // graph API
|
||||
Name: "libmgmt", // TODO: set on compilation
|
||||
Interval: 60 * 10, // arbitrarily change graph every 10 minutes
|
||||
}
|
||||
|
||||
if err := obj.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig == os.Interrupt {
|
||||
log.Println("Interrupted by ^C")
|
||||
obj.Exit(nil)
|
||||
return
|
||||
}
|
||||
log.Println("Interrupted by signal")
|
||||
obj.Exit(fmt.Errorf("killed by %v", sig))
|
||||
return
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if err := obj.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Printf("Hello!")
|
||||
if err := Run(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
log.Printf("Goodbye!")
|
||||
}
|
||||
243
examples/lib/libmgmt-subgraph1.go
Normal file
@@ -0,0 +1,243 @@
|
||||
// libmgmt example of graph resource
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/gapi"
|
||||
mgmt "github.com/purpleidea/mgmt/lib"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
)
|
||||
|
||||
// MyGAPI implements the main GAPI interface.
|
||||
type MyGAPI struct {
|
||||
Name string // graph name
|
||||
Interval uint // refresh interval, 0 to never refresh
|
||||
|
||||
data gapi.Data
|
||||
initialized bool
|
||||
closeChan chan struct{}
|
||||
wg sync.WaitGroup // sync group for tunnel go routines
|
||||
}
|
||||
|
||||
// NewMyGAPI creates a new MyGAPI struct and calls Init().
|
||||
func NewMyGAPI(data gapi.Data, name string, interval uint) (*MyGAPI, error) {
|
||||
obj := &MyGAPI{
|
||||
Name: name,
|
||||
Interval: interval,
|
||||
}
|
||||
return obj, obj.Init(data)
|
||||
}
|
||||
|
||||
// Init initializes the MyGAPI struct.
|
||||
func (obj *MyGAPI) Init(data gapi.Data) error {
|
||||
if obj.initialized {
|
||||
return fmt.Errorf("already initialized")
|
||||
}
|
||||
if obj.Name == "" {
|
||||
return fmt.Errorf("the graph name must be specified")
|
||||
}
|
||||
obj.data = data // store for later
|
||||
obj.closeChan = make(chan struct{})
|
||||
obj.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Graph returns a current Graph.
|
||||
func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
|
||||
if !obj.initialized {
|
||||
return nil, fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
|
||||
g, err := pgraph.NewGraph(obj.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FIXME: these are being specified temporarily until it's the default!
|
||||
metaparams := resources.DefaultMetaParams
|
||||
|
||||
content := "I created a subgraph!\n"
|
||||
f0 := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "README",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
Path: "/tmp/mgmt/README",
|
||||
Content: &content,
|
||||
State: "present",
|
||||
}
|
||||
g.AddVertex(f0)
|
||||
|
||||
// create a subgraph to add *into* a graph resource
|
||||
subGraph, err := pgraph.NewGraph(fmt.Sprintf("%s->subgraph", obj.Name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// add elements into the sub graph
|
||||
f1 := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "file1",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
Path: "/tmp/mgmt/sub1",
|
||||
|
||||
State: "present",
|
||||
}
|
||||
subGraph.AddVertex(f1)
|
||||
|
||||
n1 := &resources.NoopRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "noop1",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
}
|
||||
subGraph.AddVertex(n1)
|
||||
|
||||
e0 := &resources.Edge{Name: "e0"}
|
||||
e0.Notify = true // send a notification from f1 to n1
|
||||
subGraph.AddEdge(f1, n1, e0)
|
||||
|
||||
// create the actual resource to hold the sub graph
|
||||
subGraphRes0 := &resources.GraphRes{ // TODO: should we name this SubGraphRes ?
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "subgraph1",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
Graph: subGraph,
|
||||
}
|
||||
g.AddVertex(subGraphRes0) // add it to the main graph
|
||||
|
||||
//g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.World, obj.data.Noop)
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Next returns nil errors every time there could be a new graph.
|
||||
func (obj *MyGAPI) Next() chan gapi.Next {
|
||||
ch := make(chan gapi.Next)
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
defer close(ch) // this will run before the obj.wg.Done()
|
||||
if !obj.initialized {
|
||||
next := gapi.Next{
|
||||
Err: fmt.Errorf("libmgmt: MyGAPI is not initialized"),
|
||||
Exit: true, // exit, b/c programming error?
|
||||
}
|
||||
ch <- next
|
||||
return
|
||||
}
|
||||
startChan := make(chan struct{}) // start signal
|
||||
close(startChan) // kick it off!
|
||||
|
||||
ticker := make(<-chan time.Time)
|
||||
if obj.data.NoStreamWatch || obj.Interval <= 0 {
|
||||
ticker = nil
|
||||
} else {
|
||||
// arbitrarily change graph every interval seconds
|
||||
t := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||
defer t.Stop()
|
||||
ticker = t.C
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-startChan: // kick the loop once at start
|
||||
startChan = nil // disable
|
||||
// pass
|
||||
case <-ticker:
|
||||
// pass
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("libmgmt: Generating new graph...")
|
||||
select {
|
||||
case ch <- gapi.Next{}: // trigger a run
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Close shuts down the MyGAPI.
|
||||
func (obj *MyGAPI) Close() error {
|
||||
if !obj.initialized {
|
||||
return fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
close(obj.closeChan)
|
||||
obj.wg.Wait()
|
||||
obj.initialized = false // closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run runs an embedded mgmt server.
|
||||
func Run() error {
|
||||
|
||||
obj := &mgmt.Main{}
|
||||
obj.Program = "libmgmt" // TODO: set on compilation
|
||||
obj.Version = "0.0.1" // TODO: set on compilation
|
||||
obj.TmpPrefix = true // disable for easy debugging
|
||||
//prefix := "/tmp/testprefix/"
|
||||
//obj.Prefix = &prefix // enable for easy debugging
|
||||
obj.IdealClusterSize = -1
|
||||
obj.ConvergedTimeout = -1
|
||||
obj.Noop = false // FIXME: careful!
|
||||
|
||||
obj.GAPI = &MyGAPI{ // graph API
|
||||
Name: "libmgmt", // TODO: set on compilation
|
||||
Interval: 60 * 10, // arbitrarily change graph every 10 minutes
|
||||
}
|
||||
|
||||
if err := obj.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig == os.Interrupt {
|
||||
log.Println("Interrupted by ^C")
|
||||
obj.Exit(nil)
|
||||
return
|
||||
}
|
||||
log.Println("Interrupted by signal")
|
||||
obj.Exit(fmt.Errorf("killed by %v", sig))
|
||||
return
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if err := obj.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Printf("Hello!")
|
||||
if err := Run(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
log.Printf("Goodbye!")
|
||||
}
|
||||
207
examples/lib/libmgmt1.go
Normal file
@@ -0,0 +1,207 @@
|
||||
// libmgmt example
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/gapi"
|
||||
mgmt "github.com/purpleidea/mgmt/lib"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
"github.com/purpleidea/mgmt/yamlgraph"
|
||||
)
|
||||
|
||||
// MyGAPI implements the main GAPI interface.
|
||||
type MyGAPI struct {
|
||||
Name string // graph name
|
||||
Interval uint // refresh interval, 0 to never refresh
|
||||
|
||||
data gapi.Data
|
||||
initialized bool
|
||||
closeChan chan struct{}
|
||||
wg sync.WaitGroup // sync group for tunnel go routines
|
||||
}
|
||||
|
||||
// NewMyGAPI creates a new MyGAPI struct and calls Init().
|
||||
func NewMyGAPI(data gapi.Data, name string, interval uint) (*MyGAPI, error) {
|
||||
obj := &MyGAPI{
|
||||
Name: name,
|
||||
Interval: interval,
|
||||
}
|
||||
return obj, obj.Init(data)
|
||||
}
|
||||
|
||||
// Init initializes the MyGAPI struct.
|
||||
func (obj *MyGAPI) Init(data gapi.Data) error {
|
||||
if obj.initialized {
|
||||
return fmt.Errorf("already initialized")
|
||||
}
|
||||
if obj.Name == "" {
|
||||
return fmt.Errorf("the graph name must be specified")
|
||||
}
|
||||
obj.data = data // store for later
|
||||
obj.closeChan = make(chan struct{})
|
||||
obj.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Graph returns a current Graph.
|
||||
func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
|
||||
if !obj.initialized {
|
||||
return nil, fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
|
||||
n1 := &resources.NoopRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "noop1",
|
||||
MetaParams: resources.DefaultMetaParams,
|
||||
},
|
||||
}
|
||||
|
||||
// we can still build a graph via the yaml method
|
||||
gc := &yamlgraph.GraphConfig{
|
||||
Graph: obj.Name,
|
||||
Resources: yamlgraph.Resources{ // must redefine anonymous struct :(
|
||||
// in alphabetical order
|
||||
Exec: []*resources.ExecRes{},
|
||||
File: []*resources.FileRes{},
|
||||
Msg: []*resources.MsgRes{},
|
||||
Noop: []*resources.NoopRes{n1},
|
||||
Pkg: []*resources.PkgRes{},
|
||||
Svc: []*resources.SvcRes{},
|
||||
Timer: []*resources.TimerRes{},
|
||||
Virt: []*resources.VirtRes{},
|
||||
},
|
||||
//Collector: []collectorResConfig{},
|
||||
//Edges: []Edge{},
|
||||
Comment: "comment!",
|
||||
}
|
||||
|
||||
g, err := gc.NewGraphFromConfig(obj.data.Hostname, obj.data.World, obj.data.Noop)
|
||||
return g, err
|
||||
}
|
||||
|
||||
// Next returns nil errors every time there could be a new graph.
|
||||
func (obj *MyGAPI) Next() chan gapi.Next {
|
||||
ch := make(chan gapi.Next)
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
defer close(ch) // this will run before the obj.wg.Done()
|
||||
if !obj.initialized {
|
||||
next := gapi.Next{
|
||||
Err: fmt.Errorf("libmgmt: MyGAPI is not initialized"),
|
||||
Exit: true, // exit, b/c programming error?
|
||||
}
|
||||
ch <- next
return
|
||||
}
|
||||
startChan := make(chan struct{}) // start signal
|
||||
close(startChan) // kick it off!
|
||||
|
||||
ticker := make(<-chan time.Time)
|
||||
if obj.data.NoStreamWatch || obj.Interval <= 0 {
|
||||
ticker = nil
|
||||
} else {
|
||||
// arbitrarily change graph every interval seconds
|
||||
t := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||
defer t.Stop()
|
||||
ticker = t.C
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-startChan: // kick the loop once at start
|
||||
startChan = nil // disable
|
||||
// pass
|
||||
case <-ticker:
|
||||
// pass
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("libmgmt: Generating new graph...")
|
||||
select {
|
||||
case ch <- gapi.Next{}: // trigger a run
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Close shuts down the MyGAPI.
|
||||
func (obj *MyGAPI) Close() error {
|
||||
if !obj.initialized {
|
||||
return fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
close(obj.closeChan)
|
||||
obj.wg.Wait()
|
||||
obj.initialized = false // closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run runs an embedded mgmt server.
|
||||
func Run() error {
|
||||
|
||||
obj := &mgmt.Main{}
|
||||
obj.Program = "libmgmt" // TODO: set on compilation
|
||||
obj.Version = "0.0.1" // TODO: set on compilation
|
||||
obj.TmpPrefix = true
|
||||
obj.IdealClusterSize = -1
|
||||
obj.ConvergedTimeout = -1
|
||||
obj.Noop = true
|
||||
|
||||
obj.GAPI = &MyGAPI{ // graph API
|
||||
Name: "libmgmt", // TODO: set on compilation
|
||||
Interval: 15, // arbitrarily change graph every 15 seconds
|
||||
}
|
||||
|
||||
if err := obj.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig == os.Interrupt {
|
||||
log.Println("Interrupted by ^C")
|
||||
obj.Exit(nil)
|
||||
return
|
||||
}
|
||||
log.Println("Interrupted by signal")
|
||||
obj.Exit(fmt.Errorf("killed by %v", sig))
|
||||
return
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if err := obj.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Printf("Hello!")
|
||||
if err := Run(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
log.Printf("Goodbye!")
|
||||
}
|
||||
209
examples/lib/libmgmt2.go
Normal file
@@ -0,0 +1,209 @@
|
||||
// libmgmt example
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/gapi"
|
||||
mgmt "github.com/purpleidea/mgmt/lib"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
)
|
||||
|
||||
// MyGAPI implements the main GAPI interface.
|
||||
type MyGAPI struct {
|
||||
Name string // graph name
|
||||
Count uint // number of resources to create
|
||||
Interval uint // refresh interval, 0 to never refresh
|
||||
|
||||
data gapi.Data
|
||||
initialized bool
|
||||
closeChan chan struct{}
|
||||
wg sync.WaitGroup // sync group for tunnel go routines
|
||||
}
|
||||
|
||||
// NewMyGAPI creates a new MyGAPI struct and calls Init().
|
||||
func NewMyGAPI(data gapi.Data, name string, interval uint, count uint) (*MyGAPI, error) {
|
||||
obj := &MyGAPI{
|
||||
Name: name,
|
||||
Count: count,
|
||||
Interval: interval,
|
||||
}
|
||||
return obj, obj.Init(data)
|
||||
}
|
||||
|
||||
// Init initializes the MyGAPI struct.
|
||||
func (obj *MyGAPI) Init(data gapi.Data) error {
|
||||
if obj.initialized {
|
||||
return fmt.Errorf("already initialized")
|
||||
}
|
||||
if obj.Name == "" {
|
||||
return fmt.Errorf("the graph name must be specified")
|
||||
}
|
||||
obj.data = data // store for later
|
||||
obj.closeChan = make(chan struct{})
|
||||
obj.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Graph returns a current Graph.
|
||||
func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
|
||||
if !obj.initialized {
|
||||
return nil, fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
|
||||
g, err := pgraph.NewGraph(obj.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var vertex pgraph.Vertex
|
||||
for i := uint(0); i < obj.Count; i++ {
|
||||
n := &resources.NoopRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: fmt.Sprintf("noop%d", i),
|
||||
MetaParams: resources.DefaultMetaParams,
|
||||
},
|
||||
}
|
||||
g.AddVertex(n)
|
||||
if i > 0 {
|
||||
g.AddEdge(vertex, n, &resources.Edge{Name: fmt.Sprintf("e%d", i)})
|
||||
}
|
||||
vertex = n // save
|
||||
}
|
||||
|
||||
//g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.World, obj.data.Noop)
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Next returns nil errors every time there could be a new graph.
|
||||
func (obj *MyGAPI) Next() chan gapi.Next {
|
||||
ch := make(chan gapi.Next)
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
defer close(ch) // this will run before the obj.wg.Done()
|
||||
if !obj.initialized {
|
||||
next := gapi.Next{
|
||||
Err: fmt.Errorf("libmgmt: MyGAPI is not initialized"),
|
||||
Exit: true, // exit, b/c programming error?
|
||||
}
|
||||
ch <- next
return
|
||||
}
|
||||
startChan := make(chan struct{}) // start signal
|
||||
close(startChan) // kick it off!
|
||||
|
||||
ticker := make(<-chan time.Time)
|
||||
if obj.data.NoStreamWatch || obj.Interval <= 0 {
|
||||
ticker = nil
|
||||
} else {
|
||||
// arbitrarily change graph every interval seconds
|
||||
t := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||
defer t.Stop()
|
||||
ticker = t.C
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-startChan: // kick the loop once at start
|
||||
startChan = nil // disable
|
||||
// pass
|
||||
case <-ticker:
|
||||
// pass
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("libmgmt: Generating new graph...")
|
||||
select {
|
||||
case ch <- gapi.Next{}: // trigger a run
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Close shuts down the MyGAPI.
|
||||
func (obj *MyGAPI) Close() error {
|
||||
if !obj.initialized {
|
||||
return fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
close(obj.closeChan)
|
||||
obj.wg.Wait()
|
||||
obj.initialized = false // closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run runs an embedded mgmt server.
|
||||
func Run(count uint) error {
|
||||
|
||||
obj := &mgmt.Main{}
|
||||
obj.Program = "libmgmt" // TODO: set on compilation
|
||||
obj.Version = "0.0.1" // TODO: set on compilation
|
||||
obj.TmpPrefix = true
|
||||
obj.IdealClusterSize = -1
|
||||
obj.ConvergedTimeout = -1
|
||||
obj.Noop = true
|
||||
|
||||
obj.GAPI = &MyGAPI{ // graph API
|
||||
Name: "libmgmt", // TODO: set on compilation
|
||||
Count: count, // number of vertices to add
|
||||
Interval: 15, // arbitrarily change graph every 15 seconds
|
||||
}
|
||||
|
||||
if err := obj.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig == os.Interrupt {
|
||||
log.Println("Interrupted by ^C")
|
||||
obj.Exit(nil)
|
||||
return
|
||||
}
|
||||
log.Println("Interrupted by signal")
|
||||
obj.Exit(fmt.Errorf("killed by %v", sig))
|
||||
return
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if err := obj.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Printf("Hello!")
|
||||
var count uint = 1 // default
|
||||
if len(os.Args) == 2 {
|
||||
if i, err := strconv.Atoi(os.Args[1]); err == nil && i > 0 {
|
||||
count = uint(i)
|
||||
}
|
||||
}
|
||||
if err := Run(count); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
log.Printf("Goodbye!")
|
||||
}
|
||||
248
examples/lib/libmgmt3.go
Normal file
@@ -0,0 +1,248 @@
|
||||
// libmgmt example of send->recv
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/gapi"
|
||||
mgmt "github.com/purpleidea/mgmt/lib"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
)
|
||||
|
||||
// MyGAPI implements the main GAPI interface.
|
||||
type MyGAPI struct {
|
||||
Name string // graph name
|
||||
Interval uint // refresh interval, 0 to never refresh
|
||||
|
||||
data gapi.Data
|
||||
initialized bool
|
||||
closeChan chan struct{}
|
||||
wg sync.WaitGroup // sync group for tunnel go routines
|
||||
}
|
||||
|
||||
// NewMyGAPI creates a new MyGAPI struct and calls Init().
|
||||
func NewMyGAPI(data gapi.Data, name string, interval uint) (*MyGAPI, error) {
|
||||
obj := &MyGAPI{
|
||||
Name: name,
|
||||
Interval: interval,
|
||||
}
|
||||
return obj, obj.Init(data)
|
||||
}
|
||||
|
||||
// Init initializes the MyGAPI struct.
|
||||
func (obj *MyGAPI) Init(data gapi.Data) error {
|
||||
if obj.initialized {
|
||||
return fmt.Errorf("already initialized")
|
||||
}
|
||||
if obj.Name == "" {
|
||||
return fmt.Errorf("the graph name must be specified")
|
||||
}
|
||||
obj.data = data // store for later
|
||||
obj.closeChan = make(chan struct{})
|
||||
obj.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Graph returns a current Graph.
|
||||
func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
|
||||
if !obj.initialized {
|
||||
return nil, fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
|
||||
g, err := pgraph.NewGraph(obj.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FIXME: these are being specified temporarily until it's the default!
|
||||
metaparams := resources.DefaultMetaParams
|
||||
|
||||
content := "Delete me to trigger a notification!\n"
|
||||
f0 := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "README",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
Path: "/tmp/mgmt/README",
|
||||
Content: &content,
|
||||
State: "present",
|
||||
}
|
||||
|
||||
g.AddVertex(f0)
|
||||
|
||||
p1 := &resources.PasswordRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "password1",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
Length: 8, // generated string will have this many characters
|
||||
Saved: true, // this causes passwords to be stored in plain text!
|
||||
}
|
||||
g.AddVertex(p1)
|
||||
|
||||
f1 := &resources.FileRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "file1",
|
||||
MetaParams: metaparams,
|
||||
// send->recv!
|
||||
Recv: map[string]*resources.Send{
|
||||
"Content": {Res: p1, Key: "Password"},
|
||||
},
|
||||
},
|
||||
Path: "/tmp/mgmt/secret",
|
||||
//Content: p1.Password, // won't work
|
||||
State: "present",
|
||||
}
|
||||
|
||||
g.AddVertex(f1)
|
||||
|
||||
n1 := &resources.NoopRes{
|
||||
BaseRes: resources.BaseRes{
|
||||
Name: "noop1",
|
||||
MetaParams: metaparams,
|
||||
},
|
||||
}
|
||||
|
||||
g.AddVertex(n1)
|
||||
|
||||
e0 := &resources.Edge{Name: "e0"}
|
||||
e0.Notify = true // send a notification from f0 to p1
|
||||
g.AddEdge(f0, p1, e0)
|
||||
|
||||
g.AddEdge(p1, f1, &resources.Edge{Name: "e1"})
|
||||
|
||||
e2 := &resources.Edge{Name: "e2"}
|
||||
e2.Notify = true // send a notification from f1 to n1
|
||||
g.AddEdge(f1, n1, e2)
|
||||
|
||||
//g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.World, obj.data.Noop)
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Next returns nil errors every time there could be a new graph.
|
||||
func (obj *MyGAPI) Next() chan gapi.Next {
|
||||
ch := make(chan gapi.Next)
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
defer close(ch) // this will run before the obj.wg.Done()
|
||||
if !obj.initialized {
|
||||
next := gapi.Next{
|
||||
Err: fmt.Errorf("libmgmt: MyGAPI is not initialized"),
|
||||
Exit: true, // exit, b/c programming error?
|
||||
}
|
||||
ch <- next
return
|
||||
}
|
||||
startChan := make(chan struct{}) // start signal
|
||||
close(startChan) // kick it off!
|
||||
|
||||
ticker := make(<-chan time.Time)
|
||||
if obj.data.NoStreamWatch || obj.Interval <= 0 {
|
||||
ticker = nil
|
||||
} else {
|
||||
// arbitrarily change graph every interval seconds
|
||||
t := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||
defer t.Stop()
|
||||
ticker = t.C
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-startChan: // kick the loop once at start
|
||||
startChan = nil // disable
|
||||
// pass
|
||||
case <-ticker:
|
||||
// pass
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("libmgmt: Generating new graph...")
|
||||
select {
|
||||
case ch <- gapi.Next{}: // trigger a run
|
||||
case <-obj.closeChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Close shuts down the MyGAPI.
|
||||
func (obj *MyGAPI) Close() error {
|
||||
if !obj.initialized {
|
||||
return fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||
}
|
||||
close(obj.closeChan)
|
||||
obj.wg.Wait()
|
||||
obj.initialized = false // closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run runs an embedded mgmt server.
|
||||
func Run() error {
|
||||
|
||||
obj := &mgmt.Main{}
|
||||
obj.Program = "libmgmt" // TODO: set on compilation
|
||||
obj.Version = "0.0.1" // TODO: set on compilation
|
||||
obj.TmpPrefix = true // disable for easy debugging
|
||||
//prefix := "/tmp/testprefix/"
|
||||
//obj.Prefix = &prefix // enable for easy debugging
|
||||
obj.IdealClusterSize = -1
|
||||
obj.ConvergedTimeout = -1
|
||||
obj.Noop = false // FIXME: careful!
|
||||
|
||||
obj.GAPI = &MyGAPI{ // graph API
|
||||
Name: "libmgmt", // TODO: set on compilation
|
||||
Interval: 60 * 10, // arbitrarily change graph every 10 minutes
|
||||
}
|
||||
|
||||
if err := obj.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// install the exit signal handler
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig == os.Interrupt {
|
||||
log.Println("Interrupted by ^C")
|
||||
obj.Exit(nil)
|
||||
return
|
||||
}
|
||||
log.Println("Interrupted by signal")
|
||||
obj.Exit(fmt.Errorf("killed by %v", sig))
|
||||
return
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if err := obj.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Printf("Hello!")
|
||||
if err := Run(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
log.Printf("Goodbye!")
|
||||
}
|
||||
13
examples/limit1.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1
|
||||
meta:
|
||||
limit: 0.2
|
||||
burst: 5
|
||||
path: "/tmp/mgmt/limit"
|
||||
content: |
|
||||
i am a normal file
|
||||
state: exists
|
||||
edges: []
|
||||
19
examples/msg1.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
graph: mygraph
|
||||
comment: timer example
|
||||
resources:
|
||||
timer:
|
||||
- name: timer1
|
||||
interval: 30
|
||||
msg:
|
||||
- name: msg1
|
||||
body: mgmt logged this message
|
||||
journal: true
|
||||
edges:
|
||||
- name: e1
|
||||
from:
|
||||
kind: timer
|
||||
name: timer1
|
||||
to:
|
||||
kind: msg
|
||||
name: msg1
|
||||
7
examples/noop0.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
graph: mygraph
|
||||
comment: simple noop example
|
||||
resources:
|
||||
noop:
|
||||
- name: noop0
|
||||
edges: []
|
||||
30
examples/noop2.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
---
|
||||
graph: mygraph
|
||||
comment: dangerous noop example
|
||||
resources:
|
||||
noop:
|
||||
- name: noop1
|
||||
meta:
|
||||
noop: true
|
||||
file:
|
||||
- name: file1
|
||||
path: "/tmp/mgmt/hello-noop"
|
||||
content: |
|
||||
hello world from @purpleidea
|
||||
state: exists
|
||||
meta:
|
||||
noop: true
|
||||
exec:
|
||||
- name: exec1
|
||||
meta:
|
||||
noop: true
|
||||
cmd: 'rm -rf /'
|
||||
shell: '/bin/bash'
|
||||
timeout: 0
|
||||
watchcmd: ''
|
||||
watchshell: ''
|
||||
ifcmd: ''
|
||||
ifshell: ''
|
||||
pollint: 0
|
||||
state: present
|
||||
edges: []
|
||||
7
examples/nspawn1.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
nspawn:
|
||||
- name: mgmt-nspawn1
|
||||
state: running
|
||||
edges: []
|
||||
7
examples/nspawn2.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
nspawn:
|
||||
- name: mgmt-nspawn2
|
||||
state: stopped
|
||||
edges: []
|
||||
24
examples/poll1.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
file:
|
||||
- name: file1
|
||||
meta:
|
||||
poll: 5
|
||||
path: "/tmp/mgmt/f1"
|
||||
content: |
|
||||
i poll every 5 seconds
|
||||
state: exists
|
||||
- name: file2
|
||||
path: "/tmp/mgmt/f2"
|
||||
content: |
|
||||
i use the event based watcher
|
||||
state: exists
|
||||
- name: file3
|
||||
meta:
|
||||
poll: 1
|
||||
path: "/tmp/mgmt/f3"
|
||||
content: |
|
||||
i poll every second
|
||||
state: exists
|
||||
edges: []
|
||||
20
examples/remote2a.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
---
|
||||
graph: mygraph
|
||||
comment: remote noop example
|
||||
resources:
|
||||
file:
|
||||
- name: file1a
|
||||
path: "/tmp/file1a"
|
||||
content: |
|
||||
i am file1a
|
||||
state: exists
|
||||
- name: "@@file2a"
|
||||
path: "/tmp/file2a"
|
||||
content: |
|
||||
i am file2a, exported from host a
|
||||
state: exists
|
||||
collect:
|
||||
- kind: file
|
||||
pattern: "/tmp/"
|
||||
edges: []
|
||||
remote: ssh://root:vagrant@192.168.121.201:22
|
||||
20
examples/remote2b.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
---
|
||||
graph: mygraph
|
||||
comment: remote noop example
|
||||
resources:
|
||||
file:
|
||||
- name: file1b
|
||||
path: "/tmp/file1b"
|
||||
content: |
|
||||
i am file1b
|
||||
state: exists
|
||||
- name: "@@file2b"
|
||||
path: "/tmp/file2b"
|
||||
content: |
|
||||
i am file2b, exported from host b
|
||||
state: exists
|
||||
collect:
|
||||
- kind: file
|
||||
pattern: "/tmp/"
|
||||
edges: []
|
||||
remote: ssh://root:vagrant@192.168.121.202:22
|
||||
57
examples/retry1.yaml
Normal file
@@ -0,0 +1,57 @@
|
||||
---
|
||||
graph: mygraph
|
||||
comment: You can test Watch and CheckApply failures with chmod ugo-r and chmod ugo-w.
|
||||
resources:
|
||||
exec:
|
||||
- name: exec1
|
||||
cmd: 'touch /tmp/mgmt/no-read && chmod ugo-r /tmp/mgmt/no-read'
|
||||
shell: '/bin/bash'
|
||||
timeout: 0
|
||||
watchcmd: ''
|
||||
watchshell: ''
|
||||
ifcmd: ''
|
||||
ifshell: ''
|
||||
pollint: 0
|
||||
state: present
|
||||
- name: exec2
|
||||
cmd: 'touch /tmp/mgmt/no-write && chmod ugo-w /tmp/mgmt/no-write'
|
||||
shell: '/bin/bash'
|
||||
timeout: 0
|
||||
watchcmd: ''
|
||||
watchshell: ''
|
||||
ifcmd: ''
|
||||
ifshell: ''
|
||||
pollint: 0
|
||||
state: present
|
||||
file:
|
||||
- name: noread
|
||||
path: "/tmp/mgmt/no-read"
|
||||
meta:
|
||||
retry: 3
|
||||
delay: 5000
|
||||
content: |
|
||||
i am f1
|
||||
state: exists
|
||||
- name: nowrite
|
||||
path: "/tmp/mgmt/no-write"
|
||||
meta:
|
||||
retry: 3
|
||||
delay: 5000
|
||||
content: |
|
||||
i am f1
|
||||
state: exists
|
||||
edges:
|
||||
- name: e1
|
||||
from:
|
||||
kind: exec
|
||||
name: exec1
|
||||
to:
|
||||
kind: file
|
||||
name: noread
|
||||
- name: e2
|
||||
from:
|
||||
kind: exec
|
||||
name: exec2
|
||||
to:
|
||||
kind: file
|
||||
name: nowrite
|
||||
8
examples/svc2.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
svc:
|
||||
- name: purpleidea
|
||||
state: running
|
||||
session: true
|
||||
edges: []
|
||||
@@ -4,7 +4,7 @@ comment: timer example
|
||||
resources:
|
||||
timer:
|
||||
- name: timer1
|
||||
interval: 30
|
||||
interval: 3
|
||||
exec:
|
||||
- name: exec1
|
||||
cmd: echo hello world
|
||||
|
||||
11
examples/virt1.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
virt:
|
||||
- name: mgmt1
|
||||
uri: 'qemu:///session'
|
||||
cpus: 1
|
||||
memory: 524288
|
||||
state: shutoff
|
||||
transient: true
|
||||
edges: []
|
||||
11
examples/virt2.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
virt:
|
||||
- name: mgmt2
|
||||
uri: 'qemu:///session'
|
||||
cpus: 1
|
||||
memory: 524288
|
||||
state: shutoff
|
||||
transient: false
|
||||
edges: []
|
||||
11
examples/virt3.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
virt:
|
||||
- name: mgmt3
|
||||
uri: 'qemu:///session'
|
||||
cpus: 1
|
||||
memory: 524288
|
||||
state: running
|
||||
transient: false
|
||||
edges: []
|
||||
21
examples/virt4.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
virt:
|
||||
- name: mgmt4
|
||||
meta:
|
||||
limit: .inf
|
||||
burst: 0
|
||||
uri: 'qemu:///session'
|
||||
cpus: 1
|
||||
maxcpus: 4
|
||||
memory: 524288
|
||||
boot:
|
||||
- hd
|
||||
disk:
|
||||
- type: qcow2
|
||||
source: "~/.local/share/libvirt/images/fedora-23-scratch.qcow2"
|
||||
state: running
|
||||
transient: false
|
||||
edges: []
|
||||
comment: "qemu-img create -b fedora-23.qcow2 -f qcow2 fedora-23-scratch.qcow2"
|
||||
439
exec.go
@@ -1,439 +0,0 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func init() {
|
||||
gob.Register(&ExecRes{})
|
||||
}
|
||||
|
||||
// ExecRes is an exec resource for running commands.
|
||||
type ExecRes struct {
|
||||
BaseRes `yaml:",inline"`
|
||||
State string `yaml:"state"` // state: exists/present?, absent, (undefined?)
|
||||
Cmd string `yaml:"cmd"` // the command to run
|
||||
Shell string `yaml:"shell"` // the (optional) shell to use to run the cmd
|
||||
Timeout int `yaml:"timeout"` // the cmd timeout in seconds
|
||||
WatchCmd string `yaml:"watchcmd"` // the watch command to run
|
||||
WatchShell string `yaml:"watchshell"` // the (optional) shell to use to run the watch cmd
|
||||
IfCmd string `yaml:"ifcmd"` // the if command to run
|
||||
IfShell string `yaml:"ifshell"` // the (optional) shell to use to run the if cmd
|
||||
PollInt int `yaml:"pollint"` // the poll interval for the ifcmd
|
||||
}
|
||||
|
||||
// NewExecRes is a constructor for this resource. It also calls Init() for you.
|
||||
func NewExecRes(name, cmd, shell string, timeout int, watchcmd, watchshell, ifcmd, ifshell string, pollint int, state string) *ExecRes {
|
||||
obj := &ExecRes{
|
||||
BaseRes: BaseRes{
|
||||
Name: name,
|
||||
},
|
||||
Cmd: cmd,
|
||||
Shell: shell,
|
||||
Timeout: timeout,
|
||||
WatchCmd: watchcmd,
|
||||
WatchShell: watchshell,
|
||||
IfCmd: ifcmd,
|
||||
IfShell: ifshell,
|
||||
PollInt: pollint,
|
||||
State: state,
|
||||
}
|
||||
obj.Init()
|
||||
return obj
|
||||
}
|
||||
|
||||
// Init runs some startup code for this resource.
|
||||
func (obj *ExecRes) Init() {
|
||||
obj.BaseRes.kind = "Exec"
|
||||
obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||
}
|
||||
|
||||
// validate if the params passed in are valid data
|
||||
// FIXME: where should this get called ?
|
||||
func (obj *ExecRes) Validate() bool {
|
||||
if obj.Cmd == "" { // this is the only thing that is really required
|
||||
return false
|
||||
}
|
||||
|
||||
// if we have a watch command, then we don't poll with the if command!
|
||||
if obj.WatchCmd != "" && obj.PollInt > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// wraps the scanner output in a channel
|
||||
func (obj *ExecRes) BufioChanScanner(scanner *bufio.Scanner) (chan string, chan error) {
|
||||
ch, errch := make(chan string), make(chan error)
|
||||
go func() {
|
||||
for scanner.Scan() {
|
||||
ch <- scanner.Text() // blocks here ?
|
||||
if e := scanner.Err(); e != nil {
|
||||
errch <- e // send any misc errors we encounter
|
||||
//break // TODO ?
|
||||
}
|
||||
}
|
||||
close(ch)
|
||||
errch <- scanner.Err() // eof or some err
|
||||
close(errch)
|
||||
}()
|
||||
return ch, errch
|
||||
}
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *ExecRes) Watch(processChan chan Event) {
|
||||
if obj.IsWatching() {
|
||||
return
|
||||
}
|
||||
obj.SetWatching(true)
|
||||
defer obj.SetWatching(false)
|
||||
cuuid := obj.converger.Register()
|
||||
defer cuuid.Unregister()
|
||||
|
||||
var send = false // send event?
|
||||
var exit = false
|
||||
bufioch, errch := make(chan string), make(chan error)
|
||||
|
||||
if obj.WatchCmd != "" {
|
||||
var cmdName string
|
||||
var cmdArgs []string
|
||||
if obj.WatchShell == "" {
|
||||
// call without a shell
|
||||
// FIXME: are there still whitespace splitting issues?
|
||||
split := strings.Fields(obj.WatchCmd)
|
||||
cmdName = split[0]
|
||||
//d, _ := os.Getwd() // TODO: how does this ever error ?
|
||||
//cmdName = path.Join(d, cmdName)
|
||||
cmdArgs = split[1:]
|
||||
} else {
|
||||
cmdName = obj.Shell // usually bash, or sh
|
||||
cmdArgs = []string{"-c", obj.WatchCmd}
|
||||
}
|
||||
cmd := exec.Command(cmdName, cmdArgs...)
|
||||
//cmd.Dir = "" // look for program in pwd ?
|
||||
|
||||
cmdReader, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
log.Printf("%v[%v]: Error creating StdoutPipe for Cmd: %v", obj.Kind(), obj.GetName(), err)
|
||||
log.Fatal(err) // XXX: how should we handle errors?
|
||||
}
|
||||
scanner := bufio.NewScanner(cmdReader)
|
||||
|
||||
defer cmd.Wait() // XXX: is this necessary?
|
||||
defer func() {
|
||||
// FIXME: without wrapping this in this func it panics
|
||||
// when running examples/graph8d.yaml
|
||||
cmd.Process.Kill() // TODO: is this necessary?
|
||||
}()
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Printf("%v[%v]: Error starting Cmd: %v", obj.Kind(), obj.GetName(), err)
|
||||
log.Fatal(err) // XXX: how should we handle errors?
|
||||
}
|
||||
|
||||
bufioch, errch = obj.BufioChanScanner(scanner)
|
||||
}
|
||||
|
||||
for {
|
||||
obj.SetState(resStateWatching) // reset
|
||||
select {
|
||||
case text := <-bufioch:
|
||||
cuuid.SetConverged(false)
|
||||
// each time we get a line of output, we loop!
|
||||
log.Printf("%v[%v]: Watch output: %s", obj.Kind(), obj.GetName(), text)
|
||||
if text != "" {
|
||||
send = true
|
||||
}
|
||||
|
||||
case err := <-errch:
|
||||
cuuid.SetConverged(false) // XXX ?
|
||||
if err == nil { // EOF
|
||||
// FIXME: add an "if watch command ends/crashes"
|
||||
// restart or generate error option
|
||||
log.Printf("%v[%v]: Reached EOF", obj.Kind(), obj.GetName())
|
||||
return
|
||||
}
|
||||
log.Printf("%v[%v]: Error reading input?: %v", obj.Kind(), obj.GetName(), err)
|
||||
log.Fatal(err)
|
||||
// XXX: how should we handle errors?
|
||||
|
||||
case event := <-obj.events:
|
||||
cuuid.SetConverged(false)
|
||||
if exit, send = obj.ReadEvent(&event); exit {
|
||||
return // exit
|
||||
}
|
||||
|
||||
case <-cuuid.ConvergedTimer():
|
||||
cuuid.SetConverged(true) // converged!
|
||||
continue
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
// it is okay to invalidate the clean state on poke too
|
||||
obj.isStateOK = false // something made state dirty
|
||||
resp := NewResp()
|
||||
processChan <- Event{eventNil, resp, "", true} // trigger process
|
||||
resp.ACKWait() // wait for the ACK()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CheckApply checks the resource state and applies the resource if the bool
|
||||
// input is true. It returns error info and if the state check passed or not.
|
||||
// TODO: expand the IfCmd to be a list of commands
|
||||
func (obj *ExecRes) CheckApply(apply bool) (checkok bool, err error) {
|
||||
log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
|
||||
|
||||
// if there is a watch command, but no if command, run based on state
|
||||
if obj.WatchCmd != "" && obj.IfCmd == "" {
|
||||
if obj.isStateOK {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// if there is no watcher, but there is an onlyif check, run it to see
|
||||
} else if obj.IfCmd != "" { // && obj.WatchCmd == ""
|
||||
// there is a watcher, but there is also an if command
|
||||
//} else if obj.IfCmd != "" && obj.WatchCmd != "" {
|
||||
|
||||
if obj.PollInt > 0 { // && obj.WatchCmd == ""
|
||||
// XXX have the Watch() command output onlyif poll events...
|
||||
// XXX we can optimize by saving those results for returning here
|
||||
// return XXX
|
||||
}
|
||||
|
||||
var cmdName string
|
||||
var cmdArgs []string
|
||||
if obj.IfShell == "" {
|
||||
// call without a shell
|
||||
// FIXME: are there still whitespace splitting issues?
|
||||
split := strings.Fields(obj.IfCmd)
|
||||
cmdName = split[0]
|
||||
//d, _ := os.Getwd() // TODO: how does this ever error ?
|
||||
//cmdName = path.Join(d, cmdName)
|
||||
cmdArgs = split[1:]
|
||||
} else {
|
||||
cmdName = obj.IfShell // usually bash, or sh
|
||||
cmdArgs = []string{"-c", obj.IfCmd}
|
||||
}
|
||||
err = exec.Command(cmdName, cmdArgs...).Run()
|
||||
if err != nil {
|
||||
// TODO: check exit value
|
||||
return true, nil // don't run
|
||||
}
|
||||
|
||||
// if there is no watcher and no onlyif check, assume we should run
|
||||
} else { // if obj.WatchCmd == "" && obj.IfCmd == "" {
|
||||
// just run if state is dirty
|
||||
if obj.isStateOK {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// state is not okay, no work done, exit, but without error
|
||||
if !apply {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// apply portion
|
||||
log.Printf("%v[%v]: Apply", obj.Kind(), obj.GetName())
|
||||
var cmdName string
|
||||
var cmdArgs []string
|
||||
if obj.Shell == "" {
|
||||
// call without a shell
|
||||
// FIXME: are there still whitespace splitting issues?
|
||||
// TODO: we could make the split character user selectable...!
|
||||
split := strings.Fields(obj.Cmd)
|
||||
cmdName = split[0]
|
||||
//d, _ := os.Getwd() // TODO: how does this ever error ?
|
||||
//cmdName = path.Join(d, cmdName)
|
||||
cmdArgs = split[1:]
|
||||
} else {
|
||||
cmdName = obj.Shell // usually bash, or sh
|
||||
cmdArgs = []string{"-c", obj.Cmd}
|
||||
}
|
||||
cmd := exec.Command(cmdName, cmdArgs...)
|
||||
//cmd.Dir = "" // look for program in pwd ?
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
|
||||
if err = cmd.Start(); err != nil {
|
||||
log.Printf("%v[%v]: Error starting Cmd: %v", obj.Kind(), obj.GetName(), err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
timeout := obj.Timeout
|
||||
if timeout == 0 { // zero timeout means no timer, so disable it
|
||||
timeout = -1
|
||||
}
|
||||
done := make(chan error)
|
||||
go func() { done <- cmd.Wait() }()
|
||||
|
||||
select {
|
||||
case err = <-done:
|
||||
if err != nil {
|
||||
log.Printf("%v[%v]: Error waiting for Cmd: %v", obj.Kind(), obj.GetName(), err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
case <-TimeAfterOrBlock(timeout):
|
||||
log.Printf("%v[%v]: Timeout waiting for Cmd", obj.Kind(), obj.GetName())
|
||||
//cmd.Process.Kill() // TODO: is this necessary?
|
||||
return false, errors.New("Timeout waiting for Cmd!")
|
||||
}
|
||||
|
||||
// TODO: if we printed the stdout while the command is running, this
|
||||
// would be nice, but it would require terminal log output that doesn't
|
||||
// interleave all the parallel parts which would mix it all up...
|
||||
if s := out.String(); s == "" {
|
||||
log.Printf("Exec[%v]: Command output is empty!", obj.Name)
|
||||
} else {
|
||||
log.Printf("Exec[%v]: Command output is:", obj.Name)
|
||||
log.Printf(out.String())
|
||||
}
|
||||
// XXX: return based on exit value!!
|
||||
|
||||
// the state tracking is for exec resources that can't "detect" their
|
||||
// state, and assume it's invalid when the Watch() function triggers.
|
||||
// if we apply state successfully, we should reset it here so that we
|
||||
// know that we have applied since the state was set not ok by event!
|
||||
obj.isStateOK = true // reset
|
||||
return false, nil // success
|
||||
}
|
||||
|
||||
// ExecUUID is the UUID struct for ExecRes.
|
||||
type ExecUUID struct {
|
||||
BaseUUID
|
||||
Cmd string
|
||||
IfCmd string
|
||||
// TODO: add more elements here
|
||||
}
|
||||
|
||||
// if and only if they are equivalent, return true
|
||||
// if they are not equivalent, return false
|
||||
func (obj *ExecUUID) IFF(uuid ResUUID) bool {
|
||||
res, ok := uuid.(*ExecUUID)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if obj.Cmd != res.Cmd {
|
||||
return false
|
||||
}
|
||||
// TODO: add more checks here
|
||||
//if obj.Shell != res.Shell {
|
||||
// return false
|
||||
//}
|
||||
//if obj.Timeout != res.Timeout {
|
||||
// return false
|
||||
//}
|
||||
//if obj.WatchCmd != res.WatchCmd {
|
||||
// return false
|
||||
//}
|
||||
//if obj.WatchShell != res.WatchShell {
|
||||
// return false
|
||||
//}
|
||||
if obj.IfCmd != res.IfCmd {
|
||||
return false
|
||||
}
|
||||
//if obj.PollInt != res.PollInt {
|
||||
// return false
|
||||
//}
|
||||
//if obj.State != res.State {
|
||||
// return false
|
||||
//}
|
||||
return true
|
||||
}
|
||||
|
||||
// The AutoEdges method returns the AutoEdges. In this case none are used.
|
||||
func (obj *ExecRes) AutoEdges() AutoEdge {
|
||||
// TODO: parse as many exec params to look for auto edges, for example
|
||||
// the path of the binary in the Cmd variable might come from a pkg
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetUUIDs includes all params to make a unique identification of this object.
|
||||
// Most resources only return one, although some resources can return multiple.
|
||||
func (obj *ExecRes) GetUUIDs() []ResUUID {
|
||||
x := &ExecUUID{
|
||||
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
|
||||
Cmd: obj.Cmd,
|
||||
IfCmd: obj.IfCmd,
|
||||
// TODO: add more params here
|
||||
}
|
||||
return []ResUUID{x}
|
||||
}
|
||||
|
||||
// GroupCmp returns whether two resources can be grouped together or not.
|
||||
func (obj *ExecRes) GroupCmp(r Res) bool {
|
||||
_, ok := r.(*ExecRes)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return false // not possible atm
|
||||
}
|
||||
|
||||
// Compare two resources and return if they are equivalent.
|
||||
func (obj *ExecRes) Compare(res Res) bool {
|
||||
switch res.(type) {
|
||||
case *ExecRes:
|
||||
res := res.(*ExecRes)
|
||||
if !obj.BaseRes.Compare(res) { // call base Compare
|
||||
return false
|
||||
}
|
||||
|
||||
if obj.Name != res.Name {
|
||||
return false
|
||||
}
|
||||
if obj.Cmd != res.Cmd {
|
||||
return false
|
||||
}
|
||||
if obj.Shell != res.Shell {
|
||||
return false
|
||||
}
|
||||
if obj.Timeout != res.Timeout {
|
||||
return false
|
||||
}
|
||||
if obj.WatchCmd != res.WatchCmd {
|
||||
return false
|
||||
}
|
||||
if obj.WatchShell != res.WatchShell {
|
||||
return false
|
||||
}
|
||||
if obj.IfCmd != res.IfCmd {
|
||||
return false
|
||||
}
|
||||
if obj.PollInt != res.PollInt {
|
||||
return false
|
||||
}
|
||||
if obj.State != res.State {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
529
file.go
@@ -1,529 +0,0 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"gopkg.in/fsnotify.v1"
|
||||
//"github.com/go-fsnotify/fsnotify" // git master of "gopkg.in/fsnotify.v1"
|
||||
"encoding/gob"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
gob.Register(&FileRes{})
|
||||
}
|
||||
|
||||
// FileRes is a file and directory resource.
|
||||
type FileRes struct {
|
||||
BaseRes `yaml:",inline"`
|
||||
Path string `yaml:"path"` // path variable (should default to name)
|
||||
Dirname string `yaml:"dirname"`
|
||||
Basename string `yaml:"basename"`
|
||||
Content string `yaml:"content"`
|
||||
State string `yaml:"state"` // state: exists/present?, absent, (undefined?)
|
||||
sha256sum string
|
||||
}
|
||||
|
||||
// NewFileRes is a constructor for this resource. It also calls Init() for you.
|
||||
func NewFileRes(name, path, dirname, basename, content, state string) *FileRes {
|
||||
// FIXME if path = nil, path = name ...
|
||||
obj := &FileRes{
|
||||
BaseRes: BaseRes{
|
||||
Name: name,
|
||||
},
|
||||
Path: path,
|
||||
Dirname: dirname,
|
||||
Basename: basename,
|
||||
Content: content,
|
||||
State: state,
|
||||
sha256sum: "",
|
||||
}
|
||||
obj.Init()
|
||||
return obj
|
||||
}
|
||||
|
||||
// Init runs some startup code for this resource.
|
||||
func (obj *FileRes) Init() {
|
||||
obj.BaseRes.kind = "File"
|
||||
obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||
}
|
||||
|
||||
// GetPath returns the actual path to use for this resource. It computes this
|
||||
// after analysis of the path, dirname and basename values.
|
||||
func (obj *FileRes) GetPath() string {
|
||||
d := Dirname(obj.Path)
|
||||
b := Basename(obj.Path)
|
||||
if !obj.Validate() || (obj.Dirname == "" && obj.Basename == "") {
|
||||
return obj.Path
|
||||
} else if obj.Dirname == "" {
|
||||
return d + obj.Basename
|
||||
} else if obj.Basename == "" {
|
||||
return obj.Dirname + b
|
||||
} else { // if obj.dirname != "" && obj.basename != "" {
|
||||
return obj.Dirname + obj.Basename
|
||||
}
|
||||
}
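// Illustrative only: how Dirname and Basename override Path in GetPath(),
// assuming the Dirname()/Basename() helpers keep and strip the trailing
// slash respectively (which is what the concatenation above relies on).
//
//	FileRes{Path: "/tmp/mgmt/f1"}                                       // -> "/tmp/mgmt/f1"
//	FileRes{Path: "/tmp/mgmt/f1", Basename: "f2"}                       // -> "/tmp/mgmt/f2"
//	FileRes{Path: "/tmp/mgmt/f1", Dirname: "/var/lib/"}                 // -> "/var/lib/f1"
//	FileRes{Path: "/tmp/mgmt/f1", Dirname: "/var/lib/", Basename: "f2"} // -> "/var/lib/f2"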
|
||||
|
||||
// Validate reports whether the params passed in are valid.
|
||||
func (obj *FileRes) Validate() bool {
|
||||
if obj.Dirname != "" {
|
||||
// must end with /
|
||||
if obj.Dirname[len(obj.Dirname)-1:] != "/" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if obj.Basename != "" {
|
||||
// must not start with /
|
||||
if obj.Basename[0:1] == "/" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
// This one is a file watcher for files and directories.
|
||||
// Modify with caution, it is probably important to write some test cases first!
|
||||
// obj.GetPath(): file or directory
|
||||
func (obj *FileRes) Watch(processChan chan Event) {
|
||||
if obj.IsWatching() {
|
||||
return
|
||||
}
|
||||
obj.SetWatching(true)
|
||||
defer obj.SetWatching(false)
|
||||
cuuid := obj.converger.Register()
|
||||
defer cuuid.Unregister()
|
||||
|
||||
//var recursive bool = false
|
||||
//var isdir = (obj.GetPath()[len(obj.GetPath())-1:] == "/") // dirs have trailing slashes
|
||||
//log.Printf("IsDirectory: %v", isdir)
|
||||
var safename = path.Clean(obj.GetPath()) // no trailing slash
|
||||
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
patharray := PathSplit(safename) // tokenize the path
|
||||
var index = len(patharray) // starting index
|
||||
var current string // current "watcher" location
|
||||
var deltaDepth int // depth delta between watcher and event
|
||||
var send = false // send event?
|
||||
var exit = false
|
||||
var dirty = false
|
||||
|
||||
for {
|
||||
current = strings.Join(patharray[0:index], "/")
|
||||
if current == "" { // the empty string top is the root dir ("/")
|
||||
current = "/"
|
||||
}
|
||||
if DEBUG {
|
||||
log.Printf("File[%v]: Watching: %v", obj.GetName(), current) // attempting to watch...
|
||||
}
|
||||
// initialize in the loop so that we can reset on rm-ed handles
|
||||
err = watcher.Add(current)
|
||||
if err != nil {
|
||||
if DEBUG {
|
||||
log.Printf("File[%v]: watcher.Add(%v): Error: %v", obj.GetName(), current, err)
|
||||
}
|
||||
if err == syscall.ENOENT {
|
||||
index-- // usually not found, move up one dir
|
||||
} else if err == syscall.ENOSPC {
|
||||
// XXX: occasionally: no space left on device,
|
||||
// XXX: probably due to lack of inotify watches
|
||||
log.Printf("%v[%v]: Out of inotify watches!", obj.Kind(), obj.GetName())
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
log.Printf("Unknown file[%v] error:", obj.Name)
|
||||
log.Fatal(err)
|
||||
}
|
||||
index = int(math.Max(1, float64(index)))
|
||||
continue
|
||||
}
|
||||
|
||||
obj.SetState(resStateWatching) // reset
|
||||
select {
|
||||
case event := <-watcher.Events:
|
||||
if DEBUG {
|
||||
log.Printf("File[%v]: Watch(%v), Event(%v): %v", obj.GetName(), current, event.Name, event.Op)
|
||||
}
|
||||
cuuid.SetConverged(false) // XXX: technically i can detect if the event is erroneous or not first
|
||||
// the deeper you go, the bigger the deltaDepth is...
|
||||
// this is the difference between what we're watching,
|
||||
// and the event... doesn't mean we can't watch deeper
|
||||
if current == event.Name {
|
||||
deltaDepth = 0 // i was watching what i was looking for
|
||||
|
||||
} else if HasPathPrefix(event.Name, current) {
|
||||
deltaDepth = len(PathSplit(current)) - len(PathSplit(event.Name)) // -1 or less
|
||||
|
||||
} else if HasPathPrefix(current, event.Name) {
|
||||
deltaDepth = len(PathSplit(event.Name)) - len(PathSplit(current)) // +1 or more
|
||||
|
||||
} else {
|
||||
// TODO: different watchers get each other's events!
|
||||
// https://github.com/go-fsnotify/fsnotify/issues/95
|
||||
// this happened with two values such as:
|
||||
// event.Name: /tmp/mgmt/f3 and current: /tmp/mgmt/f2
|
||||
continue
|
||||
}
|
||||
//log.Printf("The delta depth is: %v", deltaDepth)
|
||||
|
||||
// if we have what we wanted, awesome, send an event...
|
||||
if event.Name == safename {
|
||||
//log.Println("Event!")
|
||||
// FIXME: should all these below cases trigger?
|
||||
send = true
|
||||
dirty = true
|
||||
|
||||
// file removed, move the watch upwards
|
||||
if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
|
||||
//log.Println("Removal!")
|
||||
watcher.Remove(current)
|
||||
index--
|
||||
}
|
||||
|
||||
// we must be a parent watcher, so descend in
|
||||
if deltaDepth < 0 {
|
||||
watcher.Remove(current)
|
||||
index++
|
||||
}
|
||||
|
||||
// if safename starts with event.Name, we're above, and no event should be sent
|
||||
} else if HasPathPrefix(safename, event.Name) {
|
||||
//log.Println("Above!")
|
||||
|
||||
if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
|
||||
log.Println("Removal!")
|
||||
watcher.Remove(current)
|
||||
index--
|
||||
}
|
||||
|
||||
if deltaDepth < 0 {
|
||||
log.Println("Parent!")
|
||||
if PathPrefixDelta(safename, event.Name) == 1 { // we're the parent dir
|
||||
send = true
|
||||
dirty = true
|
||||
}
|
||||
watcher.Remove(current)
|
||||
index++
|
||||
}
|
||||
|
||||
// if event.Name startswith safename, send event, we're already deeper
|
||||
} else if HasPathPrefix(event.Name, safename) {
|
||||
//log.Println("Event2!")
|
||||
send = true
|
||||
dirty = true
|
||||
}
|
||||
|
||||
case err := <-watcher.Errors:
|
||||
cuuid.SetConverged(false) // XXX ?
|
||||
log.Printf("error: %v", err)
|
||||
log.Fatal(err)
|
||||
//obj.events <- fmt.Sprintf("file: %v", "error") // XXX: how should we handle errors?
|
||||
|
||||
case event := <-obj.events:
|
||||
cuuid.SetConverged(false)
|
||||
if exit, send = obj.ReadEvent(&event); exit {
|
||||
return // exit
|
||||
}
|
||||
//dirty = false // these events don't invalidate state
|
||||
|
||||
case <-cuuid.ConvergedTimer():
|
||||
cuuid.SetConverged(true) // converged!
|
||||
continue
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
// only invalid state on certain types of events
|
||||
if dirty {
|
||||
dirty = false
|
||||
obj.isStateOK = false // something made state dirty
|
||||
}
|
||||
resp := NewResp()
|
||||
processChan <- Event{eventNil, resp, "", true} // trigger process
|
||||
resp.ACKWait() // wait for the ACK()
|
||||
}
|
||||
}
|
||||
}
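// Illustrative only: the path helper semantics that the deltaDepth logic
// above assumes (behaviour inferred from the call sites, not copied from the
// util code).
//
//	HasPathPrefix("/tmp/mgmt/f1", "/tmp/mgmt/")  // true:  the event path is at or below the watch
//	HasPathPrefix("/tmp/mgmt/", "/tmp/mgmt/f1")  // false: the watch is above the event path
//	PathPrefixDelta("/tmp/mgmt/f1", "/tmp/mgmt") // 1: the second path is the immediate parent dir
//
// A negative deltaDepth means the watcher sits above the event and must
// descend (index++); deltaDepth >= 0 together with a Remove op means the
// watched path vanished and we climb back up (index--).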
|
||||
|
||||
// HashSHA256fromContent computes the hash of the file contents and returns it.
|
||||
// It also caches the value if it can.
|
||||
func (obj *FileRes) HashSHA256fromContent() string {
|
||||
if obj.sha256sum != "" { // return if already computed
|
||||
return obj.sha256sum
|
||||
}
|
||||
|
||||
hash := sha256.New()
|
||||
hash.Write([]byte(obj.Content))
|
||||
obj.sha256sum = hex.EncodeToString(hash.Sum(nil))
|
||||
return obj.sha256sum
|
||||
}
|
||||
|
||||
// FileHashSHA256Check computes the hash of the actual file and compares it to
|
||||
// the computed hash of the resource's file contents.
|
||||
func (obj *FileRes) FileHashSHA256Check() (bool, error) {
|
||||
if PathIsDir(obj.GetPath()) { // assert
|
||||
log.Fatal("This should only be called on a File resource.")
|
||||
}
|
||||
// run a diff, and return true if it needs changing
|
||||
hash := sha256.New()
|
||||
f, err := os.Open(obj.GetPath())
|
||||
if err != nil {
|
||||
if e, ok := err.(*os.PathError); ok && (e.Err.(syscall.Errno) == syscall.ENOENT) {
|
||||
return false, nil // no "error", file is just absent
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err := io.Copy(hash, f); err != nil {
|
||||
return false, err
|
||||
}
|
||||
sha256sum := hex.EncodeToString(hash.Sum(nil))
|
||||
//log.Printf("sha256sum: %v", sha256sum)
|
||||
if obj.HashSHA256fromContent() == sha256sum {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// FileApply writes the resource file contents out to the correct path. This
|
||||
// implementation doesn't try to be particularly clever in any way.
|
||||
func (obj *FileRes) FileApply() error {
|
||||
if PathIsDir(obj.GetPath()) {
|
||||
log.Fatal("This should only be called on a File resource.")
|
||||
}
|
||||
|
||||
if obj.State == "absent" {
|
||||
log.Printf("About to remove: %v", obj.GetPath())
|
||||
err := os.Remove(obj.GetPath())
|
||||
return err // either nil or not, for success or failure
|
||||
}
|
||||
|
||||
f, err := os.Create(obj.GetPath())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = io.WriteString(f, obj.Content)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil // success
|
||||
}
|
||||
|
||||
// CheckApply checks the resource state and applies the resource if the bool
|
||||
// input is true. It returns error info and if the state check passed or not.
|
||||
func (obj *FileRes) CheckApply(apply bool) (checkok bool, err error) {
|
||||
log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
|
||||
|
||||
if obj.isStateOK { // cache the state
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if _, err = os.Stat(obj.GetPath()); os.IsNotExist(err) {
|
||||
// no such file or directory
|
||||
if obj.State == "absent" {
|
||||
// missing file should be missing, phew :)
|
||||
obj.isStateOK = true
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
err = nil // reset
|
||||
|
||||
// FIXME: add file mode check here...
|
||||
|
||||
if PathIsDir(obj.GetPath()) {
|
||||
log.Fatal("Not implemented!") // XXX
|
||||
} else {
|
||||
ok, err := obj.FileHashSHA256Check()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if ok {
|
||||
obj.isStateOK = true
|
||||
return true, nil
|
||||
}
|
||||
// if no err, but !ok, then we continue on...
|
||||
}
|
||||
|
||||
// state is not okay, no work done, exit, but without error
|
||||
if !apply {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// apply portion
|
||||
log.Printf("%v[%v]: Apply", obj.Kind(), obj.GetName())
|
||||
if PathIsDir(obj.GetPath()) {
|
||||
log.Fatal("Not implemented!") // XXX
|
||||
} else {
|
||||
err = obj.FileApply()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
obj.isStateOK = true
|
||||
return false, nil // success
|
||||
}
|
||||
|
||||
// FileUUID is the UUID struct for FileRes.
|
||||
type FileUUID struct {
|
||||
BaseUUID
|
||||
path string
|
||||
}
|
||||
|
||||
// IFF returns true if and only if the two UUIDs are equivalent.
|
||||
func (obj *FileUUID) IFF(uuid ResUUID) bool {
|
||||
res, ok := uuid.(*FileUUID)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return obj.path == res.path
|
||||
}
|
||||
|
||||
// FileResAutoEdges holds the state of the auto edge generator.
|
||||
type FileResAutoEdges struct {
|
||||
data []ResUUID
|
||||
pointer int
|
||||
found bool
|
||||
}
|
||||
|
||||
// Next returns the next automatic edge.
|
||||
func (obj *FileResAutoEdges) Next() []ResUUID {
|
||||
if obj.found {
|
||||
log.Fatal("Shouldn't be called anymore!")
|
||||
}
|
||||
if len(obj.data) == 0 { // check length for rare scenarios
|
||||
return nil
|
||||
}
|
||||
value := obj.data[obj.pointer]
|
||||
obj.pointer++
|
||||
return []ResUUID{value} // we return one, even though api supports N
|
||||
}
|
||||
|
||||
// Test gets results of the earlier Next() call, and returns if we should continue!
|
||||
func (obj *FileResAutoEdges) Test(input []bool) bool {
|
||||
// if there aren't any more remaining
|
||||
if len(obj.data) <= obj.pointer {
|
||||
return false
|
||||
}
|
||||
if obj.found { // already found, done!
|
||||
return false
|
||||
}
|
||||
if len(input) != 1 { // in case we get given bad data
|
||||
log.Fatal("Expecting a single value!")
|
||||
}
|
||||
if input[0] { // if a match is found, we're done!
|
||||
obj.found = true // no more to find!
|
||||
return false
|
||||
}
|
||||
return true // keep going
|
||||
}
|
||||
|
||||
// AutoEdges generates a simple linear sequence of each parent directory from
|
||||
// the bottom up!
|
||||
func (obj *FileRes) AutoEdges() AutoEdge {
|
||||
var data []ResUUID // store linear result chain here...
|
||||
values := PathSplitFullReversed(obj.GetPath()) // build it
|
||||
_, values = values[0], values[1:] // get rid of first value which is me!
|
||||
for _, x := range values {
|
||||
var reversed = true // cheat by passing a pointer
|
||||
data = append(data, &FileUUID{
|
||||
BaseUUID: BaseUUID{
|
||||
name: obj.GetName(),
|
||||
kind: obj.Kind(),
|
||||
reversed: &reversed,
|
||||
},
|
||||
path: x, // what matters
|
||||
}) // build list
|
||||
}
|
||||
return &FileResAutoEdges{
|
||||
data: data,
|
||||
pointer: 0,
|
||||
found: false,
|
||||
}
|
||||
}
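// Illustrative only: a sketch of how an AutoEdge iterator like the one above
// is expected to be driven by the auto edge engine elsewhere in the codebase;
// it only demonstrates the Next()/Test() contract, it is not the real engine.
//
//	func driveAutoEdges(ae AutoEdge, match func([]ResUUID) []bool) {
//		for {
//			uuids := ae.Next()      // candidate UUIDs to search for, closest parent first
//			results := match(uuids) // true for each UUID that matched an existing vertex
//			if !ae.Test(results) {  // false means: stop, the search is finished
//				return
//			}
//		}
//	}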
|
||||
|
||||
// GetUUIDs includes all params to make a unique identification of this object.
|
||||
// Most resources only return one, although some resources can return multiple.
|
||||
func (obj *FileRes) GetUUIDs() []ResUUID {
|
||||
x := &FileUUID{
|
||||
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
|
||||
path: obj.GetPath(),
|
||||
}
|
||||
return []ResUUID{x}
|
||||
}
|
||||
|
||||
// GroupCmp returns whether two resources can be grouped together or not.
|
||||
func (obj *FileRes) GroupCmp(r Res) bool {
|
||||
_, ok := r.(*FileRes)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// TODO: we might be able to group directory children into a single
|
||||
// recursive watcher in the future, thus saving fanotify watches
|
||||
return false // not possible atm
|
||||
}
|
||||
|
||||
// Compare two resources and return if they are equivalent.
|
||||
func (obj *FileRes) Compare(res Res) bool {
|
||||
switch res.(type) {
|
||||
case *FileRes:
|
||||
res := res.(*FileRes)
|
||||
if !obj.BaseRes.Compare(res) { // call base Compare
|
||||
return false
|
||||
}
|
||||
|
||||
if obj.Name != res.Name {
|
||||
return false
|
||||
}
|
||||
if obj.GetPath() != res.Path {
|
||||
return false
|
||||
}
|
||||
if obj.Content != res.Content {
|
||||
return false
|
||||
}
|
||||
if obj.State != res.State {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// CollectPattern applies the pattern for collection resources.
|
||||
func (obj *FileRes) CollectPattern(pattern string) {
|
||||
// XXX: currently the pattern for files can only override the Dirname variable :P
|
||||
obj.Dirname = pattern // XXX: simplistic for now
|
||||
}
|
||||
55
gapi/gapi.go
Normal file
@@ -0,0 +1,55 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package gapi defines the interface that graph API generators must meet.
|
||||
package gapi
|
||||
|
||||
import (
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
)
|
||||
|
||||
// Data is the set of input values passed into the GAPI structs via Init.
|
||||
type Data struct {
|
||||
Hostname string // uuid for the host, required for GAPI
|
||||
World resources.World
|
||||
Noop bool
|
||||
NoConfigWatch bool
|
||||
NoStreamWatch bool
|
||||
// NOTE: we can add more fields here if needed by GAPI endpoints
|
||||
}
|
||||
|
||||
// Next describes the particular response the GAPI implementer wishes to emit.
|
||||
type Next struct {
|
||||
// FIXME: the Fast pause parameter should eventually get replaced with a
|
||||
// "SwitchMethod" parameter or similar that instead lets the implementer
|
||||
// choose between fast pause, slow pause, and interrupt. Interrupt could
|
||||
// be a future extension to the Resource API that lets an Interrupt() be
|
||||
// called if we want to exit immediately from the CheckApply part of the
|
||||
// resource for some reason. For now we'll keep this simple with a bool.
|
||||
Fast bool // run a fast pause to switch?
|
||||
Exit bool // should we cause the program to exit? (specify err or not)
|
||||
Err error // if something goes wrong (use with or without exit!)
|
||||
}
|
||||
|
||||
// GAPI is a Graph API that represents incoming graphs and change streams.
|
||||
type GAPI interface {
|
||||
Init(Data) error // initializes the GAPI and passes in useful data
|
||||
Graph() (*pgraph.Graph, error) // returns the most recent pgraph
|
||||
Next() chan Next // returns a stream of switch events
|
||||
Close() error // shutdown the GAPI
|
||||
}
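// Illustrative only: the rough shape of a GAPI implementation. Names and
// bodies below are placeholders, not code from any real GAPI in this repo.
//
//	type MyGAPI struct {
//		data        gapi.Data
//		initialized bool
//	}
//
//	func (obj *MyGAPI) Init(d gapi.Data) error {
//		obj.data = d // keep the hostname, World handle, etc.
//		obj.initialized = true
//		return nil
//	}
//
//	func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
//		// build and return the current desired-state graph here
//		return nil, fmt.Errorf("not implemented")
//	}
//
//	func (obj *MyGAPI) Next() chan gapi.Next {
//		ch := make(chan gapi.Next, 1)
//		ch <- gapi.Next{} // emit at least one event so a first Graph() is requested
//		return ch
//	}
//
//	func (obj *MyGAPI) Close() error { return nil }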
|
||||
372
lib/cli.go
Normal file
@@ -0,0 +1,372 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package lib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/purpleidea/mgmt/puppet"
|
||||
"github.com/purpleidea/mgmt/yamlgraph"
|
||||
"github.com/purpleidea/mgmt/yamlgraph2"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// run is the main run target.
|
||||
func run(c *cli.Context) error {
|
||||
|
||||
obj := &Main{}
|
||||
|
||||
obj.Program = c.App.Name
|
||||
obj.Version = c.App.Version
|
||||
if val, exists := c.App.Metadata["flags"]; exists {
|
||||
if flags, ok := val.(Flags); ok {
|
||||
obj.Flags = flags
|
||||
}
|
||||
}
|
||||
|
||||
if h := c.String("hostname"); c.IsSet("hostname") && h != "" {
|
||||
obj.Hostname = &h
|
||||
}
|
||||
|
||||
if s := c.String("prefix"); c.IsSet("prefix") && s != "" {
|
||||
obj.Prefix = &s
|
||||
}
|
||||
obj.TmpPrefix = c.Bool("tmp-prefix")
|
||||
obj.AllowTmpPrefix = c.Bool("allow-tmp-prefix")
|
||||
|
||||
if _ = c.String("code"); c.IsSet("code") {
|
||||
if obj.GAPI != nil {
|
||||
return fmt.Errorf("can't combine code GAPI with existing GAPI")
|
||||
}
|
||||
// TODO: implement DSL GAPI
|
||||
//obj.GAPI = &dsl.GAPI{
|
||||
// Code: &s,
|
||||
//}
|
||||
return fmt.Errorf("the Code GAPI is not implemented yet") // TODO: DSL
|
||||
}
|
||||
if y := c.String("yaml"); c.IsSet("yaml") {
|
||||
if obj.GAPI != nil {
|
||||
return fmt.Errorf("can't combine YAML GAPI with existing GAPI")
|
||||
}
|
||||
obj.GAPI = &yamlgraph.GAPI{
|
||||
File: &y,
|
||||
}
|
||||
}
|
||||
if y := c.String("yaml2"); c.IsSet("yaml2") {
|
||||
if obj.GAPI != nil {
|
||||
return fmt.Errorf("can't combine YAMLv2 GAPI with existing GAPI")
|
||||
}
|
||||
obj.GAPI = &yamlgraph2.GAPI{
|
||||
File: &y,
|
||||
}
|
||||
}
|
||||
if p := c.String("puppet"); c.IsSet("puppet") {
|
||||
if obj.GAPI != nil {
|
||||
return fmt.Errorf("can't combine puppet GAPI with existing GAPI")
|
||||
}
|
||||
obj.GAPI = &puppet.GAPI{
|
||||
PuppetParam: &p,
|
||||
PuppetConf: c.String("puppet-conf"),
|
||||
}
|
||||
}
|
||||
obj.Remotes = c.StringSlice("remote") // FIXME: GAPI-ify somehow?
|
||||
|
||||
obj.NoWatch = c.Bool("no-watch")
|
||||
obj.NoConfigWatch = c.Bool("no-config-watch")
|
||||
obj.NoStreamWatch = c.Bool("no-stream-watch")
|
||||
|
||||
obj.Noop = c.Bool("noop")
|
||||
obj.Sema = c.Int("sema")
|
||||
obj.Graphviz = c.String("graphviz")
|
||||
obj.GraphvizFilter = c.String("graphviz-filter")
|
||||
obj.ConvergedTimeout = c.Int("converged-timeout")
|
||||
obj.MaxRuntime = uint(c.Int("max-runtime"))
|
||||
|
||||
obj.Seeds = c.StringSlice("seeds")
|
||||
obj.ClientURLs = c.StringSlice("client-urls")
|
||||
obj.ServerURLs = c.StringSlice("server-urls")
|
||||
obj.IdealClusterSize = c.Int("ideal-cluster-size")
|
||||
obj.NoServer = c.Bool("no-server")
|
||||
|
||||
obj.CConns = uint16(c.Int("cconns"))
|
||||
obj.AllowInteractive = c.Bool("allow-interactive")
|
||||
obj.SSHPrivIDRsa = c.String("ssh-priv-id-rsa")
|
||||
obj.NoCaching = c.Bool("no-caching")
|
||||
obj.Depth = uint16(c.Int("depth"))
|
||||
|
||||
obj.NoPgp = c.Bool("no-pgp")
|
||||
|
||||
if kp := c.String("pgp-key-path"); c.IsSet("pgp-key-path") {
|
||||
obj.PgpKeyPath = &kp
|
||||
}
|
||||
|
||||
if us := c.String("pgp-identity"); c.IsSet("pgp-identity") {
|
||||
obj.PgpIdentity = &us
|
||||
}
|
||||
|
||||
if err := obj.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
obj.Prometheus = c.Bool("prometheus")
|
||||
obj.PrometheusListen = c.String("prometheus-listen")
|
||||
|
||||
// install the exit signal handler
|
||||
exit := make(chan struct{})
|
||||
defer close(exit)
|
||||
go func() {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
|
||||
select {
|
||||
case sig := <-signals: // any signal will do
|
||||
if sig == os.Interrupt {
|
||||
log.Println("Interrupted by ^C")
|
||||
obj.Exit(nil)
|
||||
return
|
||||
}
|
||||
log.Println("Interrupted by signal")
|
||||
obj.Exit(fmt.Errorf("killed by %v", sig))
|
||||
return
|
||||
case <-exit:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if err := obj.Run(); err != nil {
|
||||
return err
|
||||
//return cli.NewExitError(err.Error(), 1) // TODO: ?
|
||||
//return cli.NewExitError("", 1) // TODO: ?
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CLI is the entry point for using mgmt normally from the CLI.
|
||||
func CLI(program, version string, flags Flags) error {
|
||||
|
||||
// test for sanity
|
||||
if program == "" || version == "" {
|
||||
return fmt.Errorf("program was not compiled correctly, see Makefile")
|
||||
}
|
||||
app := cli.NewApp()
|
||||
app.Name = program // App.name and App.version pass these values through
|
||||
app.Version = version
|
||||
app.Usage = "next generation config management"
|
||||
app.Metadata = map[string]interface{}{ // additional flags
|
||||
"flags": flags,
|
||||
}
|
||||
//app.Action = ... // without a default action, help runs
|
||||
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "run",
|
||||
Aliases: []string{"r"},
|
||||
Usage: "run",
|
||||
Action: run,
|
||||
Flags: []cli.Flag{
|
||||
// useful for testing multiple instances on same machine
|
||||
cli.StringFlag{
|
||||
Name: "hostname",
|
||||
Value: "",
|
||||
Usage: "hostname to use",
|
||||
},
|
||||
|
||||
cli.StringFlag{
|
||||
Name: "prefix",
|
||||
Usage: "specify a path to the working prefix directory",
|
||||
EnvVar: "MGMT_PREFIX",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "tmp-prefix",
|
||||
Usage: "request a pseudo-random, temporary prefix to be used",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "allow-tmp-prefix",
|
||||
Usage: "allow creation of a new temporary prefix if main prefix is unavailable",
|
||||
},
|
||||
|
||||
cli.StringFlag{
|
||||
Name: "code, c",
|
||||
Value: "",
|
||||
Usage: "code definition to run",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "yaml",
|
||||
Value: "",
|
||||
Usage: "yaml graph definition to run",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "yaml2",
|
||||
Value: "",
|
||||
Usage: "yaml graph definition to run (parser v2)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "puppet, p",
|
||||
Value: "",
|
||||
Usage: "load graph from puppet, optionally takes a manifest or path to manifest file",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "puppet-conf",
|
||||
Value: "",
|
||||
Usage: "the path to an alternate puppet.conf file",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "remote",
|
||||
Value: &cli.StringSlice{},
|
||||
Usage: "list of remote graph definitions to run",
|
||||
},
|
||||
|
||||
cli.BoolFlag{
|
||||
Name: "no-watch",
|
||||
Usage: "do not update graph under any switch events",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-config-watch",
|
||||
Usage: "do not update graph on config switch events",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-stream-watch",
|
||||
Usage: "do not update graph on stream switch events",
|
||||
},
|
||||
|
||||
cli.BoolFlag{
|
||||
Name: "noop",
|
||||
Usage: "globally force all resources into no-op mode",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "sema",
|
||||
Value: -1,
|
||||
Usage: "globally add a semaphore to all resources with this lock count",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "graphviz, g",
|
||||
Value: "",
|
||||
Usage: "output file for graphviz data",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "graphviz-filter, gf",
|
||||
Value: "",
|
||||
Usage: "graphviz filter to use",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "converged-timeout, t",
|
||||
Value: -1,
|
||||
Usage: "exit after approximately this many seconds in a converged state",
|
||||
EnvVar: "MGMT_CONVERGED_TIMEOUT",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "max-runtime",
|
||||
Value: 0,
|
||||
Usage: "exit after a maximum of approximately this many seconds",
|
||||
EnvVar: "MGMT_MAX_RUNTIME",
|
||||
},
|
||||
|
||||
// if empty, it will startup a new server
|
||||
cli.StringSliceFlag{
|
||||
Name: "seeds, s",
|
||||
Value: &cli.StringSlice{}, // empty slice
|
||||
Usage: "default etcd client endpoint",
|
||||
EnvVar: "MGMT_SEEDS",
|
||||
},
|
||||
// port 2379 and 4001 are common
|
||||
cli.StringSliceFlag{
|
||||
Name: "client-urls",
|
||||
Value: &cli.StringSlice{},
|
||||
Usage: "list of URLs to listen on for client traffic",
|
||||
EnvVar: "MGMT_CLIENT_URLS",
|
||||
},
|
||||
// port 2380 and 7001 are common
|
||||
cli.StringSliceFlag{
|
||||
Name: "server-urls, peer-urls",
|
||||
Value: &cli.StringSlice{},
|
||||
Usage: "list of URLs to listen on for server (peer) traffic",
|
||||
EnvVar: "MGMT_SERVER_URLS",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "ideal-cluster-size",
|
||||
Value: -1,
|
||||
Usage: "ideal number of server peers in cluster; only read by initial server",
|
||||
EnvVar: "MGMT_IDEAL_CLUSTER_SIZE",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-server",
|
||||
Usage: "do not let other servers peer with me",
|
||||
},
|
||||
|
||||
cli.IntFlag{
|
||||
Name: "cconns",
|
||||
Value: 0,
|
||||
Usage: "number of maximum concurrent remote ssh connections to run; 0 for unlimited",
|
||||
EnvVar: "MGMT_CCONNS",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "allow-interactive",
|
||||
Usage: "allow interactive prompting, such as for remote passwords",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "ssh-priv-id-rsa",
|
||||
Value: "~/.ssh/id_rsa",
|
||||
Usage: "default path to ssh key file, set empty to never touch",
|
||||
EnvVar: "MGMT_SSH_PRIV_ID_RSA",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-caching",
|
||||
Usage: "don't allow remote caching of remote execution binary",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "depth",
|
||||
Hidden: true, // internal use only
|
||||
Value: 0,
|
||||
Usage: "specify depth in remote hierarchy",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-pgp",
|
||||
Usage: "don't create pgp keys",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "pgp-key-path",
|
||||
Value: "",
|
||||
Usage: "path for instance key pair",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "pgp-identity",
|
||||
Value: "",
|
||||
Usage: "default identity used for generation",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "prometheus",
|
||||
Usage: "start a prometheus instance",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "prometheus-listen",
|
||||
Value: "",
|
||||
Usage: "specify prometheus instance binding",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
app.EnableBashCompletion = true
|
||||
return app.Run(os.Args)
|
||||
}
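// Illustrative only: approximately how the new slim main.go (shown further
// down in this diff) is expected to call into this package; the exact body
// and error handling there may differ.
//
//	package main
//
//	import (
//		"log"
//		"os"
//
//		mgmt "github.com/purpleidea/mgmt/lib"
//	)
//
//	// set at compile time via the Makefile's -ldflags
//	var (
//		program string
//		version string
//	)
//
//	func main() {
//		if err := mgmt.CLI(program, version, mgmt.Flags{}); err != nil {
//			log.Println(err)
//			os.Exit(1)
//		}
//	}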
|
||||
671
lib/main.go
Normal file
@@ -0,0 +1,671 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package lib
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/converger"
|
||||
"github.com/purpleidea/mgmt/etcd"
|
||||
"github.com/purpleidea/mgmt/gapi"
|
||||
"github.com/purpleidea/mgmt/pgp"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/prometheus"
|
||||
"github.com/purpleidea/mgmt/recwatch"
|
||||
"github.com/purpleidea/mgmt/remote"
|
||||
"github.com/purpleidea/mgmt/resources"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
|
||||
etcdtypes "github.com/coreos/etcd/pkg/types"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
multierr "github.com/hashicorp/go-multierror"
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Flags are some constant flags which are used throughout the program.
|
||||
type Flags struct {
|
||||
Debug bool // add additional log messages
|
||||
Trace bool // add execution flow log messages
|
||||
Verbose bool // add extra log message output
|
||||
}
|
||||
|
||||
// Main is the main struct for running the mgmt logic.
|
||||
type Main struct {
|
||||
Program string // the name of this program, usually set at compile time
|
||||
Version string // the version of this program, usually set at compile time
|
||||
|
||||
Flags Flags // static global flags that are set at compile time
|
||||
|
||||
Hostname *string // hostname to use; nil if undefined
|
||||
|
||||
Prefix *string // prefix passed in; nil if undefined
|
||||
TmpPrefix bool // request a pseudo-random, temporary prefix to be used
|
||||
AllowTmpPrefix bool // allow creation of a new temporary prefix if main prefix is unavailable
|
||||
|
||||
GAPI gapi.GAPI // graph API interface struct
|
||||
Remotes []string // list of remote graph definitions to run
|
||||
|
||||
NoWatch bool // do not change graph under any circumstances
|
||||
NoConfigWatch bool // do not update graph due to config changes
|
||||
NoStreamWatch bool // do not update graph due to stream changes
|
||||
|
||||
Noop bool // globally force all resources into no-op mode
|
||||
Sema int // add a semaphore with this lock count to each resource
|
||||
Graphviz string // output file for graphviz data
|
||||
GraphvizFilter string // graphviz filter to use
|
||||
ConvergedTimeout int // exit after approximately this many seconds in a converged state; -1 to disable
|
||||
MaxRuntime uint // exit after a maximum of approximately this many seconds
|
||||
|
||||
Seeds []string // default etcd client endpoint
|
||||
ClientURLs []string // list of URLs to listen on for client traffic
|
||||
ServerURLs []string // list of URLs to listen on for server (peer) traffic
|
||||
IdealClusterSize int // ideal number of server peers in cluster; only read by initial server
|
||||
NoServer bool // do not let other servers peer with me
|
||||
|
||||
CConns uint16 // number of maximum concurrent remote ssh connections to run, 0 for unlimited
|
||||
AllowInteractive bool // allow interactive prompting, such as for remote passwords
|
||||
SSHPrivIDRsa string // default path to ssh key file, set empty to never touch
|
||||
NoCaching bool // don't allow remote caching of remote execution binary
|
||||
Depth uint16 // depth in remote hierarchy; for internal use only
|
||||
|
||||
seeds etcdtypes.URLs // processed seeds value
|
||||
clientURLs etcdtypes.URLs // processed client urls value
|
||||
serverURLs etcdtypes.URLs // processed server urls value
|
||||
idealClusterSize uint16 // processed ideal cluster size value
|
||||
|
||||
NoPgp bool // disallow pgp functionality
|
||||
PgpKeyPath *string // import a pre-made key pair
|
||||
PgpIdentity *string
|
||||
pgpKeys *pgp.PGP // agent key pair
|
||||
|
||||
Prometheus bool // enable prometheus metrics
|
||||
PrometheusListen string // prometheus instance bind specification
|
||||
|
||||
exit chan error // exit signal
|
||||
}
|
||||
|
||||
// Init initializes the main struct after it performs some validation.
|
||||
func (obj *Main) Init() error {
|
||||
|
||||
if obj.Program == "" || obj.Version == "" {
|
||||
return fmt.Errorf("you must set the Program and Version strings")
|
||||
}
|
||||
|
||||
if obj.Prefix != nil && obj.TmpPrefix {
|
||||
return fmt.Errorf("choosing a prefix and the request for a tmp prefix is illogical")
|
||||
}
|
||||
|
||||
// if we've turned off watching, then be explicit and disable them all!
|
||||
// if all the watches are disabled, then it's equivalent to no watching
|
||||
if obj.NoWatch {
|
||||
obj.NoConfigWatch = true
|
||||
obj.NoStreamWatch = true
|
||||
} else if obj.NoConfigWatch && obj.NoStreamWatch {
|
||||
obj.NoWatch = true
|
||||
}
|
||||
|
||||
obj.idealClusterSize = uint16(obj.IdealClusterSize)
|
||||
if obj.IdealClusterSize < 0 { // value is undefined, set to the default
|
||||
obj.idealClusterSize = etcd.DefaultIdealClusterSize
|
||||
}
|
||||
|
||||
if obj.idealClusterSize < 1 {
|
||||
return fmt.Errorf("the IdealClusterSize should be at least one")
|
||||
}
|
||||
|
||||
if obj.NoServer && len(obj.Remotes) > 0 {
|
||||
// TODO: in this case, we won't be able to tunnel stuff back to
|
||||
// here, so if we're okay with every remote graph running in an
|
||||
// isolated mode, then this is okay. Improve on this if there's
|
||||
// someone who really wants to be able to do this.
|
||||
return fmt.Errorf("the Server is required when using Remotes")
|
||||
}
|
||||
|
||||
if obj.CConns < 0 {
|
||||
return fmt.Errorf("the CConns value should be at least zero")
|
||||
}
|
||||
|
||||
if obj.ConvergedTimeout >= 0 && obj.CConns > 0 && len(obj.Remotes) > int(obj.CConns) {
|
||||
return fmt.Errorf("you can't converge if you have more remotes than available connections")
|
||||
}
|
||||
|
||||
if obj.Depth < 0 { // user should not be using this argument manually
|
||||
return fmt.Errorf("negative values for Depth are not permitted")
|
||||
}
|
||||
|
||||
// transform the url list inputs into etcd typed lists
|
||||
var err error
|
||||
obj.seeds, err = etcdtypes.NewURLs(
|
||||
util.FlattenListWithSplit(obj.Seeds, []string{",", ";", " "}),
|
||||
)
|
||||
if err != nil && len(obj.Seeds) > 0 {
|
||||
return fmt.Errorf("the Seeds didn't parse correctly")
|
||||
}
|
||||
obj.clientURLs, err = etcdtypes.NewURLs(
|
||||
util.FlattenListWithSplit(obj.ClientURLs, []string{",", ";", " "}),
|
||||
)
|
||||
if err != nil && len(obj.ClientURLs) > 0 {
|
||||
return fmt.Errorf("the ClientURLs didn't parse correctly")
|
||||
}
|
||||
obj.serverURLs, err = etcdtypes.NewURLs(
|
||||
util.FlattenListWithSplit(obj.ServerURLs, []string{",", ";", " "}),
|
||||
)
|
||||
if err != nil && len(obj.ServerURLs) > 0 {
|
||||
return fmt.Errorf("the ServerURLs didn't parse correctly")
|
||||
}
|
||||
|
||||
obj.exit = make(chan error)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Exit causes a safe shutdown. This is often attached to the ^C signal handler.
|
||||
func (obj *Main) Exit(err error) {
|
||||
obj.exit <- err // trigger an exit!
|
||||
}
|
||||
|
||||
// Run is the main execution entrypoint to run mgmt.
|
||||
func (obj *Main) Run() error {
|
||||
|
||||
var start = time.Now().UnixNano()
|
||||
|
||||
var flags int
|
||||
if obj.Flags.Debug || true { // TODO: remove || true
|
||||
flags = log.LstdFlags | log.Lshortfile
|
||||
}
|
||||
flags = (flags - log.Ldate) // remove the date for now
|
||||
log.SetFlags(flags)
|
||||
|
||||
// un-hijack from capnslog...
|
||||
log.SetOutput(os.Stderr)
|
||||
if obj.Flags.Verbose {
|
||||
capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stderr, "(etcd) ", flags))
|
||||
} else {
|
||||
capnslog.SetFormatter(capnslog.NewNilFormatter())
|
||||
}
|
||||
|
||||
log.Printf("This is: %s, version: %s", obj.Program, obj.Version)
|
||||
log.Printf("Main: Start: %v", start)
|
||||
|
||||
hostname, err := os.Hostname() // a sensible default
|
||||
// allow passing in the hostname, instead of using the system setting
|
||||
if h := obj.Hostname; h != nil && *h != "" { // override by cli
|
||||
hostname = *h
|
||||
} else if err != nil {
|
||||
return errwrap.Wrapf(err, "can't get default hostname")
|
||||
}
|
||||
if hostname == "" { // safety check
|
||||
return fmt.Errorf("hostname cannot be empty")
|
||||
}
|
||||
|
||||
var prefix = fmt.Sprintf("/var/lib/%s/", obj.Program) // default prefix
|
||||
if p := obj.Prefix; p != nil {
|
||||
prefix = *p
|
||||
}
|
||||
// make sure the working directory prefix exists
|
||||
if obj.TmpPrefix || os.MkdirAll(prefix, 0770) != nil {
|
||||
if obj.TmpPrefix || obj.AllowTmpPrefix {
|
||||
var err error
|
||||
if prefix, err = ioutil.TempDir("", obj.Program+"-"+hostname+"-"); err != nil {
|
||||
return fmt.Errorf("can't create temporary prefix")
|
||||
}
|
||||
log.Println("Main: Warning: Working prefix directory is temporary!")
|
||||
|
||||
} else {
|
||||
return fmt.Errorf("can't create prefix")
|
||||
}
|
||||
}
|
||||
log.Printf("Main: Working prefix is: %s", prefix)
|
||||
pgraphPrefix := fmt.Sprintf("%s/", path.Join(prefix, "pgraph")) // pgraph namespace
|
||||
if err := os.MkdirAll(pgraphPrefix, 0770); err != nil {
|
||||
return errwrap.Wrapf(err, "can't create pgraph prefix")
|
||||
}
|
||||
|
||||
var prom *prometheus.Prometheus
|
||||
if obj.Prometheus {
|
||||
prom = &prometheus.Prometheus{
|
||||
Listen: obj.PrometheusListen,
|
||||
}
|
||||
if err := prom.Init(); err != nil {
|
||||
return errwrap.Wrapf(err, "can't initialize Prometheus instance")
|
||||
}
|
||||
|
||||
log.Printf("Main: Prometheus: Starting instance on %s", prom.Listen)
|
||||
if err := prom.Start(); err != nil {
|
||||
return errwrap.Wrapf(err, "can't start Prometheus instance")
|
||||
}
|
||||
}
|
||||
|
||||
if !obj.NoPgp {
|
||||
pgpPrefix := fmt.Sprintf("%s/", path.Join(prefix, "pgp"))
|
||||
if err := os.MkdirAll(pgpPrefix, 0770); err != nil {
|
||||
return errwrap.Wrapf(err, "can't create pgp prefix")
|
||||
}
|
||||
|
||||
pgpKeyringPath := path.Join(pgpPrefix, pgp.DefaultKeyringFile) // default path
|
||||
|
||||
if p := obj.PgpKeyPath; p != nil {
|
||||
pgpKeyringPath = *p
|
||||
}
|
||||
|
||||
var err error
|
||||
if obj.pgpKeys, err = pgp.Import(pgpKeyringPath); err != nil && !os.IsNotExist(err) {
|
||||
return errwrap.Wrapf(err, "can't import pgp key")
|
||||
}
|
||||
|
||||
if obj.pgpKeys == nil {
|
||||
|
||||
identity := fmt.Sprintf("%s <%s> %s", obj.Program, "root@"+hostname, "generated by "+obj.Program)
|
||||
if p := obj.PgpIdentity; p != nil {
|
||||
identity = *p
|
||||
}
|
||||
|
||||
name, comment, email, err := pgp.ParseIdentity(identity)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "can't parse user string")
|
||||
|
||||
}
|
||||
|
||||
// TODO: Make hash configurable
|
||||
if obj.pgpKeys, err = pgp.Generate(name, comment, email, nil); err != nil {
|
||||
return errwrap.Wrapf(err, "can't create pgp key")
|
||||
}
|
||||
|
||||
if err := obj.pgpKeys.SaveKey(pgpKeyringPath); err != nil {
|
||||
return errwrap.Wrapf(err, "can't save pgp key")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Import admin key
|
||||
}
|
||||
|
||||
oldGraph := &pgraph.Graph{}
|
||||
graph := &resources.MGraph{}
|
||||
// pass in the information we need
|
||||
graph.Debug = obj.Flags.Debug
|
||||
graph.Init()
|
||||
|
||||
// exit after `max-runtime` seconds for no reason at all...
|
||||
if i := obj.MaxRuntime; i > 0 {
|
||||
go func() {
|
||||
time.Sleep(time.Duration(i) * time.Second)
|
||||
obj.Exit(nil)
|
||||
}()
|
||||
}
|
||||
|
||||
// setup converger
|
||||
converger := converger.NewConverger(
|
||||
obj.ConvergedTimeout,
|
||||
nil, // stateFn gets added in by EmbdEtcd
|
||||
)
|
||||
go converger.Loop(true) // main loop for converger, true to start paused
|
||||
|
||||
// embedded etcd
|
||||
if len(obj.seeds) == 0 {
|
||||
log.Printf("Main: Seeds: No seeds specified!")
|
||||
} else {
|
||||
log.Printf("Main: Seeds(%d): %v", len(obj.seeds), obj.seeds)
|
||||
}
|
||||
EmbdEtcd := etcd.NewEmbdEtcd(
|
||||
hostname,
|
||||
obj.seeds,
|
||||
obj.clientURLs,
|
||||
obj.serverURLs,
|
||||
obj.NoServer,
|
||||
obj.idealClusterSize,
|
||||
etcd.Flags{
|
||||
Debug: obj.Flags.Debug,
|
||||
Trace: obj.Flags.Trace,
|
||||
Verbose: obj.Flags.Verbose,
|
||||
},
|
||||
prefix,
|
||||
converger,
|
||||
)
|
||||
if EmbdEtcd == nil {
|
||||
// TODO: verify EmbdEtcd is not nil below...
|
||||
obj.Exit(fmt.Errorf("Main: Etcd: Creation failed"))
|
||||
} else if err := EmbdEtcd.Startup(); err != nil { // startup (returns when etcd main loop is running)
|
||||
obj.Exit(fmt.Errorf("Main: Etcd: Startup failed: %v", err))
|
||||
}
|
||||
|
||||
// wait for etcd server to be ready before continuing...
|
||||
select {
|
||||
case <-EmbdEtcd.ServerReady():
|
||||
log.Printf("Main: Etcd: Server: Ready!")
|
||||
// pass
|
||||
case <-time.After(((etcd.MaxStartServerTimeout * etcd.MaxStartServerRetries) + 1) * time.Second):
|
||||
obj.Exit(fmt.Errorf("Main: Etcd: Startup timeout"))
|
||||
}
|
||||
|
||||
convergerStateFn := func(b bool) error {
|
||||
// exit if we are using the converged timeout and we are the
|
||||
// root node. otherwise, if we are a child node in a remote
|
||||
// execution hierarchy, we should only notify our converged
|
||||
// state and wait for the parent to trigger the exit.
|
||||
if t := obj.ConvergedTimeout; obj.Depth == 0 && t >= 0 {
|
||||
if b {
|
||||
log.Printf("Main: Converged for %d seconds, exiting!", t)
|
||||
obj.Exit(nil) // trigger an exit!
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// send our individual state into etcd for others to see
|
||||
return etcd.SetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
|
||||
}
|
||||
if EmbdEtcd != nil {
|
||||
converger.SetStateFn(convergerStateFn)
|
||||
}
|
||||
|
||||
// implementation of the World API (alternates can be substituted in)
|
||||
world := &etcd.World{
|
||||
Hostname: hostname,
|
||||
EmbdEtcd: EmbdEtcd,
|
||||
}
|
||||
|
||||
graph.Data = &resources.ResData{
|
||||
Hostname: hostname,
|
||||
Converger: converger,
|
||||
Prometheus: prom,
|
||||
World: world,
|
||||
Prefix: pgraphPrefix,
|
||||
Debug: obj.Flags.Debug,
|
||||
}
|
||||
|
||||
var gapiChan chan gapi.Next // stream events contain some instructions!
|
||||
if obj.GAPI != nil {
|
||||
data := gapi.Data{
|
||||
Hostname: hostname,
|
||||
World: world,
|
||||
Noop: obj.Noop,
|
||||
//NoWatch: obj.NoWatch,
|
||||
NoConfigWatch: obj.NoConfigWatch,
|
||||
NoStreamWatch: obj.NoStreamWatch,
|
||||
}
|
||||
if err := obj.GAPI.Init(data); err != nil {
|
||||
obj.Exit(fmt.Errorf("Main: GAPI: Init failed: %v", err))
|
||||
} else {
|
||||
// this must generate at least one event for it to work
|
||||
gapiChan = obj.GAPI.Next() // stream of graph switch events!
|
||||
}
|
||||
}
|
||||
|
||||
exitchan := make(chan struct{}) // exit on close
|
||||
go func() {
|
||||
first := true // first loop or not
|
||||
for {
|
||||
log.Println("Main: Waiting...")
|
||||
// The GAPI should always kick off an event on Next() at
|
||||
// startup when (and if) it indeed has a graph to share!
|
||||
fastPause := false
|
||||
select {
|
||||
case next, ok := <-gapiChan:
|
||||
if !ok { // channel closed
|
||||
if obj.Flags.Debug {
|
||||
log.Printf("Main: GAPI exited")
|
||||
}
|
||||
gapiChan = nil // disable it
|
||||
continue
|
||||
}
|
||||
|
||||
// if we've been asked to exit...
|
||||
if next.Exit {
|
||||
obj.Exit(next.Err) // trigger exit
|
||||
continue // wait for exitchan
|
||||
}
|
||||
|
||||
// the gapi lets us send an error to the channel
|
||||
// this means there was a failure, but not fatal
|
||||
if err := next.Err; err != nil {
|
||||
log.Printf("Main: Error with graph stream: %v", err)
|
||||
continue // wait for another event
|
||||
}
|
||||
// everything else passes through to cause a compile!
|
||||
|
||||
fastPause = next.Fast // should we pause fast?
|
||||
|
||||
case <-exitchan:
|
||||
return
|
||||
}
|
||||
|
||||
if obj.GAPI == nil {
|
||||
log.Printf("Main: GAPI is empty!")
|
||||
continue
|
||||
}
|
||||
|
||||
// we need the vertices to be paused to work on them, so
|
||||
// run graph vertex LOCK...
|
||||
if !first { // TODO: we can flatten this check out I think
|
||||
converger.Pause() // FIXME: add sync wait?
|
||||
graph.Pause(fastPause) // sync
|
||||
|
||||
//graph.UnGroup() // FIXME: implement me if needed!
|
||||
}
|
||||
|
||||
// make the graph from yaml, lib, puppet->yaml, or dsl!
|
||||
newGraph, err := obj.GAPI.Graph() // generate graph!
|
||||
if err != nil {
|
||||
log.Printf("Main: Error creating new graph: %v", err)
|
||||
// unpause!
|
||||
if !first {
|
||||
graph.Start(first) // sync
|
||||
converger.Start() // after Start()
|
||||
}
|
||||
continue
|
||||
}
|
||||
if obj.Flags.Debug {
|
||||
log.Printf("Main: New Graph: %v", newGraph)
|
||||
}
|
||||
|
||||
// this edits the paused vertices, but it is safe to do
|
||||
// so even if we don't use this new graph, since those
|
||||
// value should be the same for existing vertices...
|
||||
for _, v := range newGraph.Vertices() {
|
||||
m := resources.VtoR(v).Meta()
|
||||
// apply the global noop parameter if requested
|
||||
if obj.Noop {
|
||||
m.Noop = obj.Noop
|
||||
}
|
||||
|
||||
// append the semaphore to each resource
|
||||
if obj.Sema > 0 { // NOTE: size == 0 would block
|
||||
// a semaphore with an empty id is valid
|
||||
m.Sema = append(m.Sema, fmt.Sprintf(":%d", obj.Sema))
|
||||
}
|
||||
}
|
||||
|
||||
// We don't have to "UnGroup()" to compare, since we
|
||||
// save the old graph to use when we compare.
|
||||
// TODO: Does this hurt performance or graph changes ?
|
||||
log.Printf("Main: GraphSync...")
|
||||
vertexCmpFn := func(v1, v2 pgraph.Vertex) (bool, error) {
|
||||
return resources.VtoR(v1).Compare(resources.VtoR(v2)), nil
|
||||
}
|
||||
vertexAddFn := func(v pgraph.Vertex) error {
|
||||
err := resources.VtoR(v).Validate()
|
||||
return errwrap.Wrapf(err, "could not Validate() resource")
|
||||
}
|
||||
vertexRemoveFn := func(v pgraph.Vertex) error {
|
||||
// wait for exit before starting new graph!
|
||||
resources.VtoR(v).Exit() // sync
|
||||
return nil
|
||||
}
|
||||
edgeCmpFn := func(e1, e2 pgraph.Edge) (bool, error) {
|
||||
edge1 := e1.(*resources.Edge) // panic if wrong
|
||||
edge2 := e2.(*resources.Edge) // panic if wrong
|
||||
return edge1.Compare(edge2), nil
|
||||
}
|
||||
// on success, this updates the receiver graph...
|
||||
if err := oldGraph.GraphSync(newGraph, vertexCmpFn, vertexAddFn, vertexRemoveFn, edgeCmpFn); err != nil {
|
||||
log.Printf("Main: Error running graph sync: %v", err)
|
||||
// unpause!
|
||||
if !first {
|
||||
graph.Start(first) // sync
|
||||
converger.Start() // after Start()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO: should we call each Res.Setup() here instead?
|
||||
|
||||
// add autoedges; modifies the graph only if no error
|
||||
if err := resources.AutoEdges(oldGraph); err != nil {
|
||||
log.Printf("Main: Error running auto edges: %v", err)
|
||||
// unpause!
|
||||
if !first {
|
||||
graph.Start(first) // sync
|
||||
converger.Start() // after Start()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
graph.Update(oldGraph) // copy in structure of new graph
|
||||
|
||||
resources.AutoGroup(graph.Graph, &resources.NonReachabilityGrouper{}) // run autogroup; modifies the graph
|
||||
// TODO: do we want to do a transitive reduction?
|
||||
// FIXME: run a type checker that verifies all the send->recv relationships
|
||||
|
||||
// Call this here because at this point the graph does
|
||||
// not know anything about the prometheus instance.
|
||||
if err := prom.UpdatePgraphStartTime(); err != nil {
|
||||
log.Printf("Main: Prometheus.UpdatePgraphStartTime() errored: %v", err)
|
||||
}
|
||||
// Start() needs to be synchronous or wait,
|
||||
// because if half of the nodes are started and
|
||||
// some are not ready yet and the EtcdWatch
|
||||
// loops, we'll cause Pause() before we
|
||||
// even got going, thus causing nil pointer errors
|
||||
graph.Start(first) // sync
|
||||
converger.Start() // after Start()
|
||||
|
||||
log.Printf("Main: Graph: %v", graph) // show graph
|
||||
if obj.Graphviz != "" {
|
||||
filter := obj.GraphvizFilter
|
||||
if filter == "" {
|
||||
filter = "dot" // directed graph default
|
||||
}
|
||||
if err := graph.ExecGraphviz(filter, obj.Graphviz, hostname); err != nil {
|
||||
log.Printf("Main: Graphviz: %v", err)
|
||||
} else {
|
||||
log.Printf("Main: Graphviz: Successfully generated graph!")
|
||||
}
|
||||
}
|
||||
first = false
|
||||
}
|
||||
}()
|
||||
|
||||
configWatcher := recwatch.NewConfigWatcher()
|
||||
configWatcher.Flags = recwatch.Flags{Debug: obj.Flags.Debug}
|
||||
events := configWatcher.Events()
|
||||
if !obj.NoWatch { // FIXME: fit this into a clean GAPI?
|
||||
configWatcher.Add(obj.Remotes...) // add all the files...
|
||||
} else {
|
||||
events = nil // signal that no-watch is true
|
||||
}
|
||||
go func() {
|
||||
select {
|
||||
case err := <-configWatcher.Error():
|
||||
obj.Exit(err) // trigger an exit!
|
||||
|
||||
case <-exitchan:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
// initialize the add watcher, which calls the f callback on map changes
|
||||
convergerCb := func(f func(map[string]bool) error) (func(), error) {
|
||||
return etcd.AddHostnameConvergedWatcher(EmbdEtcd, f)
|
||||
}
|
||||
|
||||
// build remotes struct for remote ssh
|
||||
remotes := remote.NewRemotes(
|
||||
EmbdEtcd.LocalhostClientURLs().StringSlice(),
|
||||
[]string{etcd.DefaultClientURL},
|
||||
obj.Noop,
|
||||
obj.Remotes, // list of files
|
||||
events, // watch for file changes
|
||||
obj.CConns,
|
||||
obj.AllowInteractive,
|
||||
obj.SSHPrivIDRsa,
|
||||
!obj.NoCaching,
|
||||
obj.Depth,
|
||||
prefix,
|
||||
converger,
|
||||
convergerCb,
|
||||
remote.Flags{
|
||||
Program: obj.Program,
|
||||
Debug: obj.Flags.Debug,
|
||||
},
|
||||
)
|
||||
|
||||
// TODO: is there any benefit to running the remotes above in the loop?
|
||||
// wait for etcd to be running before we remote in, which we do above!
|
||||
go remotes.Run()
|
||||
|
||||
if obj.GAPI == nil {
|
||||
converger.Start() // better start this for empty graphs
|
||||
}
|
||||
log.Println("Main: Running...")
|
||||
|
||||
reterr := <-obj.exit // wait for exit signal
|
||||
|
||||
log.Println("Main: Destroy...")
|
||||
|
||||
if obj.GAPI != nil {
|
||||
if err := obj.GAPI.Close(); err != nil {
|
||||
err = errwrap.Wrapf(err, "the GAPI closed poorly")
|
||||
reterr = multierr.Append(reterr, err) // list of errors
|
||||
}
|
||||
}
|
||||
|
||||
configWatcher.Close() // stop sending file changes to remotes
|
||||
if err := remotes.Exit(); err != nil { // tell all the remote connections to shutdown; waits!
|
||||
err = errwrap.Wrapf(err, "the Remote exited poorly")
|
||||
reterr = multierr.Append(reterr, err) // list of errors
|
||||
}
|
||||
|
||||
// tell inner main loop to exit
|
||||
close(exitchan)
|
||||
|
||||
graph.Exit() // tells all the children to exit, and waits for them to do so
|
||||
|
||||
// cleanup etcd main loop last so it can process everything first
|
||||
if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd
|
||||
err = errwrap.Wrapf(err, "embedded Etcd exited poorly")
|
||||
reterr = multierr.Append(reterr, err) // list of errors
|
||||
}
|
||||
|
||||
if obj.Prometheus {
|
||||
log.Printf("Main: Prometheus: Stopping instance")
|
||||
if err := prom.Stop(); err != nil {
|
||||
err = errwrap.Wrapf(err, "the Prometheus instance exited poorly")
|
||||
reterr = multierr.Append(reterr, err)
|
||||
}
|
||||
}
|
||||
|
||||
if obj.Flags.Debug {
|
||||
log.Printf("Main: Graph: %v", graph)
|
||||
}
|
||||
|
||||
// TODO: wait for each vertex to exit...
|
||||
log.Println("Goodbye!")
|
||||
return reterr
|
||||
}
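// Illustrative only: a sketch of driving Main directly as a library instead
// of going through CLI(); the field values are arbitrary examples, and the
// lib import alias is assumed.
//
//	obj := &lib.Main{
//		Program:   "mgmt",
//		Version:   "0.0.x",
//		Flags:     lib.Flags{Debug: true},
//		TmpPrefix: true, // use a throwaway working prefix
//	}
//	if err := obj.Init(); err != nil { // validate and process the settings
//		log.Fatal(err)
//	}
//	go func() {
//		time.Sleep(10 * time.Second)
//		obj.Exit(nil) // request a clean shutdown
//	}()
//	if err := obj.Run(); err != nil { // blocks until Exit() is called
//		log.Fatal(err)
//	}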
|
||||
559
main.go
@@ -1,5 +1,5 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
@@ -19,560 +19,33 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
etcdtypes "github.com/coreos/etcd/pkg/types"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/urfave/cli"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
mgmt "github.com/purpleidea/mgmt/lib"
|
||||
)
|
||||
|
||||
// set at compile time
|
||||
var (
|
||||
program string
|
||||
version string
|
||||
prefix = fmt.Sprintf("/var/lib/%s/", program)
|
||||
)
|
||||
|
||||
// variables controlling verbosity
|
||||
// These constants are some global variables that are used throughout the code.
|
||||
const (
|
||||
DEBUG = false // add additional log messages
|
||||
TRACE = false // add execution flow log messages
|
||||
VERBOSE = false // add extra log message output
|
||||
)
|
||||
|
||||
// signal handler
|
||||
func waitForSignal(exit chan bool) {
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||
//signal.Notify(signals, os.Kill) // catch signals
|
||||
signal.Notify(signals, syscall.SIGTERM)
|
||||
|
||||
select {
|
||||
case e := <-signals: // any signal will do
|
||||
if e == os.Interrupt {
|
||||
log.Println("Interrupted by ^C")
|
||||
} else {
|
||||
log.Println("Interrupted by signal")
|
||||
}
|
||||
case <-exit: // or a manual signal
|
||||
log.Println("Interrupted by exit signal")
|
||||
}
|
||||
}
|
||||
|
||||
// run is the main run target.
|
||||
func run(c *cli.Context) error {
|
||||
var start = time.Now().UnixNano()
|
||||
log.Printf("This is: %v, version: %v", program, version)
|
||||
log.Printf("Main: Start: %v", start)
|
||||
|
||||
hostname, _ := os.Hostname()
|
||||
// allow passing in the hostname, instead of using --hostname
|
||||
if c.IsSet("file") {
|
||||
if config := ParseConfigFromFile(c.String("file")); config != nil {
|
||||
if h := config.Hostname; h != "" {
|
||||
hostname = h
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.IsSet("hostname") { // override by cli
|
||||
if h := c.String("hostname"); h != "" {
|
||||
hostname = h
|
||||
}
|
||||
}
|
||||
noop := c.Bool("noop")
|
||||
|
||||
seeds, err := etcdtypes.NewURLs(
|
||||
FlattenListWithSplit(c.StringSlice("seeds"), []string{",", ";", " "}),
|
||||
// set at compile time
|
||||
var (
|
||||
program string
|
||||
version string
|
||||
)
|
||||
if err != nil && len(c.StringSlice("seeds")) > 0 {
|
||||
log.Printf("Main: Error: seeds didn't parse correctly!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
clientURLs, err := etcdtypes.NewURLs(
|
||||
FlattenListWithSplit(c.StringSlice("client-urls"), []string{",", ";", " "}),
|
||||
)
|
||||
if err != nil && len(c.StringSlice("client-urls")) > 0 {
|
||||
log.Printf("Main: Error: clientURLs didn't parse correctly!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
serverURLs, err := etcdtypes.NewURLs(
|
||||
FlattenListWithSplit(c.StringSlice("server-urls"), []string{",", ";", " "}),
|
||||
)
|
||||
if err != nil && len(c.StringSlice("server-urls")) > 0 {
|
||||
log.Printf("Main: Error: serverURLs didn't parse correctly!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
|
||||
idealClusterSize := uint16(c.Int("ideal-cluster-size"))
|
||||
if idealClusterSize < 1 {
|
||||
log.Printf("Main: Error: idealClusterSize should be at least one!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
|
||||
if c.IsSet("file") && c.IsSet("puppet") {
|
||||
log.Println("Main: Error: the --file and --puppet parameters cannot be used together!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
|
||||
if c.Bool("no-server") && len(c.StringSlice("remote")) > 0 {
|
||||
// TODO: in this case, we won't be able to tunnel stuff back to
|
||||
// here, so if we're okay with every remote graph running in an
|
||||
// isolated mode, then this is okay. Improve on this if there's
|
||||
// someone who really wants to be able to do this.
|
||||
log.Println("Main: Error: the --no-server and --remote parameters cannot be used together!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
|
||||
cConns := uint16(c.Int("cconns"))
|
||||
if cConns < 0 {
|
||||
log.Printf("Main: Error: --cconns should be at least zero!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
|
||||
if c.IsSet("converged-timeout") && cConns > 0 && len(c.StringSlice("remote")) > c.Int("cconns") {
|
||||
log.Printf("Main: Error: combining --converged-timeout with more remotes than available connections will never converge!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
|
||||
depth := uint16(c.Int("depth"))
|
||||
if depth < 0 { // user should not be using this argument manually
|
||||
log.Printf("Main: Error: negative values for --depth are not permitted!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
|
||||
if c.IsSet("prefix") && c.Bool("tmp-prefix") {
|
||||
log.Println("Main: Error: combining --prefix and the request for a tmp prefix is illogical!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
if s := c.String("prefix"); c.IsSet("prefix") && s != "" {
|
||||
prefix = s
|
||||
}
|
||||
|
||||
// make sure the working directory prefix exists
|
||||
if c.Bool("tmp-prefix") || os.MkdirAll(prefix, 0770) != nil {
|
||||
if c.Bool("tmp-prefix") || c.Bool("allow-tmp-prefix") {
|
||||
if prefix, err = ioutil.TempDir("", program+"-"); err != nil {
|
||||
log.Printf("Main: Error: Can't create temporary prefix!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
log.Println("Main: Warning: Working prefix directory is temporary!")
|
||||
|
||||
} else {
|
||||
log.Printf("Main: Error: Can't create prefix!")
|
||||
return cli.NewExitError("", 1)
|
||||
}
|
||||
}
|
||||
log.Printf("Main: Working prefix is: %s", prefix)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
exit := make(chan bool) // exit signal
|
||||
var G, fullGraph *Graph
|
||||
|
||||
// exit after `max-runtime` seconds for no reason at all...
|
||||
if i := c.Int("max-runtime"); i > 0 {
|
||||
go func() {
|
||||
time.Sleep(time.Duration(i) * time.Second)
|
||||
exit <- true
|
||||
}()
|
||||
}
|
||||
|
||||
// setup converger
|
||||
converger := NewConverger(
|
||||
c.Int("converged-timeout"),
|
||||
nil, // stateFn gets added in by EmbdEtcd
|
||||
)
|
||||
go converger.Loop(true) // main loop for converger, true to start paused
|
||||
|
||||
// embedded etcd
|
||||
if len(seeds) == 0 {
|
||||
log.Printf("Main: Seeds: No seeds specified!")
|
||||
} else {
|
||||
log.Printf("Main: Seeds(%v): %v", len(seeds), seeds)
|
||||
}
|
||||
EmbdEtcd := NewEmbdEtcd(
|
||||
hostname,
|
||||
seeds,
|
||||
clientURLs,
|
||||
serverURLs,
|
||||
c.Bool("no-server"),
|
||||
idealClusterSize,
|
||||
prefix,
|
||||
converger,
|
||||
)
|
||||
if EmbdEtcd == nil {
|
||||
// TODO: verify EmbdEtcd is not nil below...
|
||||
log.Printf("Main: Etcd: Creation failed!")
|
||||
exit <- true
|
||||
} else if err := EmbdEtcd.Startup(); err != nil { // startup (returns when etcd main loop is running)
|
||||
log.Printf("Main: Etcd: Startup failed: %v", err)
|
||||
exit <- true
|
||||
}
|
||||
convergerStateFn := func(b bool) error {
|
||||
// exit if we are using the converged-timeout and we are the
|
||||
// root node. otherwise, if we are a child node in a remote
|
||||
// execution hierarchy, we should only notify our converged
|
||||
// state and wait for the parent to trigger the exit.
|
||||
if depth == 0 && c.Int("converged-timeout") >= 0 {
|
||||
if b {
|
||||
log.Printf("Converged for %d seconds, exiting!", c.Int("converged-timeout"))
|
||||
exit <- true // trigger an exit!
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// send our individual state into etcd for others to see
|
||||
return EtcdSetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
|
||||
}
|
||||
if EmbdEtcd != nil {
|
||||
converger.SetStateFn(convergerStateFn)
|
||||
}
|
||||
|
||||
exitchan := make(chan Event) // exit event
|
||||
go func() {
|
||||
startchan := make(chan struct{}) // start signal
|
||||
go func() { startchan <- struct{}{} }()
|
||||
file := c.String("file")
|
||||
var configchan chan bool
|
||||
var puppetchan <-chan time.Time
|
||||
if !c.Bool("no-watch") && c.IsSet("file") {
|
||||
configchan = ConfigWatch(file)
|
||||
} else if c.IsSet("puppet") {
|
||||
interval := PuppetInterval(c.String("puppet-conf"))
|
||||
puppetchan = time.Tick(time.Duration(interval) * time.Second)
|
||||
}
|
||||
log.Println("Etcd: Starting...")
|
||||
etcdchan := EtcdWatch(EmbdEtcd)
|
||||
first := true // first loop or not
|
||||
for {
|
||||
log.Println("Main: Waiting...")
|
||||
select {
|
||||
case <-startchan: // kick the loop once at start
|
||||
// pass
|
||||
|
||||
case b := <-etcdchan:
|
||||
if !b { // ignore the message
|
||||
continue
|
||||
}
|
||||
// everything else passes through to cause a compile!
|
||||
|
||||
case <-puppetchan:
|
||||
// nothing, just go on
|
||||
|
||||
case msg := <-configchan:
|
||||
if c.Bool("no-watch") || !msg {
|
||||
continue // not ready to read config
|
||||
}
|
||||
// XXX: case compile_event: ...
|
||||
// ...
|
||||
case msg := <-exitchan:
|
||||
msg.ACK()
|
||||
return
|
||||
}
|
||||
|
||||
var config *GraphConfig
|
||||
if c.IsSet("file") {
|
||||
config = ParseConfigFromFile(file)
|
||||
} else if c.IsSet("puppet") {
|
||||
config = ParseConfigFromPuppet(c.String("puppet"), c.String("puppet-conf"))
|
||||
}
|
||||
if config == nil {
|
||||
log.Printf("Config: Parse failure")
|
||||
continue
|
||||
}
|
||||
|
||||
if config.Hostname != "" && config.Hostname != hostname {
|
||||
log.Printf("Config: Hostname changed, ignoring config!")
|
||||
continue
|
||||
}
|
||||
config.Hostname = hostname // set it in case it was ""
|
||||
|
||||
// run graph vertex LOCK...
|
||||
if !first { // TODO: we can flatten this check out I think
|
||||
converger.Pause() // FIXME: add sync wait?
|
||||
G.Pause() // sync
|
||||
}
|
||||
|
||||
// build graph from yaml file on events (eg: from etcd)
|
||||
// we need the vertices to be paused to work on them
|
||||
if newFullgraph, err := fullGraph.NewGraphFromConfig(config, EmbdEtcd, noop); err == nil { // keep references to all original elements
|
||||
fullGraph = newFullgraph
|
||||
} else {
|
||||
log.Printf("Config: Error making new graph from config: %v", err)
|
||||
// unpause!
|
||||
if !first {
|
||||
G.Start(&wg, first) // sync
|
||||
converger.Start() // after G.Start()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
G = fullGraph.Copy() // copy to active graph
|
||||
// XXX: do etcd transaction out here...
|
||||
G.AutoEdges() // add autoedges; modifies the graph
|
||||
G.AutoGroup() // run autogroup; modifies the graph
|
||||
// TODO: do we want to do a transitive reduction?
|
||||
|
||||
log.Printf("Graph: %v", G) // show graph
|
||||
err := G.ExecGraphviz(c.String("graphviz-filter"), c.String("graphviz"))
|
||||
if err != nil {
|
||||
log.Printf("Graphviz: %v", err)
|
||||
} else {
|
||||
log.Printf("Graphviz: Successfully generated graph!")
|
||||
}
|
||||
G.AssociateData(converger)
|
||||
// G.Start(...) needs to be synchronous or wait,
|
||||
// because if half of the nodes are started and
|
||||
// some are not ready yet and the EtcdWatch
|
||||
// loops, we'll cause G.Pause(...) before we
|
||||
// even got going, thus causing nil pointer errors
|
||||
G.Start(&wg, first) // sync
|
||||
converger.Start() // after G.Start()
|
||||
first = false
|
||||
}
|
||||
}()
|
||||
|
||||
configWatcher := NewConfigWatcher()
|
||||
events := configWatcher.Events()
|
||||
if !c.Bool("no-watch") {
|
||||
configWatcher.Add(c.StringSlice("remote")...) // add all the files...
|
||||
} else {
|
||||
events = nil // signal that no-watch is true
|
||||
}
|
||||
|
||||
// initialize the add watcher, which calls the f callback on map changes
|
||||
convergerCb := func(f func(map[string]bool) error) (func(), error) {
|
||||
return EtcdAddHostnameConvergedWatcher(EmbdEtcd, f)
|
||||
}
|
||||
|
||||
// build remotes struct for remote ssh
|
||||
remotes := NewRemotes(
|
||||
EmbdEtcd.LocalhostClientURLs().StringSlice(),
|
||||
[]string{DefaultClientURL},
|
||||
noop,
|
||||
c.StringSlice("remote"), // list of files
|
||||
events, // watch for file changes
|
||||
cConns,
|
||||
c.Bool("allow-interactive"),
|
||||
c.String("ssh-priv-id-rsa"),
|
||||
!c.Bool("no-caching"),
|
||||
depth,
|
||||
prefix,
|
||||
converger,
|
||||
convergerCb,
|
||||
)
|
||||
|
||||
// TODO: is there any benefit to running the remotes above in the loop?
|
||||
// wait for etcd to be running before we remote in, which we do above!
|
||||
go remotes.Run()
|
||||
|
||||
if !c.IsSet("file") && !c.IsSet("puppet") {
|
||||
converger.Start() // better start this for empty graphs
|
||||
}
|
||||
log.Println("Main: Running...")
|
||||
|
||||
waitForSignal(exit) // pass in exit channel to watch
|
||||
|
||||
log.Println("Destroy...")
|
||||
|
||||
configWatcher.Close() // stop sending file changes to remotes
|
||||
remotes.Exit() // tell all the remote connections to shutdown; waits!
|
||||
|
||||
G.Exit() // tell all the children to exit
|
||||
|
||||
// tell inner main loop to exit
|
||||
resp := NewResp()
|
||||
go func() { exitchan <- Event{eventExit, resp, "", false} }()
|
||||
|
||||
// cleanup etcd main loop last so it can process everything first
|
||||
if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd
|
||||
log.Printf("Etcd exited poorly with: %v", err)
|
||||
}
|
||||
|
||||
resp.ACKWait() // let inner main loop finish cleanly just in case
|
||||
|
||||
if DEBUG {
|
||||
log.Printf("Graph: %v", G)
|
||||
}
|
||||
|
||||
wg.Wait() // wait for primary go routines to exit
|
||||
|
||||
// TODO: wait for each vertex to exit...
|
||||
log.Println("Goodbye!")
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
var flags int
|
||||
if DEBUG || true { // TODO: remove || true
|
||||
flags = log.LstdFlags | log.Lshortfile
|
||||
flags := mgmt.Flags{
|
||||
Debug: DEBUG,
|
||||
Trace: TRACE,
|
||||
Verbose: VERBOSE,
|
||||
}
|
||||
flags = (flags - log.Ldate) // remove the date for now
|
||||
log.SetFlags(flags)
|
||||
|
||||
// un-hijack from capnslog...
|
||||
log.SetOutput(os.Stderr)
|
||||
if VERBOSE {
|
||||
capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stderr, "(etcd) ", flags))
|
||||
} else {
|
||||
capnslog.SetFormatter(capnslog.NewNilFormatter())
|
||||
if err := mgmt.CLI(program, version, flags); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
|
||||
// test for sanity
|
||||
if program == "" || version == "" {
|
||||
log.Fatal("Program was not compiled correctly. Please see Makefile.")
|
||||
}
|
||||
app := cli.NewApp()
|
||||
app.Name = program
|
||||
app.Usage = "next generation config management"
|
||||
app.Version = version
|
||||
//app.Action = ... // without a default action, help runs
|
||||
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "run",
|
||||
Aliases: []string{"r"},
|
||||
Usage: "run",
|
||||
Action: run,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "file, f",
|
||||
Value: "",
|
||||
Usage: "graph definition to run",
|
||||
EnvVar: "MGMT_FILE",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-watch",
|
||||
Usage: "do not update graph on watched graph definition file changes",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "code, c",
|
||||
Value: "",
|
||||
Usage: "code definition to run",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "graphviz, g",
|
||||
Value: "",
|
||||
Usage: "output file for graphviz data",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "graphviz-filter, gf",
|
||||
Value: "dot", // directed graph default
|
||||
Usage: "graphviz filter to use",
|
||||
},
|
||||
// useful for testing multiple instances on same machine
|
||||
cli.StringFlag{
|
||||
Name: "hostname",
|
||||
Value: "",
|
||||
Usage: "hostname to use",
|
||||
},
|
||||
// if empty, it will startup a new server
|
||||
cli.StringSliceFlag{
|
||||
Name: "seeds, s",
|
||||
Value: &cli.StringSlice{}, // empty slice
|
||||
Usage: "default etc client endpoint",
|
||||
EnvVar: "MGMT_SEEDS",
|
||||
},
|
||||
// port 2379 and 4001 are common
|
||||
cli.StringSliceFlag{
|
||||
Name: "client-urls",
|
||||
Value: &cli.StringSlice{},
|
||||
Usage: "list of URLs to listen on for client traffic",
|
||||
EnvVar: "MGMT_CLIENT_URLS",
|
||||
},
|
||||
// port 2380 and 7001 are common
|
||||
cli.StringSliceFlag{
|
||||
Name: "server-urls, peer-urls",
|
||||
Value: &cli.StringSlice{},
|
||||
Usage: "list of URLs to listen on for server (peer) traffic",
|
||||
EnvVar: "MGMT_SERVER_URLS",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-server",
|
||||
Usage: "do not let other servers peer with me",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "ideal-cluster-size",
|
||||
Value: defaultIdealClusterSize,
|
||||
Usage: "ideal number of server peers in cluster, only read by initial server",
|
||||
EnvVar: "MGMT_IDEAL_CLUSTER_SIZE",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "converged-timeout, t",
|
||||
Value: -1,
|
||||
Usage: "exit after approximately this many seconds in a converged state",
|
||||
EnvVar: "MGMT_CONVERGED_TIMEOUT",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "max-runtime",
|
||||
Value: 0,
|
||||
Usage: "exit after a maximum of approximately this many seconds",
|
||||
EnvVar: "MGMT_MAX_RUNTIME",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "noop",
|
||||
Usage: "globally force all resources into no-op mode",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "puppet, p",
|
||||
Value: "",
|
||||
Usage: "load graph from puppet, optionally takes a manifest or path to manifest file",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "puppet-conf",
|
||||
Value: "",
|
||||
Usage: "supply the path to an alternate puppet.conf file to use",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "remote",
|
||||
Value: &cli.StringSlice{},
|
||||
Usage: "list of remote graph definitions to run",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "allow-interactive",
|
||||
Usage: "allow interactive prompting, such as for remote passwords",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "ssh-priv-id-rsa",
|
||||
Value: "~/.ssh/id_rsa",
|
||||
Usage: "default path to ssh key file, set empty to never touch",
|
||||
EnvVar: "MGMT_SSH_PRIV_ID_RSA",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "cconns",
|
||||
Value: 0,
|
||||
Usage: "number of maximum concurrent remote ssh connections to run, 0 for unlimited",
|
||||
EnvVar: "MGMT_CCONNS",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "no-caching",
|
||||
Usage: "don't allow remote caching of remote execution binary",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "depth",
|
||||
Hidden: true, // internal use only
|
||||
Value: 0,
|
||||
Usage: "specify depth in remote hierarchy",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "prefix",
|
||||
Usage: "specify a path to the working prefix directory",
|
||||
EnvVar: "MGMT_PREFIX",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "tmp-prefix",
|
||||
Usage: "request a pseudo-random, temporary prefix to be used",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "allow-tmp-prefix",
|
||||
Usage: "allow creation of a new temporary prefix if main prefix is unavailable",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
app.EnableBashCompletion = true
|
||||
app.Run(os.Args)
|
||||
}
|
||||
|
||||
misc/delta-cpu.sh (81 changed lines, executable file)
@@ -0,0 +1,81 @@
|
||||
#!/bin/bash
|
||||
# shitty cpu count control, useful for live demos
|
||||
|
||||
minimum=1 # don't decrease below this number of cpus
|
||||
maximum=8 # don't increase above this number of cpus
|
||||
count=1 # initial count
|
||||
factor=3
|
||||
function output() {
|
||||
count=$1 # arg!
|
||||
cat << EOF > ~/code/mgmt/examples/virt4.yaml
|
||||
---
|
||||
graph: mygraph
|
||||
resources:
|
||||
virt:
|
||||
- name: mgmt4
|
||||
meta:
|
||||
limit: .inf
|
||||
burst: 0
|
||||
uri: 'qemu:///session'
|
||||
cpus: $count
|
||||
maxcpus: $maximum
|
||||
memory: 524288
|
||||
boot:
|
||||
- hd
|
||||
disk:
|
||||
- type: qcow2
|
||||
source: "~/.local/share/libvirt/images/fedora-23-scratch.qcow2"
|
||||
state: running
|
||||
transient: false
|
||||
edges: []
|
||||
comment: "qemu-img create -b fedora-23.qcow2 -f qcow2 fedora-23-scratch.qcow2"
|
||||
EOF
|
||||
}
|
||||
#tput cuu 1 && tput el # remove last line
|
||||
str=''
|
||||
tnuoc=$((maximum-count)) # backwards count
|
||||
count2=$((count * factor))
|
||||
tnuoc2=$((tnuoc * factor))
|
||||
left=`yes '>' | head -$count2 | paste -s -d '' -`
|
||||
right=`yes ' ' | head -$tnuoc2 | paste -s -d '' -`
|
||||
str="${left}${right}"
|
||||
_min=$((minimum-1))
|
||||
_max=$((maximum+1))
|
||||
reset # clean up once...
|
||||
output $count # call function
|
||||
while true; do
|
||||
|
||||
read -n1 -r -s -p "CPU count is: $count; ${str} Press +/- key to adjust." key
|
||||
if [ "$key" = "q" ] || [ "$key" = "Q" ]; then
|
||||
echo # newline
|
||||
exit
|
||||
fi
|
||||
if [ ! "$key" = "+" ] && [ ! "$key" = "-" ] && [ ! "$key" = "=" ] && [ ! "$key" = "_" ]; then # wrong key
|
||||
reset # woops, reset it all...
|
||||
continue
|
||||
fi
|
||||
if [ "$key" == "+" ] || [ "$key" == "=" ]; then
|
||||
count=$((count+1))
|
||||
fi
|
||||
if [ "$key" == "-" ] || [ "$key" == "_" ]; then
|
||||
count=$((count-1))
|
||||
fi
|
||||
if [ $count -eq $_min ]; then # min
|
||||
count=$minimum
|
||||
fi
|
||||
if [ $count -eq $_max ]; then # max
|
||||
count=$maximum
|
||||
fi
|
||||
|
||||
tnuoc=$((maximum-count)) # backwards count
|
||||
#echo "count is: $count"
|
||||
#echo "tnuoc is: $tnuoc"
|
||||
count2=$((count * factor))
|
||||
tnuoc2=$((tnuoc * factor))
|
||||
left=`yes '>' | head -$count2 | paste -s -d '' -`
|
||||
right=`yes ' ' | head -$tnuoc2 | paste -s -d '' -`
|
||||
str="${left}${right}"
|
||||
#echo "str is: $str"
|
||||
echo -ne '\r' # backup
|
||||
output $count # call function
|
||||
done
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
# setup a simple go environment
|
||||
XPWD=`pwd`
|
||||
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" # dir!
|
||||
@@ -11,13 +11,42 @@ fi
|
||||
|
||||
sudo_command=$(which sudo)
|
||||
|
||||
if [ $travis -eq 0 ]; then
|
||||
YUM=`which yum 2>/dev/null`
|
||||
DNF=`which dnf 2>/dev/null`
|
||||
APT=`which apt-get 2>/dev/null`
|
||||
if [ -z "$YUM" -a -z "$APT" ]; then
|
||||
BREW=`which brew 2>/dev/null`
|
||||
PACMAN=`which pacman 2>/dev/null`
|
||||
|
||||
# if DNF is available use it
|
||||
if [ -x "$DNF" ]; then
|
||||
YUM=$DNF
|
||||
fi
|
||||
|
||||
if [ -z "$YUM" -a -z "$APT" -a -z "$BREW" -a -z "$PACMAN" ]; then
|
||||
echo "The package managers can't be found."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -z "$YUM" ]; then
|
||||
$sudo_command $YUM install -y libvirt-devel
|
||||
$sudo_command $YUM install -y augeas-devel
|
||||
|
||||
fi
|
||||
if [ ! -z "$APT" ]; then
|
||||
$sudo_command $APT install -y libvirt-dev || true
|
||||
$sudo_command $APT install -y libaugeas-dev || true
|
||||
$sudo_command $APT install -y libpcap0.8-dev || true
|
||||
fi
|
||||
|
||||
if [ ! -z "$BREW" ]; then
|
||||
$BREW install libvirt || true
|
||||
fi
|
||||
|
||||
if [ ! -z "$PACMAN" ]; then
|
||||
$sudo_command $PACMAN -S --noconfirm libvirt augeas libpcap
|
||||
fi
|
||||
|
||||
if [ $travis -eq 0 ]; then
|
||||
if [ ! -z "$YUM" ]; then
|
||||
# some go dependencies are stored in mercurial
|
||||
$sudo_command $YUM install -y golang golang-googlecode-tools-stringer hg
|
||||
@@ -30,15 +59,18 @@ if [ $travis -eq 0 ]; then
|
||||
$sudo_command $APT install -y golang-golang-x-tools || true
|
||||
$sudo_command $APT install -y golang-go.tools || true
|
||||
fi
|
||||
if [ ! -z "$PACMAN" ]; then
|
||||
$sudo_command $PACMAN -S --noconfirm go
|
||||
fi
|
||||
fi
|
||||
|
||||
# if golang is too old, we don't want to fail with an obscure error later
|
||||
if go version | grep 'go1\.[0123]\.'; then
|
||||
echo "mgmt requires go1.4 or higher."
|
||||
if go version | grep 'go1\.[012345]\.'; then
|
||||
echo "mgmt requires go1.6 or higher."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
go get ./... # get all the go dependencies
|
||||
go get -d ./... # get all the go dependencies
|
||||
[ -e "$GOBIN/mgmt" ] && rm -f "$GOBIN/mgmt" # the `go get` version has no -X
|
||||
# vet is built in since go 1.6 - we check for the go vet command
|
||||
go vet 1> /dev/null 2>&1
|
||||
@@ -48,4 +80,5 @@ if [[ $ret != 0 ]]; then
|
||||
fi
|
||||
go get golang.org/x/tools/cmd/stringer # for automatic stringer-ing
|
||||
go get github.com/golang/lint/golint # for `golint`-ing
|
||||
go get github.com/alecthomas/gometalinter && gometalinter --install # bonus
|
||||
cd "$XPWD" >/dev/null
|
||||
|
||||
@@ -5,7 +5,7 @@ After=systemd-networkd.service
|
||||
Requires=systemd-networkd.service
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/bin/mgmt run ${OPTS}
|
||||
ExecStart=/usr/bin/mgmt run $OPTS
|
||||
RestartSec=5s
|
||||
Restart=always
|
||||
|
||||
|
||||
noop.go (155 changed lines)
@@ -1,155 +0,0 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
gob.Register(&NoopRes{})
|
||||
}
|
||||
|
||||
// NoopRes is a no-op resource that does nothing.
|
||||
type NoopRes struct {
|
||||
BaseRes `yaml:",inline"`
|
||||
Comment string `yaml:"comment"` // extra field for example purposes
|
||||
}
|
||||
|
||||
// NewNoopRes is a constructor for this resource. It also calls Init() for you.
|
||||
func NewNoopRes(name string) *NoopRes {
|
||||
obj := &NoopRes{
|
||||
BaseRes: BaseRes{
|
||||
Name: name,
|
||||
},
|
||||
Comment: "",
|
||||
}
|
||||
obj.Init()
|
||||
return obj
|
||||
}
|
||||
|
||||
// Init runs some startup code for this resource.
|
||||
func (obj *NoopRes) Init() {
|
||||
obj.BaseRes.kind = "Noop"
|
||||
obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||
}
|
||||
|
||||
// validate if the params passed in are valid data
|
||||
// FIXME: where should this get called ?
|
||||
func (obj *NoopRes) Validate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *NoopRes) Watch(processChan chan Event) {
|
||||
if obj.IsWatching() {
|
||||
return
|
||||
}
|
||||
obj.SetWatching(true)
|
||||
defer obj.SetWatching(false)
|
||||
cuuid := obj.converger.Register()
|
||||
defer cuuid.Unregister()
|
||||
|
||||
var send = false // send event?
|
||||
var exit = false
|
||||
for {
|
||||
obj.SetState(resStateWatching) // reset
|
||||
select {
|
||||
case event := <-obj.events:
|
||||
cuuid.SetConverged(false)
|
||||
// we avoid sending events on unpause
|
||||
if exit, send = obj.ReadEvent(&event); exit {
|
||||
return // exit
|
||||
}
|
||||
|
||||
case <-cuuid.ConvergedTimer():
|
||||
cuuid.SetConverged(true) // converged!
|
||||
continue
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
// only do this on certain types of events
|
||||
//obj.isStateOK = false // something made state dirty
|
||||
resp := NewResp()
|
||||
processChan <- Event{eventNil, resp, "", true} // trigger process
|
||||
resp.ACKWait() // wait for the ACK()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CheckApply method for Noop resource. Does nothing, returns happy!
|
||||
func (obj *NoopRes) CheckApply(apply bool) (checkok bool, err error) {
|
||||
log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
|
||||
return true, nil // state is always okay
|
||||
}
|
||||
|
||||
// NoopUUID is the UUID struct for NoopRes.
|
||||
type NoopUUID struct {
|
||||
BaseUUID
|
||||
name string
|
||||
}
|
||||
|
||||
// The AutoEdges method returns the AutoEdges. In this case none are used.
|
||||
func (obj *NoopRes) AutoEdges() AutoEdge {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetUUIDs includes all params to make a unique identification of this object.
|
||||
// Most resources only return one, although some resources can return multiple.
|
||||
func (obj *NoopRes) GetUUIDs() []ResUUID {
|
||||
x := &NoopUUID{
|
||||
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
|
||||
name: obj.Name,
|
||||
}
|
||||
return []ResUUID{x}
|
||||
}
|
||||
|
||||
// GroupCmp returns whether two resources can be grouped together or not.
|
||||
func (obj *NoopRes) GroupCmp(r Res) bool {
|
||||
_, ok := r.(*NoopRes)
|
||||
if !ok {
|
||||
// NOTE: technically we could group a noop into any other
|
||||
// resource, if that resource knew how to handle it, although,
|
||||
// since the mechanics of inter-kind resource grouping are
|
||||
// tricky, avoid doing this until there's a good reason.
|
||||
return false
|
||||
}
|
||||
return true // noop resources can always be grouped together!
|
||||
}
|
||||
|
||||
// Compare two resources and return if they are equivalent.
|
||||
func (obj *NoopRes) Compare(res Res) bool {
|
||||
switch res.(type) {
|
||||
// we can only compare NoopRes to others of the same resource
|
||||
case *NoopRes:
|
||||
res := res.(*NoopRes)
|
||||
// calling base Compare is unneeded for the noop res
|
||||
//if !obj.BaseRes.Compare(res) { // call base Compare
|
||||
// return false
|
||||
//}
|
||||
if obj.Name != res.Name {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
pgp/pgp.go (230 changed lines, new file)
@@ -0,0 +1,230 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package pgp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto"
|
||||
"encoding/base64"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
errwrap "github.com/pkg/errors"
|
||||
"golang.org/x/crypto/openpgp"
|
||||
"golang.org/x/crypto/openpgp/packet"
|
||||
)
|
||||
|
||||
// DefaultKeyringFile is the default file name for keyrings.
|
||||
const DefaultKeyringFile = "keyring.pgp"
|
||||
|
||||
// CONFIG holds the default packet config; init sets its default hash.
|
||||
var CONFIG packet.Config
|
||||
|
||||
func init() {
|
||||
CONFIG.DefaultHash = crypto.SHA256
|
||||
}
|
||||
|
||||
// PGP contains base entity.
|
||||
type PGP struct {
|
||||
Entity *openpgp.Entity
|
||||
}
|
||||
|
||||
// Import reads a private key from the given path and returns a PGP object.
|
||||
func Import(privKeyPath string) (*PGP, error) {
|
||||
|
||||
privKeyFile, err := os.Open(privKeyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer privKeyFile.Close()
|
||||
|
||||
file := packet.NewReader(bufio.NewReader(privKeyFile))
|
||||
entity, err := openpgp.ReadEntity(file)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "can't read entity from path")
|
||||
}
|
||||
|
||||
obj := &PGP{
|
||||
Entity: entity,
|
||||
}
|
||||
|
||||
log.Printf("PGP: Imported key: %s", obj.Entity.PrivateKey.KeyIdShortString())
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// Generate creates a new key pair. This key pair must be saved or it will be lost.
|
||||
func Generate(name, comment, email string, hash *crypto.Hash) (*PGP, error) {
|
||||
if hash != nil {
|
||||
CONFIG.DefaultHash = *hash
|
||||
}
|
||||
// generate a new public/private key pair
|
||||
entity, err := openpgp.NewEntity(name, comment, email, &CONFIG)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "can't generate entity")
|
||||
}
|
||||
|
||||
obj := &PGP{
|
||||
Entity: entity,
|
||||
}
|
||||
|
||||
log.Printf("PGP: Created key: %s", obj.Entity.PrivateKey.KeyIdShortString())
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// SaveKey writes the whole entity (including private key!) to a .gpg file.
|
||||
func (obj *PGP) SaveKey(path string) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "can't create file from given path")
|
||||
}
|
||||
|
||||
w := bufio.NewWriter(f) // buffered writes; bufio.NewWriter itself can't fail
|
||||
|
||||
if err := obj.Entity.SerializePrivate(w, &CONFIG); err != nil {
|
||||
return errwrap.Wrapf(err, "can't serialize private key")
|
||||
}
|
||||
|
||||
for _, ident := range obj.Entity.Identities {
|
||||
for _, sig := range ident.Signatures {
|
||||
if err := sig.Serialize(w); err != nil {
|
||||
return errwrap.Wrapf(err, "can't serialize signature")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.Flush(); err != nil {
|
||||
return errwrap.Wrapf(err, "enable to flush writer")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteFile writes the given buffer to the specified path.
|
||||
func (obj *PGP) WriteFile(path string, buff *bytes.Buffer) error {
|
||||
w, err := createWriter(path)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "can't create writer")
|
||||
}
|
||||
buff.WriteTo(w)
|
||||
|
||||
if err := w.Flush(); err != nil {
|
||||
return errwrap.Wrapf(err, "can't flush buffered data")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createWriter is a small helper that avoids duplicating buffered writer setup.
|
||||
func createWriter(path string) (*bufio.Writer, error) {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "can't create file from given path")
|
||||
}
|
||||
return bufio.NewWriter(f), nil
|
||||
}
|
||||
|
||||
// Encrypt encrypts a message for the specified entity and returns it base64 encoded.
|
||||
func (obj *PGP) Encrypt(to *openpgp.Entity, msg string) (string, error) {
|
||||
buf, err := obj.EncryptMsg(to, msg)
|
||||
if err != nil {
|
||||
return "", errwrap.Wrapf(err, "can't encrypt message")
|
||||
}
|
||||
|
||||
// encode to base64
|
||||
bytes, err := ioutil.ReadAll(buf)
|
||||
if err != nil {
|
||||
return "", errwrap.Wrapf(err, "can't read unverified body")
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(bytes), nil
|
||||
}
|
||||
|
||||
// EncryptMsg encrypts the message.
|
||||
func (obj *PGP) EncryptMsg(to *openpgp.Entity, msg string) (*bytes.Buffer, error) {
|
||||
ents := []*openpgp.Entity{to}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
w, err := openpgp.Encrypt(buf, ents, obj.Entity, nil, nil)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "can't encrypt message")
|
||||
}
|
||||
|
||||
_, err = w.Write([]byte(msg))
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "can't write to buffer")
|
||||
}
|
||||
|
||||
if err = w.Close(); err != nil {
|
||||
return nil, errwrap.Wrapf(err, "can't close writer")
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Decrypt decrypts a base64 encoded, encrypted message.
|
||||
func (obj *PGP) Decrypt(encString string) (string, error) {
|
||||
entityList := openpgp.EntityList{obj.Entity}
|
||||
|
||||
// decode the base64 string
|
||||
dec, err := base64.StdEncoding.DecodeString(encString)
|
||||
if err != nil {
|
||||
return "", errwrap.Wrapf(err, "fail at decoding encrypted string")
|
||||
}
|
||||
|
||||
// decrypt it with the contents of the private key
|
||||
md, err := openpgp.ReadMessage(bytes.NewBuffer(dec), entityList, nil, nil)
|
||||
if err != nil {
|
||||
return "", errwrap.Wrapf(err, "can't read message")
|
||||
}
|
||||
|
||||
bytes, err := ioutil.ReadAll(md.UnverifiedBody)
|
||||
if err != nil {
|
||||
return "", errwrap.Wrapf(err, "can't read unverified body")
|
||||
}
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// GetIdentities returns the name of the first identity on the current entity.
|
||||
func (obj *PGP) GetIdentities() (string, error) {
|
||||
identities := []*openpgp.Identity{}
|
||||
|
||||
for _, v := range obj.Entity.Identities {
|
||||
identities = append(identities, v)
|
||||
}
|
||||
return identities[0].Name, nil
|
||||
}
|
||||
|
||||
// ParseIdentity parses an identity into name, comment and email components.
|
||||
func ParseIdentity(identity string) (name, comment, email string, err error) {
|
||||
// get name
|
||||
n := strings.Split(identity, " <")
|
||||
if len(n) != 2 {
|
||||
return "", "", "", errwrap.Wrap(err, "user string mal formated")
|
||||
}
|
||||
|
||||
// get email and comment
|
||||
ec := strings.Split(n[1], "> ")
|
||||
if len(ec) != 2 {
|
||||
return "", "", "", errwrap.Wrap(err, "user string mal formated")
|
||||
}
|
||||
|
||||
return n[0], ec[1], ec[0], nil
|
||||
}
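As a quick usage sketch of the new pgp package above (the import path is assumed from the repository layout shown in this diff, and the self-addressed round trip is only for illustration):

```go
package main

import (
	"log"

	"github.com/purpleidea/mgmt/pgp" // assumed import path
)

func main() {
	// Generate a throwaway key pair; nil keeps the package's default hash.
	keys, err := pgp.Generate("example", "demo key", "example@example.com", nil)
	if err != nil {
		log.Fatalf("generate: %v", err)
	}

	// Encrypt a message to our own entity, then decrypt it again.
	enc, err := keys.Encrypt(keys.Entity, "hello world")
	if err != nil {
		log.Fatalf("encrypt: %v", err)
	}
	dec, err := keys.Decrypt(enc)
	if err != nil {
		log.Fatalf("decrypt: %v", err)
	}
	log.Printf("round trip: %s", dec)
}
```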
|
||||
pgraph.go (892 changed lines)
@@ -1,892 +0,0 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Pgraph (Pointer Graph)
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
//go:generate stringer -type=graphState -output=graphstate_stringer.go
|
||||
type graphState int
|
||||
|
||||
const (
|
||||
graphStateNil graphState = iota
|
||||
graphStateStarting
|
||||
graphStateStarted
|
||||
graphStatePausing
|
||||
graphStatePaused
|
||||
)
|
||||
|
||||
// Graph is the graph structure in this library.
|
||||
// The graph abstract data type (ADT) is defined as follows:
|
||||
// * the directed graph arrows point from left to right ( -> )
|
||||
// * the arrows point away from their dependencies (eg: arrows mean "before")
|
||||
// * IOW, you might see package -> file -> service (where package runs first)
|
||||
// * This is also the direction that the notify should happen in...
|
||||
type Graph struct {
|
||||
Name string
|
||||
Adjacency map[*Vertex]map[*Vertex]*Edge // *Vertex -> *Vertex (edge)
|
||||
state graphState
|
||||
mutex sync.Mutex // used when modifying graph State variable
|
||||
}
|
||||
|
||||
// Vertex is the primary vertex struct in this library.
|
||||
type Vertex struct {
|
||||
Res // anonymous field
|
||||
timestamp int64 // last updated timestamp ?
|
||||
}
|
||||
|
||||
// Edge is the primary edge struct in this library.
|
||||
type Edge struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
// NewGraph builds a new graph.
|
||||
func NewGraph(name string) *Graph {
|
||||
return &Graph{
|
||||
Name: name,
|
||||
Adjacency: make(map[*Vertex]map[*Vertex]*Edge),
|
||||
state: graphStateNil,
|
||||
}
|
||||
}
|
||||
|
||||
// NewVertex returns a new graph vertex struct with a contained resource.
|
||||
func NewVertex(r Res) *Vertex {
|
||||
return &Vertex{
|
||||
Res: r,
|
||||
}
|
||||
}
|
||||
|
||||
// NewEdge returns a new graph edge struct.
|
||||
func NewEdge(name string) *Edge {
|
||||
return &Edge{
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Copy makes a copy of the graph struct
|
||||
func (g *Graph) Copy() *Graph {
|
||||
newGraph := &Graph{
|
||||
Name: g.Name,
|
||||
Adjacency: make(map[*Vertex]map[*Vertex]*Edge, len(g.Adjacency)),
|
||||
state: g.state,
|
||||
}
|
||||
for k, v := range g.Adjacency {
|
||||
newGraph.Adjacency[k] = v // copy
|
||||
}
|
||||
return newGraph
|
||||
}
|
||||
|
||||
// GetName returns the name of the graph.
|
||||
func (g *Graph) GetName() string {
|
||||
return g.Name
|
||||
}
|
||||
|
||||
// SetName sets the name of the graph.
|
||||
func (g *Graph) SetName(name string) {
|
||||
g.Name = name
|
||||
}
|
||||
|
||||
// getState returns the state of the graph. This state is used for optimizing
|
||||
// certain algorithms by knowing what part of processing the graph is currently
|
||||
// undergoing.
|
||||
func (g *Graph) getState() graphState {
|
||||
//g.mutex.Lock()
|
||||
//defer g.mutex.Unlock()
|
||||
return g.state
|
||||
}
|
||||
|
||||
// setState sets the graph state and returns the previous state.
|
||||
func (g *Graph) setState(state graphState) graphState {
|
||||
g.mutex.Lock()
|
||||
defer g.mutex.Unlock()
|
||||
prev := g.getState()
|
||||
g.state = state
|
||||
return prev
|
||||
}
|
||||
|
||||
// AddVertex uses variadic input to add all listed vertices to the graph
|
||||
func (g *Graph) AddVertex(xv ...*Vertex) {
|
||||
for _, v := range xv {
|
||||
if _, exists := g.Adjacency[v]; !exists {
|
||||
g.Adjacency[v] = make(map[*Vertex]*Edge)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteVertex deletes a particular vertex from the graph.
|
||||
func (g *Graph) DeleteVertex(v *Vertex) {
|
||||
delete(g.Adjacency, v)
|
||||
for k := range g.Adjacency {
|
||||
delete(g.Adjacency[k], v)
|
||||
}
|
||||
}
|
||||
|
||||
// AddEdge adds a directed edge to the graph from v1 to v2.
|
||||
func (g *Graph) AddEdge(v1, v2 *Vertex, e *Edge) {
|
||||
// NOTE: this doesn't allow more than one edge between two vertexes...
|
||||
g.AddVertex(v1, v2) // supports adding N vertices now
|
||||
// TODO: check if an edge exists to avoid overwriting it!
|
||||
// NOTE: VertexMerge() depends on overwriting it at the moment...
|
||||
g.Adjacency[v1][v2] = e
|
||||
}
|
||||
|
||||
// GetVertexMatch searches for an equivalent resource in the graph and returns
|
||||
// the vertex it is found in, or nil if not found.
|
||||
func (g *Graph) GetVertexMatch(obj Res) *Vertex {
|
||||
for k := range g.Adjacency {
|
||||
if k.Res.Compare(obj) {
|
||||
return k
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasVertex returns if the input vertex exists in the graph.
|
||||
func (g *Graph) HasVertex(v *Vertex) bool {
|
||||
if _, exists := g.Adjacency[v]; exists {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NumVertices returns the number of vertices in the graph.
|
||||
func (g *Graph) NumVertices() int {
|
||||
return len(g.Adjacency)
|
||||
}
|
||||
|
||||
// NumEdges returns the number of edges in the graph.
|
||||
func (g *Graph) NumEdges() int {
|
||||
count := 0
|
||||
for k := range g.Adjacency {
|
||||
count += len(g.Adjacency[k])
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// GetVertices returns a randomly sorted slice of all vertices in the graph
|
||||
// The order is random, because the map implementation is intentionally so!
|
||||
func (g *Graph) GetVertices() []*Vertex {
|
||||
var vertices []*Vertex
|
||||
for k := range g.Adjacency {
|
||||
vertices = append(vertices, k)
|
||||
}
|
||||
return vertices
|
||||
}
|
||||
|
||||
// GetVerticesChan returns a channel of all vertices in the graph.
|
||||
func (g *Graph) GetVerticesChan() chan *Vertex {
|
||||
ch := make(chan *Vertex)
|
||||
go func(ch chan *Vertex) {
|
||||
for k := range g.Adjacency {
|
||||
ch <- k
|
||||
}
|
||||
close(ch)
|
||||
}(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
// VertexSlice is a linear list of vertices. It can be sorted.
|
||||
type VertexSlice []*Vertex
|
||||
|
||||
func (vs VertexSlice) Len() int { return len(vs) }
|
||||
func (vs VertexSlice) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
|
||||
func (vs VertexSlice) Less(i, j int) bool { return vs[i].String() < vs[j].String() }
|
||||
|
||||
// GetVerticesSorted returns a sorted slice of all vertices in the graph
|
||||
// The order is sorted by String() to avoid the non-determinism in the map type
|
||||
func (g *Graph) GetVerticesSorted() []*Vertex {
|
||||
var vertices []*Vertex
|
||||
for k := range g.Adjacency {
|
||||
vertices = append(vertices, k)
|
||||
}
|
||||
sort.Sort(VertexSlice(vertices)) // add determinism
|
||||
return vertices
|
||||
}
|
||||
|
||||
// String makes the graph pretty print.
|
||||
func (g *Graph) String() string {
|
||||
return fmt.Sprintf("Vertices(%d), Edges(%d)", g.NumVertices(), g.NumEdges())
|
||||
}
|
||||
|
||||
// String returns the canonical form for a vertex
|
||||
func (v *Vertex) String() string {
|
||||
return fmt.Sprintf("%s[%s]", v.Res.Kind(), v.Res.GetName())
|
||||
}
|
||||
|
||||
// Graphviz outputs the graph in graphviz format.
|
||||
// https://en.wikipedia.org/wiki/DOT_%28graph_description_language%29
|
||||
func (g *Graph) Graphviz() (out string) {
|
||||
//digraph g {
|
||||
// label="hello world";
|
||||
// node [shape=box];
|
||||
// A [label="A"];
|
||||
// B [label="B"];
|
||||
// C [label="C"];
|
||||
// D [label="D"];
|
||||
// E [label="E"];
|
||||
// A -> B [label=f];
|
||||
// B -> C [label=g];
|
||||
// D -> E [label=h];
|
||||
//}
|
||||
out += fmt.Sprintf("digraph %v {\n", g.GetName())
|
||||
out += fmt.Sprintf("\tlabel=\"%v\";\n", g.GetName())
|
||||
//out += "\tnode [shape=box];\n"
|
||||
str := ""
|
||||
for i := range g.Adjacency { // reverse paths
|
||||
out += fmt.Sprintf("\t%v [label=\"%v[%v]\"];\n", i.GetName(), i.Kind(), i.GetName())
|
||||
for j := range g.Adjacency[i] {
|
||||
k := g.Adjacency[i][j]
|
||||
// use str for clearer output ordering
|
||||
str += fmt.Sprintf("\t%v -> %v [label=%v];\n", i.GetName(), j.GetName(), k.Name)
|
||||
}
|
||||
}
|
||||
out += str
|
||||
out += "}\n"
|
||||
return
|
||||
}
|
||||
|
||||
// ExecGraphviz writes out the graphviz data and runs the correct graphviz
|
||||
// filter command.
|
||||
func (g *Graph) ExecGraphviz(program, filename string) error {
|
||||
|
||||
switch program {
|
||||
case "dot", "neato", "twopi", "circo", "fdp":
|
||||
default:
|
||||
return errors.New("Invalid graphviz program selected!")
|
||||
}
|
||||
|
||||
if filename == "" {
|
||||
return errors.New("No filename given!")
|
||||
}
|
||||
|
||||
// run as a normal user if possible when run with sudo
|
||||
uid, err1 := strconv.Atoi(os.Getenv("SUDO_UID"))
|
||||
gid, err2 := strconv.Atoi(os.Getenv("SUDO_GID"))
|
||||
|
||||
err := ioutil.WriteFile(filename, []byte(g.Graphviz()), 0644)
|
||||
if err != nil {
|
||||
return errors.New("Error writing to filename!")
|
||||
}
|
||||
|
||||
if err1 == nil && err2 == nil {
|
||||
if err := os.Chown(filename, uid, gid); err != nil {
|
||||
return errors.New("Error changing file owner!")
|
||||
}
|
||||
}
|
||||
|
||||
path, err := exec.LookPath(program)
|
||||
if err != nil {
|
||||
return errors.New("Graphviz is missing!")
|
||||
}
|
||||
|
||||
out := fmt.Sprintf("%v.png", filename)
|
||||
cmd := exec.Command(path, "-Tpng", fmt.Sprintf("-o%v", out), filename)
|
||||
|
||||
if err1 == nil && err2 == nil {
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{}
|
||||
cmd.SysProcAttr.Credential = &syscall.Credential{
|
||||
Uid: uint32(uid),
|
||||
Gid: uint32(gid),
|
||||
}
|
||||
}
|
||||
_, err = cmd.Output()
|
||||
if err != nil {
|
||||
return errors.New("Error writing to image!")
|
||||
}
|
||||
return nil
|
||||
}
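As a minimal usage sketch (assuming `g` is an existing *Graph and the graphviz `dot` binary is installed), this writes the dot file and renders a PNG next to it:

```go
// produces /tmp/mgmt-graph.dot and /tmp/mgmt-graph.dot.png
if err := g.ExecGraphviz("dot", "/tmp/mgmt-graph.dot"); err != nil {
	log.Printf("Graphviz: %v", err)
}
```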
|
||||
|
||||
// IncomingGraphEdges returns an array (slice) of all directed vertices to
|
||||
// vertex v (??? -> v). OKTimestamp should probably use this.
|
||||
func (g *Graph) IncomingGraphEdges(v *Vertex) []*Vertex {
|
||||
// TODO: we might be able to implement this differently by reversing
|
||||
// the Adjacency graph and then looping through it again...
|
||||
var s []*Vertex
|
||||
for k := range g.Adjacency { // reverse paths
|
||||
for w := range g.Adjacency[k] {
|
||||
if w == v {
|
||||
s = append(s, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// OutgoingGraphEdges returns an array (slice) of all vertices that vertex v
|
||||
// points to (v -> ???). Poke should probably use this.
|
||||
func (g *Graph) OutgoingGraphEdges(v *Vertex) []*Vertex {
|
||||
var s []*Vertex
|
||||
for k := range g.Adjacency[v] { // forward paths
|
||||
s = append(s, k)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// GraphEdges returns an array (slice) of all vertices that connect to vertex v.
|
||||
// This is the union of IncomingGraphEdges and OutgoingGraphEdges.
|
||||
func (g *Graph) GraphEdges(v *Vertex) []*Vertex {
|
||||
var s []*Vertex
|
||||
s = append(s, g.IncomingGraphEdges(v)...)
|
||||
s = append(s, g.OutgoingGraphEdges(v)...)
|
||||
return s
|
||||
}
|
||||
|
||||
// DFS returns a depth first search for the graph, starting at the input vertex.
|
||||
func (g *Graph) DFS(start *Vertex) []*Vertex {
|
||||
var d []*Vertex // discovered
|
||||
var s []*Vertex // stack
|
||||
if _, exists := g.Adjacency[start]; !exists {
|
||||
return nil // TODO: error
|
||||
}
|
||||
v := start
|
||||
s = append(s, v)
|
||||
for len(s) > 0 {
|
||||
v, s = s[len(s)-1], s[:len(s)-1] // s.pop()
|
||||
|
||||
if !VertexContains(v, d) { // if not discovered
|
||||
d = append(d, v) // label as discovered
|
||||
|
||||
for _, w := range g.GraphEdges(v) {
|
||||
s = append(s, w)
|
||||
}
|
||||
}
|
||||
}
|
||||
return d
|
||||
}
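Since GraphEdges is the union of incoming and outgoing edges, this DFS walks the whole connected component around the start vertex. A small sketch (with `g` and `start` assumed to exist):

```go
// list everything reachable from start, following edges in either direction
for _, v := range g.DFS(start) {
	log.Printf("discovered: %s", v.String())
}
```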
|
||||
|
||||
// FilterGraph builds a new graph containing only vertices from the list.
|
||||
func (g *Graph) FilterGraph(name string, vertices []*Vertex) *Graph {
|
||||
newgraph := NewGraph(name)
|
||||
for k1, x := range g.Adjacency {
|
||||
for k2, e := range x {
|
||||
//log.Printf("Filter: %v -> %v # %v", k1.Name, k2.Name, e.Name)
|
||||
if VertexContains(k1, vertices) || VertexContains(k2, vertices) {
|
||||
newgraph.AddEdge(k1, k2, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
return newgraph
|
||||
}
|
||||
|
||||
// GetDisconnectedGraphs returns a channel containing the N disconnected graphs
|
||||
// in our main graph. We can then process each of these in parallel.
|
||||
func (g *Graph) GetDisconnectedGraphs() chan *Graph {
|
||||
ch := make(chan *Graph)
|
||||
go func() {
|
||||
var start *Vertex
|
||||
var d []*Vertex // discovered
|
||||
c := g.NumVertices()
|
||||
for len(d) < c {
|
||||
|
||||
// get an undiscovered vertex to start from
|
||||
for _, s := range g.GetVertices() {
|
||||
if !VertexContains(s, d) {
|
||||
start = s
|
||||
}
|
||||
}
|
||||
|
||||
// dfs through the graph
|
||||
dfs := g.DFS(start)
|
||||
// filter all the collected elements into a new graph
|
||||
newgraph := g.FilterGraph(g.Name, dfs)
|
||||
|
||||
// add number of elements found to found variable
|
||||
d = append(d, dfs...) // extend
|
||||
|
||||
// return this new graph to the channel
|
||||
ch <- newgraph
|
||||
|
||||
// if we've found all the elements, then we're done
|
||||
// otherwise loop through to continue...
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
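Because the disconnected subgraphs arrive over a channel, a caller can simply range over it and treat each independent component on its own (a sketch, with `g` assumed to exist):

```go
for sub := range g.GetDisconnectedGraphs() {
	log.Printf("component: %s", sub.String()) // e.g. "Vertices(3), Edges(2)"
}
```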
|
||||
|
||||
// InDegree returns the count of vertices that point to me in one big lookup map.
|
||||
func (g *Graph) InDegree() map[*Vertex]int {
|
||||
result := make(map[*Vertex]int)
|
||||
for k := range g.Adjacency {
|
||||
result[k] = 0 // initialize
|
||||
}
|
||||
|
||||
for k := range g.Adjacency {
|
||||
for z := range g.Adjacency[k] {
|
||||
result[z]++
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// OutDegree returns the count of vertices that point away in one big lookup map.
|
||||
func (g *Graph) OutDegree() map[*Vertex]int {
|
||||
result := make(map[*Vertex]int)
|
||||
|
||||
for k := range g.Adjacency {
|
||||
result[k] = 0 // initialize
|
||||
for range g.Adjacency[k] {
|
||||
result[k]++
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// TopologicalSort returns the graph vertices in topologically sorted order.
|
||||
// based on descriptions and code from wikipedia and rosetta code
|
||||
// TODO: add memoization, and cache invalidation to speed this up :)
|
||||
func (g *Graph) TopologicalSort() (result []*Vertex, ok bool) { // kahn's algorithm
|
||||
var L []*Vertex // empty list that will contain the sorted elements
|
||||
var S []*Vertex // set of all nodes with no incoming edges
|
||||
remaining := make(map[*Vertex]int) // amount of edges remaining
|
||||
|
||||
for v, d := range g.InDegree() {
|
||||
if d == 0 {
|
||||
// accumulate set of all nodes with no incoming edges
|
||||
S = append(S, v)
|
||||
} else {
|
||||
// initialize remaining edge count from indegree
|
||||
remaining[v] = d
|
||||
}
|
||||
}
|
||||
|
||||
for len(S) > 0 {
|
||||
last := len(S) - 1 // remove a node v from S
|
||||
v := S[last]
|
||||
S = S[:last]
|
||||
L = append(L, v) // add v to tail of L
|
||||
for n := range g.Adjacency[v] {
|
||||
// for each node n remaining in the graph, consume from
|
||||
// remaining, so for remaining[n] > 0
|
||||
if remaining[n] > 0 {
|
||||
remaining[n]-- // remove edge from the graph
|
||||
if remaining[n] == 0 { // if n has no other incoming edges
|
||||
S = append(S, n) // insert n into S
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if graph has edges, eg if any value in rem is > 0
|
||||
for c, in := range remaining {
|
||||
if in > 0 {
|
||||
for n := range g.Adjacency[c] {
|
||||
if remaining[n] > 0 {
|
||||
return nil, false // not a dag!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return L, true
|
||||
}
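Callers are expected to check the boolean, since no order exists for cyclic input. A minimal usage sketch (with `g` assumed to be a populated *Graph):

```go
if sorted, ok := g.TopologicalSort(); !ok {
	log.Printf("graph is not a DAG, no topological order exists")
} else {
	for _, v := range sorted {
		log.Printf("run: %s", v.String()) // dependencies always appear first
	}
}
```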
|
||||
|
||||
// Reachability finds the shortest path in a DAG from a to b, and returns the
|
||||
// slice of vertices that matched this particular path including both a and b.
|
||||
// It returns nil if a or b is nil, and returns empty list if no path is found.
|
||||
// Since there could be more than one possible result for this operation, we
|
||||
// arbitrarily choose one of the shortest possible. As a result, this should
|
||||
// actually return a tree if we cared about correctness.
|
||||
// This operates by a recursive algorithm; a more efficient version is likely.
|
||||
// If you don't give this function a DAG, you might cause infinite recursion!
|
||||
func (g *Graph) Reachability(a, b *Vertex) []*Vertex {
|
||||
if a == nil || b == nil {
|
||||
return nil
|
||||
}
|
||||
vertices := g.OutgoingGraphEdges(a) // what points away from a ?
|
||||
if len(vertices) == 0 {
|
||||
return []*Vertex{} // nope
|
||||
}
|
||||
if VertexContains(b, vertices) {
|
||||
return []*Vertex{a, b} // found
|
||||
}
|
||||
// TODO: parallelize this with go routines?
|
||||
var collected = make([][]*Vertex, len(vertices))
|
||||
pick := -1
|
||||
for i, v := range vertices {
|
||||
collected[i] = g.Reachability(v, b) // find b by recursion
|
||||
if l := len(collected[i]); l > 0 {
|
||||
// pick shortest path
|
||||
// TODO: technically i should return a tree
|
||||
if pick < 0 || l < len(collected[pick]) {
|
||||
pick = i
|
||||
}
|
||||
}
|
||||
}
|
||||
if pick < 0 {
|
||||
return []*Vertex{} // nope
|
||||
}
|
||||
result := []*Vertex{a} // tack on a
|
||||
result = append(result, collected[pick]...)
|
||||
return result
|
||||
}
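A short sketch of how the result is typically read (vertices `a`, `b` and graph `g` assumed to exist):

```go
if path := g.Reachability(a, b); len(path) == 0 {
	log.Printf("%s cannot reach %s", a.String(), b.String())
} else {
	log.Printf("shortest path uses %d vertices, including both endpoints", len(path))
}
```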
|
||||
|
||||
// VertexMerge merges v2 into v1 by reattaching the edges where appropriate,
|
||||
// and then by deleting v2 from the graph. Since more than one edge between two
|
||||
// vertices is not allowed, duplicate edges are merged as well. An edge merge
|
||||
// function can be provided if you'd like to control how you merge the edges!
|
||||
func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) (*Vertex, error), edgeMergeFn func(*Edge, *Edge) *Edge) error {
|
||||
// methodology
|
||||
// 1) edges between v1 and v2 are removed
|
||||
//Loop:
|
||||
for k1 := range g.Adjacency {
|
||||
for k2 := range g.Adjacency[k1] {
|
||||
// v1 -> v2 || v2 -> v1
|
||||
if (k1 == v1 && k2 == v2) || (k1 == v2 && k2 == v1) {
|
||||
delete(g.Adjacency[k1], k2) // delete map & edge
|
||||
// NOTE: if we assume this is a DAG, then we can
|
||||
// assume only v1 -> v2 OR v2 -> v1 exists, and
|
||||
// we can break out of these loops immediately!
|
||||
//break Loop
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2) edges that point towards v2 from X now point to v1 from X (no dupes)
|
||||
for _, x := range g.IncomingGraphEdges(v2) { // all to vertex v (??? -> v)
|
||||
e := g.Adjacency[x][v2] // previous edge
|
||||
r := g.Reachability(x, v1)
|
||||
// merge e with ex := g.Adjacency[x][v1] if it exists!
|
||||
if ex, exists := g.Adjacency[x][v1]; exists && edgeMergeFn != nil && len(r) == 0 {
|
||||
e = edgeMergeFn(e, ex)
|
||||
}
|
||||
if len(r) == 0 { // if not reachable, add it
|
||||
g.AddEdge(x, v1, e) // overwrite edge
|
||||
} else if edgeMergeFn != nil { // reachable, merge e through...
|
||||
prev := x // initial condition
|
||||
for i, next := range r {
|
||||
if i == 0 {
|
||||
// next == prev, therefore skip
|
||||
continue
|
||||
}
|
||||
// this edge is from: prev, to: next
|
||||
ex, _ := g.Adjacency[prev][next] // get
|
||||
ex = edgeMergeFn(ex, e)
|
||||
g.Adjacency[prev][next] = ex // set
|
||||
prev = next
|
||||
}
|
||||
}
|
||||
delete(g.Adjacency[x], v2) // delete old edge
|
||||
}
|
||||
|
||||
// 3) edges that point from v2 to X now point from v1 to X (no dupes)
|
||||
for _, x := range g.OutgoingGraphEdges(v2) { // all from vertex v (v -> ???)
|
||||
e := g.Adjacency[v2][x] // previous edge
|
||||
r := g.Reachability(v1, x)
|
||||
// merge e with ex := g.Adjacency[v1][x] if it exists!
|
||||
if ex, exists := g.Adjacency[v1][x]; exists && edgeMergeFn != nil && len(r) == 0 {
|
||||
e = edgeMergeFn(e, ex)
|
||||
}
|
||||
if len(r) == 0 {
|
||||
g.AddEdge(v1, x, e) // overwrite edge
|
||||
} else if edgeMergeFn != nil { // reachable, merge e through...
|
||||
prev := v1 // initial condition
|
||||
for i, next := range r {
|
||||
if i == 0 {
|
||||
// next == prev, therefore skip
|
||||
continue
|
||||
}
|
||||
// this edge is from: prev, to: next
|
||||
ex, _ := g.Adjacency[prev][next]
|
||||
ex = edgeMergeFn(ex, e)
|
||||
g.Adjacency[prev][next] = ex
|
||||
prev = next
|
||||
}
|
||||
}
|
||||
delete(g.Adjacency[v2], x)
|
||||
}
|
||||
|
||||
// 4) merge and then remove the (now merged/grouped) vertex
|
||||
if vertexMergeFn != nil { // run vertex merge function
|
||||
if v, err := vertexMergeFn(v1, v2); err != nil {
|
||||
return err
|
||||
} else if v != nil { // replace v1 with the "merged" version...
|
||||
v1 = v // XXX: will this replace v1 the way we want?
|
||||
}
|
||||
}
|
||||
g.DeleteVertex(v2) // remove grouped vertex
|
||||
|
||||
// 5) creation of a cyclic graph should throw an error
|
||||
if _, dag := g.TopologicalSort(); !dag { // am i a dag or not?
return fmt.Errorf("graph is not a DAG")
|
||||
}
|
||||
return nil // success
|
||||
}
|
||||
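// An illustrative sketch of the two merge callbacks that VertexMerge accepts.
// This helper is hypothetical (not from the original source) and assumes v1
// and v2 are vertices already present in g; a real resource would likely
// combine state instead of just picking one side.
func exampleVertexMerge(g *Graph, v1, v2 *Vertex) error {
	vertexMergeFn := func(a, b *Vertex) (*Vertex, error) {
		return a, nil // keep v1 as the surviving vertex unchanged
	}
	edgeMergeFn := func(e1, e2 *Edge) *Edge {
		return e1 // arbitrarily prefer the first edge when two collapse into one
	}
	return g.VertexMerge(v1, v2, vertexMergeFn, edgeMergeFn)
}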
|
||||
// GetTimestamp returns the timestamp of a vertex.
func (v *Vertex) GetTimestamp() int64 {
	return v.timestamp
}

// UpdateTimestamp updates the timestamp on a vertex and returns the new value.
func (v *Vertex) UpdateTimestamp() int64 {
	v.timestamp = time.Now().UnixNano() // update
	return v.timestamp
}
|
||||
|
||||
// OKTimestamp returns true if this element can run right now.
func (g *Graph) OKTimestamp(v *Vertex) bool {
	// these are all the vertices pointing TO v, eg: ??? -> v
	for _, n := range g.IncomingGraphEdges(v) {
		// if the vertex has a greater timestamp than any pre-req (n)
		// then we can't run right now...
		// if they're equal (eg: on init of 0) then we also can't run
		// b/c we should let our pre-req's go first...
		x, y := v.GetTimestamp(), n.GetTimestamp()
		if DEBUG {
			log.Printf("%v[%v]: OKTimestamp: (%v) >= %v[%v](%v): !%v", v.Kind(), v.GetName(), x, n.Kind(), n.GetName(), y, x >= y)
		}
		if x >= y {
			return false
		}
	}
	return true
}
|
||||
|
||||
// Poke notifies nodes after me in the dependency graph that they need refreshing...
|
||||
// NOTE: this assumes that this can never fail or need to be rescheduled
|
||||
func (g *Graph) Poke(v *Vertex, activity bool) {
|
||||
// these are all the vertices pointing AWAY FROM v, eg: v -> ???
|
||||
for _, n := range g.OutgoingGraphEdges(v) {
|
||||
// XXX: if we're in state event and haven't been cancelled by
|
||||
// apply, then we can cancel a poke to a child, right? XXX
|
||||
// XXX: if n.Res.getState() != resStateEvent { // is this correct?
|
||||
if true { // XXX
|
||||
if DEBUG {
|
||||
log.Printf("%v[%v]: Poke: %v[%v]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
||||
}
|
||||
n.SendEvent(eventPoke, false, activity) // XXX: can this be switched to sync?
|
||||
} else {
|
||||
if DEBUG {
|
||||
log.Printf("%v[%v]: Poke: %v[%v]: Skipped!", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BackPoke pokes the pre-requisites that are stale and need to run before I can run.
|
||||
func (g *Graph) BackPoke(v *Vertex) {
|
||||
// these are all the vertices pointing TO v, eg: ??? -> v
|
||||
for _, n := range g.IncomingGraphEdges(v) {
|
||||
x, y, s := v.GetTimestamp(), n.GetTimestamp(), n.Res.GetState()
|
||||
// if the parent timestamp needs poking AND it's not in state
|
||||
// resStateEvent, then poke it. If the parent is in resStateEvent it
|
||||
// means that an event is pending, so we'll be expecting a poke
|
||||
// back soon, so we can safely discard the extra parent poke...
|
||||
// TODO: implement a stateLT (less than) to tell if something
|
||||
// happens earlier in the state cycle and that doesn't wrap nil
|
||||
if x >= y && (s != resStateEvent && s != resStateCheckApply) {
|
||||
if DEBUG {
|
||||
log.Printf("%v[%v]: BackPoke: %v[%v]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
||||
}
|
||||
n.SendEvent(eventBackPoke, false, false) // XXX: can this be switched to sync?
|
||||
} else {
|
||||
if DEBUG {
|
||||
log.Printf("%v[%v]: BackPoke: %v[%v]: Skipped!", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process is the primary function to execute for a particular vertex in the graph.
|
||||
// XXX: rename this function
|
||||
func (g *Graph) Process(v *Vertex) {
|
||||
obj := v.Res
|
||||
if DEBUG {
|
||||
log.Printf("%v[%v]: Process()", obj.Kind(), obj.GetName())
|
||||
}
|
||||
obj.SetState(resStateEvent)
|
||||
var ok = true
|
||||
var apply = false // did we run an apply?
|
||||
// is it okay to run dependency wise right now?
|
||||
// if not, that's okay because when the dependency runs, it will poke
|
||||
// us back and we will run if needed then!
|
||||
if g.OKTimestamp(v) {
|
||||
if DEBUG {
|
||||
log.Printf("%v[%v]: OKTimestamp(%v)", obj.Kind(), obj.GetName(), v.GetTimestamp())
|
||||
}
|
||||
|
||||
obj.SetState(resStateCheckApply)
|
||||
// if this fails, don't UpdateTimestamp()
|
||||
checkok, err := obj.CheckApply(!obj.Meta().Noop)
|
||||
if checkok && err != nil { // should never return this way
|
||||
log.Fatalf("%v[%v]: CheckApply(): %t, %+v", obj.Kind(), obj.GetName(), checkok, err)
|
||||
}
|
||||
if DEBUG {
|
||||
log.Printf("%v[%v]: CheckApply(): %t, %v", obj.Kind(), obj.GetName(), checkok, err)
|
||||
}
|
||||
|
||||
if !checkok { // if state *was* not ok, we had to have apply'ed
|
||||
if err != nil { // error during check or apply
|
||||
ok = false
|
||||
} else {
|
||||
apply = true
|
||||
}
|
||||
}
|
||||
|
||||
// when noop is true we always want to update timestamp
|
||||
if obj.Meta().Noop && err == nil {
|
||||
ok = true
|
||||
}
|
||||
|
||||
if ok {
|
||||
// update this timestamp *before* we poke or the poked
|
||||
// nodes might fail due to having a too old timestamp!
|
||||
v.UpdateTimestamp() // this was touched...
|
||||
obj.SetState(resStatePoking) // can't cancel parent poke
|
||||
g.Poke(v, apply)
|
||||
}
|
||||
// poke at our pre-req's instead since they need to refresh/run...
|
||||
} else {
|
||||
// only poke at the pre-req's that need to run
|
||||
go g.BackPoke(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Start is the main kick to start the graph. It goes through in reverse
// topological sort order so that events can't hit un-started vertices.
|
||||
func (g *Graph) Start(wg *sync.WaitGroup, first bool) { // start or continue
|
||||
log.Printf("State: %v -> %v", g.setState(graphStateStarting), g.getState())
|
||||
defer log.Printf("State: %v -> %v", g.setState(graphStateStarted), g.getState())
|
||||
t, _ := g.TopologicalSort()
|
||||
// TODO: only calculate indegree if `first` is true to save resources
|
||||
indegree := g.InDegree() // compute all of the indegree's
|
||||
for _, v := range Reverse(t) {
|
||||
|
||||
if !v.Res.IsWatching() { // if Watch() is not running...
|
||||
wg.Add(1)
|
||||
// must pass in value to avoid races...
|
||||
// see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/
|
||||
go func(vv *Vertex) {
|
||||
defer wg.Done()
|
||||
// listen for chan events from Watch() and run
|
||||
// the Process() function when they're received
|
||||
// this avoids us having to pass the data into
|
||||
// the Watch() function about which graph it is
|
||||
// running on, which isolates things nicely...
|
||||
chanProcess := make(chan Event)
|
||||
go func() {
|
||||
for event := range chanProcess {
|
||||
// this has to be synchronous,
|
||||
// because otherwise the Res
|
||||
// event loop will keep running
|
||||
// and change state, causing the
|
||||
// converged timeout to fire!
|
||||
g.Process(vv)
|
||||
event.ACK() // sync
|
||||
}
|
||||
}()
|
||||
vv.Res.Watch(chanProcess) // i block until i end
|
||||
close(chanProcess)
|
||||
log.Printf("%v[%v]: Exited", vv.Kind(), vv.GetName())
|
||||
}(v)
|
||||
}
|
||||
|
||||
// selective poke: here we reduce the number of initial pokes
|
||||
// to the minimum required to activate every vertex in the
|
||||
// graph, either by direct action, or by getting poked by a
|
||||
// vertex that was previously activated. if we poke each vertex
|
||||
// that has no incoming edges, then we can be sure to reach the
|
||||
// whole graph. Please note: this may mask certain optimization
|
||||
// failures, such as any poke limiting code in Poke() or
|
||||
// BackPoke(). You might want to disable this selective start
|
||||
// when experimenting with and testing those elements.
|
||||
// if we are unpausing (since it's not the first run of this
|
||||
// function) we need to poke to *unpause* every graph vertex,
|
||||
// and not just selectively the subset with no indegree.
|
||||
if (!first) || indegree[v] == 0 {
|
||||
// ensure state is started before continuing on to next vertex
|
||||
for !v.SendEvent(eventStart, true, false) {
|
||||
if DEBUG {
|
||||
// if SendEvent fails, we aren't up yet
|
||||
log.Printf("%v[%v]: Retrying SendEvent(Start)", v.Kind(), v.GetName())
|
||||
// sleep here briefly or otherwise cause
|
||||
// a different goroutine to be scheduled
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
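// A hedged sketch of the "selective poke" rule used in Start above: on a
// first run, only the vertices with no incoming edges need a direct start
// poke, because every other vertex is reached transitively through Poke().
// This helper is hypothetical and only returns the set for illustration.
func exampleInitialPokeSet(g *Graph) []*Vertex {
	var s []*Vertex
	indegree := g.InDegree()
	if t, ok := g.TopologicalSort(); ok {
		for _, v := range Reverse(t) {
			if indegree[v] == 0 {
				s = append(s, v)
			}
		}
	}
	return s
}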
|
||||
// Pause sends pause events to the graph in a topological sort order.
func (g *Graph) Pause() {
	log.Printf("State: %v -> %v", g.setState(graphStatePausing), g.getState())
	defer log.Printf("State: %v -> %v", g.setState(graphStatePaused), g.getState())
	t, _ := g.TopologicalSort()
	for _, v := range t { // squeeze out the events...
		v.SendEvent(eventPause, true, false)
	}
}
|
||||
|
||||
// Exit sends exit events to the graph in a topological sort order.
func (g *Graph) Exit() {
	if g == nil {
		return
	} // empty graph that wasn't populated yet
	t, _ := g.TopologicalSort()
	for _, v := range t { // squeeze out the events...
		// turn off the taps...
		// XXX: do this by sending an exit signal, and then returning
		// when we hit the 'default' in the select statement!
		// XXX: we can do this to quiesce, but it's not necessary now

		v.SendEvent(eventExit, true, false)
	}
}
|
||||
|
||||
// AssociateData associates some data with the object in the graph in question.
func (g *Graph) AssociateData(converger Converger) {
	for v := range g.GetVerticesChan() {
		v.Res.AssociateData(converger)
	}
}
|
||||
|
||||
// VertexContains is an "in array" function to test for a vertex in a slice of vertices.
func VertexContains(needle *Vertex, haystack []*Vertex) bool {
	for _, v := range haystack {
		if needle == v {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Reverse reverses a list of vertices.
func Reverse(vs []*Vertex) []*Vertex {
	//var out []*Vertex // XXX: golint suggests, but it fails testing
	out := make([]*Vertex, 0) // empty list
	l := len(vs)
	for i := range vs {
		out = append(out, vs[l-i-1])
	}
	return out
}
|
||||
129
pgraph/graphsync.go
Normal file
@@ -0,0 +1,129 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package pgraph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// GraphSync updates the Graph so that it matches the newGraph. It leaves
|
||||
// identical elements alone so that they don't need to be refreshed.
|
||||
// It tries to mutate existing elements into new ones, if they support this.
|
||||
// This updates the Graph on success only.
|
||||
// FIXME: should we do this with copies of the vertex resources?
|
||||
// FIXME: add test cases
|
||||
func (obj *Graph) GraphSync(newGraph *Graph, vertexCmpFn func(Vertex, Vertex) (bool, error), vertexAddFn func(Vertex) error, vertexRemoveFn func(Vertex) error, edgeCmpFn func(Edge, Edge) (bool, error)) error {
|
||||
|
||||
oldGraph := obj.Copy() // work on a copy of the old graph
|
||||
if oldGraph == nil {
|
||||
var err error
|
||||
oldGraph, err = NewGraph(newGraph.GetName()) // copy over the name
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "GraphSync failed")
|
||||
}
|
||||
}
|
||||
oldGraph.SetName(newGraph.GetName()) // overwrite the name
|
||||
|
||||
var lookup = make(map[Vertex]Vertex)
|
||||
var vertexKeep []Vertex // list of vertices which are the same in new graph
|
||||
var edgeKeep []Edge // list of edges which are the same in the new graph
|
||||
|
||||
for v := range newGraph.Adjacency() { // loop through the vertices (resources)
|
||||
var vertex Vertex
|
||||
// step one, direct compare with res.Compare
|
||||
if vertex == nil { // redundant guard for consistency
|
||||
fn := func(vv Vertex) (bool, error) {
|
||||
b, err := vertexCmpFn(vv, v)
|
||||
return b, errwrap.Wrapf(err, "vertexCmpFn failed")
|
||||
}
|
||||
var err error
|
||||
vertex, err = oldGraph.VertexMatchFn(fn)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "VertexMatchFn failed")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: consider adding a mutate API.
|
||||
// step two, try and mutate with res.Mutate
|
||||
//if vertex == nil { // not found yet...
|
||||
// vertex = oldGraph.MutateMatch(res)
|
||||
//}
|
||||
|
||||
if vertex == nil { // no match found yet
|
||||
if err := vertexAddFn(v); err != nil {
|
||||
return errwrap.Wrapf(err, "vertexAddFn failed")
|
||||
}
|
||||
vertex = v
|
||||
oldGraph.AddVertex(vertex) // call standalone in case not part of an edge
|
||||
}
|
||||
lookup[v] = vertex // used for constructing edges
|
||||
vertexKeep = append(vertexKeep, vertex) // append
|
||||
}
|
||||
|
||||
// get rid of any vertices we shouldn't keep (that aren't in new graph)
|
||||
for v := range oldGraph.Adjacency() {
|
||||
if !VertexContains(v, vertexKeep) {
|
||||
if err := vertexRemoveFn(v); err != nil {
|
||||
return errwrap.Wrapf(err, "vertexRemoveFn failed")
|
||||
}
|
||||
oldGraph.DeleteVertex(v)
|
||||
}
|
||||
}
|
||||
|
||||
// compare edges
|
||||
for v1 := range newGraph.Adjacency() { // loop through the vertices (resources)
|
||||
for v2, e := range newGraph.Adjacency()[v1] {
|
||||
// we have an edge!
|
||||
// lookup vertices (these should exist now)
|
||||
vertex1, exists1 := lookup[v1]
|
||||
vertex2, exists2 := lookup[v2]
|
||||
if !exists1 || !exists2 { // no match found, bug?
|
||||
//if vertex1 == nil || vertex2 == nil { // no match found
|
||||
return fmt.Errorf("new vertices weren't found") // programming error
|
||||
}
|
||||
|
||||
edge, exists := oldGraph.Adjacency()[vertex1][vertex2]
|
||||
if !exists {
|
||||
edge = e // use edge
|
||||
} else if b, err := edgeCmpFn(edge, e); err != nil {
|
||||
return errwrap.Wrapf(err, "edgeCmpFn failed")
|
||||
} else if !b {
|
||||
edge = e // overwrite edge
|
||||
}
|
||||
|
||||
oldGraph.Adjacency()[vertex1][vertex2] = edge // store it (AddEdge)
|
||||
edgeKeep = append(edgeKeep, edge) // mark as saved
|
||||
}
|
||||
}
|
||||
|
||||
// delete unused edges
|
||||
for v1 := range oldGraph.Adjacency() {
|
||||
for _, e := range oldGraph.Adjacency()[v1] {
|
||||
// we have an edge!
|
||||
if !EdgeContains(e, edgeKeep) {
|
||||
oldGraph.DeleteEdge(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// success
|
||||
*obj = *oldGraph // save old graph
|
||||
return nil
|
||||
}
|
||||
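// A minimal sketch of the callbacks that GraphSync expects. This helper is
// hypothetical (not from the original source): it compares vertices and edges
// by their String() output and performs no side effects on add or remove,
// which is usually where a caller would start or stop per-resource workers.
func exampleGraphSync(oldGraph, newGraph *Graph) error {
	vertexCmpFn := func(v1, v2 Vertex) (bool, error) { return v1.String() == v2.String(), nil }
	vertexAddFn := func(v Vertex) error { return nil }    // eg: start something for v
	vertexRemoveFn := func(v Vertex) error { return nil } // eg: stop something for v
	edgeCmpFn := func(e1, e2 Edge) (bool, error) { return e1.String() == e2.String(), nil }
	return oldGraph.GraphSync(newGraph, vertexCmpFn, vertexAddFn, vertexRemoveFn, edgeCmpFn)
}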
118
pgraph/graphviz.go
Normal file
@@ -0,0 +1,118 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package pgraph // TODO: this should be a subpackage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Graphviz outputs the graph in graphviz format.
|
||||
// https://en.wikipedia.org/wiki/DOT_%28graph_description_language%29
|
||||
func (g *Graph) Graphviz() (out string) {
|
||||
//digraph g {
|
||||
// label="hello world";
|
||||
// node [shape=box];
|
||||
// A [label="A"];
|
||||
// B [label="B"];
|
||||
// C [label="C"];
|
||||
// D [label="D"];
|
||||
// E [label="E"];
|
||||
// A -> B [label=f];
|
||||
// B -> C [label=g];
|
||||
// D -> E [label=h];
|
||||
//}
|
||||
out += fmt.Sprintf("digraph %s {\n", g.GetName())
|
||||
out += fmt.Sprintf("\tlabel=\"%s\";\n", g.GetName())
|
||||
//out += "\tnode [shape=box];\n"
|
||||
str := ""
|
||||
for i := range g.Adjacency() { // reverse paths
|
||||
out += fmt.Sprintf("\t\"%s\" [label=\"%s\"];\n", i, i)
|
||||
for j := range g.Adjacency()[i] {
|
||||
k := g.Adjacency()[i][j]
|
||||
// use str for clearer output ordering
|
||||
//if fmtBoldFn(k) { // TODO: add this sort of formatting
|
||||
// str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\",style=bold];\n", i, j, k)
|
||||
//} else {
|
||||
str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\"];\n", i, j, k)
|
||||
//}
|
||||
}
|
||||
}
|
||||
out += str
|
||||
out += "}\n"
|
||||
return
|
||||
}
|
||||
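// A small sketch showing the DOT output above. The strExample type is a
// throwaway fmt.Stringer used only for illustration; any Stringer can act as
// a Vertex or an Edge in this package.
type strExample string

func (s strExample) String() string { return string(s) }

func exampleGraphviz() {
	g, err := NewGraph("example")
	if err != nil {
		return
	}
	g.AddEdge(strExample("a"), strExample("b"), strExample("a->b"))
	fmt.Print(g.Graphviz()) // prints a digraph with: "a" -> "b" [label="a->b"];
}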
|
||||
// ExecGraphviz writes out the graphviz data and runs the correct graphviz
|
||||
// filter command.
|
||||
func (g *Graph) ExecGraphviz(program, filename, hostname string) error {
|
||||
|
||||
switch program {
|
||||
case "dot", "neato", "twopi", "circo", "fdp":
|
||||
default:
|
||||
return fmt.Errorf("invalid graphviz program selected")
|
||||
}
|
||||
|
||||
if filename == "" {
|
||||
return fmt.Errorf("no filename given")
|
||||
}
|
||||
|
||||
if hostname != "" {
|
||||
filename = fmt.Sprintf("%s@%s", filename, hostname)
|
||||
}
|
||||
|
||||
// run as a normal user if possible when run with sudo
|
||||
uid, err1 := strconv.Atoi(os.Getenv("SUDO_UID"))
|
||||
gid, err2 := strconv.Atoi(os.Getenv("SUDO_GID"))
|
||||
|
||||
err := ioutil.WriteFile(filename, []byte(g.Graphviz()), 0644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing to filename")
|
||||
}
|
||||
|
||||
if err1 == nil && err2 == nil {
|
||||
if err := os.Chown(filename, uid, gid); err != nil {
|
||||
return fmt.Errorf("error changing file owner")
|
||||
}
|
||||
}
|
||||
|
||||
path, err := exec.LookPath(program)
|
||||
if err != nil {
|
||||
return fmt.Errorf("the Graphviz program is missing")
|
||||
}
|
||||
|
||||
out := fmt.Sprintf("%s.png", filename)
|
||||
cmd := exec.Command(path, "-Tpng", fmt.Sprintf("-o%s", out), filename)
|
||||
|
||||
if err1 == nil && err2 == nil {
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{}
|
||||
cmd.SysProcAttr.Credential = &syscall.Credential{
|
||||
Uid: uint32(uid),
|
||||
Gid: uint32(gid),
|
||||
}
|
||||
}
|
||||
_, err = cmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing to image")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
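// A hedged usage sketch for ExecGraphviz above: render the graph with the
// "dot" filter into a file; the path is illustrative only. The function also
// writes <filename>.png next to the DOT file.
func exampleExecGraphviz(g *Graph) error {
	return g.ExecGraphviz("dot", "/tmp/mgmt-graph.dot", "")
}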
623
pgraph/pgraph.go
Normal file
@@ -0,0 +1,623 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package pgraph represents the internal "pointer graph" that we use.
|
||||
package pgraph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Graph is the graph structure in this library.
|
||||
// The graph abstract data type (ADT) is defined as follows:
|
||||
// * the directed graph arrows point from left to right ( -> )
|
||||
// * the arrows point away from their dependencies (eg: arrows mean "before")
|
||||
// * IOW, you might see package -> file -> service (where package runs first)
|
||||
// * This is also the direction that the notify should happen in...
|
||||
type Graph struct {
|
||||
Name string
|
||||
|
||||
adjacency map[Vertex]map[Vertex]Edge // Vertex -> Vertex (edge)
|
||||
kv map[string]interface{} // some values associated with the graph
|
||||
}
|
||||
|
||||
// Vertex is the primary vertex struct in this library. It can be anything that
|
||||
// implements Stringer. The string output must be stable and unique in a graph.
|
||||
type Vertex interface {
|
||||
fmt.Stringer // String() string
|
||||
}
|
||||
|
||||
// Edge is the primary edge struct in this library. It can be anything that
|
||||
// implements Stringer. The string output must be stable and unique in a graph.
|
||||
type Edge interface {
|
||||
fmt.Stringer // String() string
|
||||
}
|
||||
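// Anything that implements fmt.Stringer can act as a Vertex or an Edge. A
// minimal sketch of that contract follows; these illustrative types mirror
// the helpers used later in the test file.
type exampleVertex struct{ name string }

func (v *exampleVertex) String() string { return v.name }

type exampleEdge struct{ name string }

func (e *exampleEdge) String() string { return e.name }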
|
||||
// Init initializes the graph which populates all the internal structures.
|
||||
func (g *Graph) Init() error {
|
||||
if g.Name == "" { // FIXME: is this really a good requirement?
|
||||
return fmt.Errorf("can't initialize graph with empty name")
|
||||
}
|
||||
|
||||
//g.adjacency = make(map[Vertex]map[Vertex]Edge) // not required
|
||||
//g.kv = make(map[string]interface{}) // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewGraph builds a new graph.
|
||||
func NewGraph(name string) (*Graph, error) {
|
||||
g := &Graph{
|
||||
Name: name,
|
||||
}
|
||||
if err := g.Init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// Value returns a value stored alongside the graph in a particular key.
|
||||
func (g *Graph) Value(key string) (interface{}, bool) {
|
||||
val, exists := g.kv[key]
|
||||
return val, exists
|
||||
}
|
||||
|
||||
// SetValue sets a value to be stored alongside the graph in a particular key.
|
||||
func (g *Graph) SetValue(key string, val interface{}) {
|
||||
if g.kv == nil { // initialize on first use
|
||||
g.kv = make(map[string]interface{})
|
||||
}
|
||||
g.kv[key] = val
|
||||
}
|
||||
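// A small sketch of the key/value store above; the "debug" key is purely
// illustrative and not something the package itself defines.
func exampleValue(g *Graph) bool {
	g.SetValue("debug", true)
	if val, exists := g.Value("debug"); exists {
		b, ok := val.(bool)
		return ok && b
	}
	return false
}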
|
||||
// Copy makes a copy of the graph struct.
|
||||
func (g *Graph) Copy() *Graph {
|
||||
if g == nil { // allow nil graphs through
|
||||
return g
|
||||
}
|
||||
newGraph := &Graph{
|
||||
Name: g.Name,
|
||||
adjacency: make(map[Vertex]map[Vertex]Edge, len(g.adjacency)),
|
||||
kv: g.kv,
|
||||
}
|
||||
for k, v := range g.adjacency {
|
||||
newGraph.adjacency[k] = v // copy the map entry (note: the inner edge map is shared, not deep copied)
|
||||
}
|
||||
return newGraph
|
||||
}
|
||||
|
||||
// GetName returns the name of the graph.
|
||||
func (g *Graph) GetName() string {
|
||||
return g.Name
|
||||
}
|
||||
|
||||
// SetName sets the name of the graph.
|
||||
func (g *Graph) SetName(name string) {
|
||||
g.Name = name
|
||||
}
|
||||
|
||||
// AddVertex uses variadic input to add all listed vertices to the graph.
|
||||
func (g *Graph) AddVertex(xv ...Vertex) {
|
||||
if g.adjacency == nil { // initialize on first use
|
||||
g.adjacency = make(map[Vertex]map[Vertex]Edge)
|
||||
}
|
||||
for _, v := range xv {
|
||||
if _, exists := g.adjacency[v]; !exists {
|
||||
g.adjacency[v] = make(map[Vertex]Edge)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteVertex deletes a particular vertex from the graph.
|
||||
func (g *Graph) DeleteVertex(v Vertex) {
|
||||
delete(g.adjacency, v)
|
||||
for k := range g.adjacency {
|
||||
delete(g.adjacency[k], v)
|
||||
}
|
||||
}
|
||||
|
||||
// AddEdge adds a directed edge to the graph from v1 to v2.
|
||||
func (g *Graph) AddEdge(v1, v2 Vertex, e Edge) {
|
||||
// NOTE: this doesn't allow more than one edge between two vertexes...
|
||||
g.AddVertex(v1, v2) // supports adding N vertices now
|
||||
// TODO: check if an edge exists to avoid overwriting it!
|
||||
// NOTE: VertexMerge() depends on overwriting it at the moment...
|
||||
g.adjacency[v1][v2] = e
|
||||
}
|
||||
|
||||
// DeleteEdge deletes a particular edge from the graph.
|
||||
// FIXME: add test cases
|
||||
func (g *Graph) DeleteEdge(e Edge) {
|
||||
for v1 := range g.adjacency {
|
||||
for v2, edge := range g.adjacency[v1] {
|
||||
if e == edge {
|
||||
delete(g.adjacency[v1], v2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// HasVertex returns whether the input vertex exists in the graph.
|
||||
func (g *Graph) HasVertex(v Vertex) bool {
|
||||
if _, exists := g.adjacency[v]; exists {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NumVertices returns the number of vertices in the graph.
|
||||
func (g *Graph) NumVertices() int {
|
||||
return len(g.adjacency)
|
||||
}
|
||||
|
||||
// NumEdges returns the number of edges in the graph.
|
||||
func (g *Graph) NumEdges() int {
|
||||
count := 0
|
||||
for k := range g.adjacency {
|
||||
count += len(g.adjacency[k])
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Adjacency returns the adjacency map representing this graph. This is useful
|
||||
// for users who wish to operate on the raw data structure more efficiently.
|
||||
// This works because maps are reference types so we can edit this at will.
|
||||
func (g *Graph) Adjacency() map[Vertex]map[Vertex]Edge {
|
||||
return g.adjacency
|
||||
}
|
||||
|
||||
// Vertices returns a randomly sorted slice of all vertices in the graph.
|
||||
// The order is random, because the map implementation is intentionally so!
|
||||
func (g *Graph) Vertices() []Vertex {
|
||||
var vertices []Vertex
|
||||
for k := range g.adjacency {
|
||||
vertices = append(vertices, k)
|
||||
}
|
||||
return vertices
|
||||
}
|
||||
|
||||
// VerticesChan returns a channel of all vertices in the graph.
|
||||
func (g *Graph) VerticesChan() chan Vertex {
|
||||
ch := make(chan Vertex)
|
||||
go func(ch chan Vertex) {
|
||||
for k := range g.adjacency {
|
||||
ch <- k
|
||||
}
|
||||
close(ch)
|
||||
}(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
// VertexSlice is a linear list of vertices. It can be sorted.
|
||||
type VertexSlice []Vertex
|
||||
|
||||
func (vs VertexSlice) Len() int { return len(vs) }
|
||||
func (vs VertexSlice) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
|
||||
func (vs VertexSlice) Less(i, j int) bool { return vs[i].String() < vs[j].String() }
|
||||
|
||||
// VerticesSorted returns a sorted slice of all vertices in the graph.
|
||||
// The order is sorted by String() to avoid the non-determinism in the map type.
|
||||
func (g *Graph) VerticesSorted() []Vertex {
|
||||
var vertices []Vertex
|
||||
for k := range g.adjacency {
|
||||
vertices = append(vertices, k)
|
||||
}
|
||||
sort.Sort(VertexSlice(vertices)) // add determinism
|
||||
return vertices
|
||||
}
|
||||
|
||||
// String makes the graph pretty print.
|
||||
func (g *Graph) String() string {
|
||||
return fmt.Sprintf("Vertices(%d), Edges(%d)", g.NumVertices(), g.NumEdges())
|
||||
}
|
||||
|
||||
// IncomingGraphVertices returns an array (slice) of all directed vertices to
|
||||
// vertex v (??? -> v). OKTimestamp should probably use this.
|
||||
func (g *Graph) IncomingGraphVertices(v Vertex) []Vertex {
|
||||
// TODO: we might be able to implement this differently by reversing
|
||||
// the Adjacency graph and then looping through it again...
|
||||
var s []Vertex
|
||||
for k := range g.adjacency { // reverse paths
|
||||
for w := range g.adjacency[k] {
|
||||
if w == v {
|
||||
s = append(s, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// OutgoingGraphVertices returns an array (slice) of all vertices that vertex v
|
||||
// points to (v -> ???). Poke should probably use this.
|
||||
func (g *Graph) OutgoingGraphVertices(v Vertex) []Vertex {
|
||||
var s []Vertex
|
||||
for k := range g.adjacency[v] { // forward paths
|
||||
s = append(s, k)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// GraphVertices returns an array (slice) of all vertices that connect to vertex v.
|
||||
// This is the union of IncomingGraphVertices and OutgoingGraphVertices.
|
||||
func (g *Graph) GraphVertices(v Vertex) []Vertex {
|
||||
var s []Vertex
|
||||
s = append(s, g.IncomingGraphVertices(v)...)
|
||||
s = append(s, g.OutgoingGraphVertices(v)...)
|
||||
return s
|
||||
}
|
||||
|
||||
// IncomingGraphEdges returns all of the edges that point to vertex v (??? -> v).
|
||||
func (g *Graph) IncomingGraphEdges(v Vertex) []Edge {
|
||||
var edges []Edge
|
||||
for v1 := range g.adjacency { // reverse paths
|
||||
for v2, e := range g.adjacency[v1] {
|
||||
if v2 == v {
|
||||
edges = append(edges, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
return edges
|
||||
}
|
||||
|
||||
// OutgoingGraphEdges returns all of the edges that point from vertex v (v -> ???).
|
||||
func (g *Graph) OutgoingGraphEdges(v Vertex) []Edge {
|
||||
var edges []Edge
|
||||
for _, e := range g.adjacency[v] { // forward paths
|
||||
edges = append(edges, e)
|
||||
}
|
||||
return edges
|
||||
}
|
||||
|
||||
// GraphEdges returns an array (slice) of all edges that connect to vertex v.
|
||||
// This is the union of IncomingGraphEdges and OutgoingGraphEdges.
|
||||
func (g *Graph) GraphEdges(v Vertex) []Edge {
|
||||
var edges []Edge
|
||||
edges = append(edges, g.IncomingGraphEdges(v)...)
|
||||
edges = append(edges, g.OutgoingGraphEdges(v)...)
|
||||
return edges
|
||||
}
|
||||
|
||||
// DFS returns a depth first search for the graph, starting at the input vertex.
|
||||
func (g *Graph) DFS(start Vertex) []Vertex {
|
||||
var d []Vertex // discovered
|
||||
var s []Vertex // stack
|
||||
if _, exists := g.adjacency[start]; !exists {
|
||||
return nil // TODO: error
|
||||
}
|
||||
v := start
|
||||
s = append(s, v)
|
||||
for len(s) > 0 {
|
||||
v, s = s[len(s)-1], s[:len(s)-1] // s.pop()
|
||||
|
||||
if !VertexContains(v, d) { // if not discovered
|
||||
d = append(d, v) // label as discovered
|
||||
|
||||
for _, w := range g.GraphVertices(v) {
|
||||
s = append(s, w)
|
||||
}
|
||||
}
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// FilterGraph builds a new graph containing only vertices from the list.
|
||||
func (g *Graph) FilterGraph(name string, vertices []Vertex) (*Graph, error) {
|
||||
newGraph := &Graph{Name: name}
|
||||
if err := newGraph.Init(); err != nil {
|
||||
return nil, errwrap.Wrapf(err, "could not run FilterGraph() properly")
|
||||
}
|
||||
for k1, x := range g.adjacency {
|
||||
for k2, e := range x {
|
||||
//log.Printf("Filter: %s -> %s # %s", k1.Name, k2.Name, e.Name)
|
||||
if VertexContains(k1, vertices) || VertexContains(k2, vertices) {
|
||||
newGraph.AddEdge(k1, k2, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
return newGraph, nil
|
||||
}
|
||||
|
||||
// DisconnectedGraphs returns a list containing the N disconnected graphs.
|
||||
func (g *Graph) DisconnectedGraphs() ([]*Graph, error) {
|
||||
graphs := []*Graph{}
|
||||
var start Vertex
|
||||
var d []Vertex // discovered
|
||||
c := g.NumVertices()
|
||||
for len(d) < c {
|
||||
|
||||
// get an undiscovered vertex to start from
|
||||
for _, s := range g.Vertices() {
|
||||
if !VertexContains(s, d) {
|
||||
start = s
|
||||
}
|
||||
}
|
||||
|
||||
// dfs through the graph
|
||||
dfs := g.DFS(start)
|
||||
// filter all the collected elements into a new graph
|
||||
newgraph, err := g.FilterGraph(g.Name, dfs)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "could not run DisconnectedGraphs() properly")
|
||||
}
|
||||
// add number of elements found to found variable
|
||||
d = append(d, dfs...) // extend
|
||||
|
||||
// append this new graph to the list
|
||||
graphs = append(graphs, newgraph)
|
||||
|
||||
// if we've found all the elements, then we're done
|
||||
// otherwise loop through to continue...
|
||||
}
|
||||
return graphs, nil
|
||||
}
|
||||
|
||||
// InDegree returns the count of vertices that point to me in one big lookup map.
|
||||
func (g *Graph) InDegree() map[Vertex]int {
|
||||
result := make(map[Vertex]int)
|
||||
for k := range g.adjacency {
|
||||
result[k] = 0 // initialize
|
||||
}
|
||||
|
||||
for k := range g.adjacency {
|
||||
for z := range g.adjacency[k] {
|
||||
result[z]++
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// OutDegree returns the count of vertices that point away in one big lookup map.
|
||||
func (g *Graph) OutDegree() map[Vertex]int {
|
||||
result := make(map[Vertex]int)
|
||||
|
||||
for k := range g.adjacency {
|
||||
result[k] = 0 // initialize
|
||||
for range g.adjacency[k] {
|
||||
result[k]++
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// TopologicalSort returns the graph vertices in topologically sorted order.
|
||||
// It is based on descriptions and code from wikipedia and rosetta code.
|
||||
// TODO: add memoization, and cache invalidation to speed this up :)
|
||||
func (g *Graph) TopologicalSort() ([]Vertex, error) { // kahn's algorithm
|
||||
var L []Vertex // empty list that will contain the sorted elements
|
||||
var S []Vertex // set of all nodes with no incoming edges
|
||||
remaining := make(map[Vertex]int) // amount of edges remaining
|
||||
|
||||
for v, d := range g.InDegree() {
|
||||
if d == 0 {
|
||||
// accumulate set of all nodes with no incoming edges
|
||||
S = append(S, v)
|
||||
} else {
|
||||
// initialize remaining edge count from indegree
|
||||
remaining[v] = d
|
||||
}
|
||||
}
|
||||
|
||||
for len(S) > 0 {
|
||||
last := len(S) - 1 // remove a node v from S
|
||||
v := S[last]
|
||||
S = S[:last]
|
||||
L = append(L, v) // add v to tail of L
|
||||
for n := range g.adjacency[v] {
|
||||
// for each node n remaining in the graph, consume from
|
||||
// remaining, so for remaining[n] > 0
|
||||
if remaining[n] > 0 {
|
||||
remaining[n]-- // remove edge from the graph
|
||||
if remaining[n] == 0 { // if n has no other incoming edges
|
||||
S = append(S, n) // insert n into S
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if graph has edges, eg if any value in rem is > 0
|
||||
for c, in := range remaining {
|
||||
if in > 0 {
|
||||
for n := range g.adjacency[c] {
|
||||
if remaining[n] > 0 {
|
||||
return nil, fmt.Errorf("not a dag")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return L, nil
|
||||
}
|
||||
|
||||
// Reachability finds the shortest path in a DAG from a to b, and returns the
|
||||
// slice of vertices that matched this particular path including both a and b.
|
||||
// It returns nil if a or b is nil, and returns empty list if no path is found.
|
||||
// Since there could be more than one possible result for this operation, we
|
||||
// arbitrarily choose one of the shortest possible. As a result, this should
|
||||
// actually return a tree if we cared about correctness.
|
||||
// This operates by a recursive algorithm; a more efficient version is likely.
|
||||
// If you don't give this function a DAG, you might cause infinite recursion!
|
||||
func (g *Graph) Reachability(a, b Vertex) []Vertex {
|
||||
if a == nil || b == nil {
|
||||
return nil
|
||||
}
|
||||
vertices := g.OutgoingGraphVertices(a) // what points away from a ?
|
||||
if len(vertices) == 0 {
|
||||
return []Vertex{} // nope
|
||||
}
|
||||
if VertexContains(b, vertices) {
|
||||
return []Vertex{a, b} // found
|
||||
}
|
||||
// TODO: parallelize this with go routines?
|
||||
var collected = make([][]Vertex, len(vertices))
|
||||
pick := -1
|
||||
for i, v := range vertices {
|
||||
collected[i] = g.Reachability(v, b) // find b by recursion
|
||||
if l := len(collected[i]); l > 0 {
|
||||
// pick shortest path
|
||||
// TODO: technically i should return a tree
|
||||
if pick < 0 || l < len(collected[pick]) {
|
||||
pick = i
|
||||
}
|
||||
}
|
||||
}
|
||||
if pick < 0 {
|
||||
return []Vertex{} // nope
|
||||
}
|
||||
result := []Vertex{a} // tack on a
|
||||
result = append(result, collected[pick]...)
|
||||
return result
|
||||
}
|
||||
|
||||
// VertexMatchFn searches for a vertex in the graph and returns the vertex if
|
||||
// one matches. It uses a user defined function to match. That function must
|
||||
// return true on match, and an error if anything goes wrong.
|
||||
func (g *Graph) VertexMatchFn(fn func(Vertex) (bool, error)) (Vertex, error) {
|
||||
for v := range g.adjacency {
|
||||
if b, err := fn(v); err != nil {
|
||||
return nil, errwrap.Wrapf(err, "fn in VertexMatchFn() errored")
|
||||
} else if b {
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
return nil, nil // nothing found
|
||||
}
|
||||
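// A minimal sketch of VertexMatchFn: find the vertex whose String() output
// equals the given name. The helper is hypothetical; a nil, nil return means
// nothing matched.
func exampleMatchByName(g *Graph, name string) (Vertex, error) {
	fn := func(v Vertex) (bool, error) {
		return v.String() == name, nil
	}
	return g.VertexMatchFn(fn)
}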
|
||||
// GraphCmp compares the topology of this graph to another and returns nil if
|
||||
// they're equal. It uses a user defined function to compare topologically
|
||||
// equivalent vertices, and edges.
|
||||
// FIXME: add more test cases
|
||||
func (g *Graph) GraphCmp(graph *Graph, vertexCmpFn func(Vertex, Vertex) (bool, error), edgeCmpFn func(Edge, Edge) (bool, error)) error {
|
||||
n1, n2 := g.NumVertices(), graph.NumVertices()
|
||||
if n1 != n2 {
|
||||
return fmt.Errorf("base graph has %d vertices, while input graph has %d", n1, n2)
|
||||
}
|
||||
if e1, e2 := g.NumEdges(), graph.NumEdges(); e1 != e2 {
|
||||
return fmt.Errorf("base graph has %d edges, while input graph has %d", e1, e2)
|
||||
}
|
||||
|
||||
var m = make(map[Vertex]Vertex) // g to graph vertex correspondence
|
||||
Loop:
|
||||
// check vertices
|
||||
for v1 := range g.Adjacency() { // for each vertex in g
|
||||
for v2 := range graph.Adjacency() { // does it match in graph ?
|
||||
b, err := vertexCmpFn(v1, v2)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "could not run vertexCmpFn() properly")
|
||||
}
|
||||
// does it match ?
|
||||
if b {
|
||||
m[v1] = v2 // store the mapping
|
||||
continue Loop
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("base graph has no match in input graph for: %s", v1)
|
||||
}
|
||||
// vertices match :)
|
||||
|
||||
// is the mapping the right length?
|
||||
if n1 := len(m); n1 != n2 {
|
||||
return fmt.Errorf("mapping only has correspondence of %d, when it should have %d", n1, n2)
|
||||
}
|
||||
|
||||
// check if mapping is unique (are there duplicates?)
|
||||
m1 := []Vertex{}
|
||||
m2 := []Vertex{}
|
||||
for k, v := range m {
|
||||
if VertexContains(k, m1) {
|
||||
return fmt.Errorf("mapping from %s is used more than once to: %s", k, m1)
|
||||
}
|
||||
if VertexContains(v, m2) {
|
||||
return fmt.Errorf("mapping to %s is used more than once from: %s", v, m2)
|
||||
}
|
||||
m1 = append(m1, k)
|
||||
m2 = append(m2, v)
|
||||
}
|
||||
|
||||
// check edges
|
||||
for v1 := range g.Adjacency() { // for each vertex in g
v2 := m[v1] // lookup in map to get correspondence
|
||||
// g.Adjacency()[v1] corresponds to graph.Adjacency()[v2]
|
||||
if e1, e2 := len(g.Adjacency()[v1]), len(graph.Adjacency()[v2]); e1 != e2 {
|
||||
return fmt.Errorf("base graph, vertex(%s) has %d edges, while input graph, vertex(%s) has %d", v1, e1, v2, e2)
|
||||
}
|
||||
|
||||
for vv1, ee1 := range g.Adjacency()[v1] {
|
||||
vv2 := m[vv1]
|
||||
ee2 := graph.Adjacency()[v2][vv2]
|
||||
|
||||
// these are edges from v1 -> vv1 via ee1 (graph 1)
|
||||
// to cmp to edges from v2 -> vv2 via ee2 (graph 2)
|
||||
|
||||
// check: (1) vv1 == vv2 ? (we've already checked this!)
|
||||
|
||||
// check: (2) ee1 == ee2
|
||||
b, err := edgeCmpFn(ee1, ee2)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "could not run edgeCmpFn() properly")
|
||||
}
|
||||
if !b {
|
||||
return fmt.Errorf("base graph edge(%s) doesn't match input graph edge(%s)", ee1, ee2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil // success!
|
||||
}
|
||||
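// A hedged sketch of GraphCmp with the simplest possible comparison
// functions: two vertices (or edges) are considered topologically equivalent
// when their String() output matches.
func exampleGraphCmpByString(g1, g2 *Graph) error {
	vertexCmpFn := func(v1, v2 Vertex) (bool, error) { return v1.String() == v2.String(), nil }
	edgeCmpFn := func(e1, e2 Edge) (bool, error) { return e1.String() == e2.String(), nil }
	return g1.GraphCmp(g2, vertexCmpFn, edgeCmpFn)
}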
|
||||
// VertexContains is an "in array" function to test for a vertex in a slice of vertices.
|
||||
func VertexContains(needle Vertex, haystack []Vertex) bool {
|
||||
for _, v := range haystack {
|
||||
if needle == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// EdgeContains is an "in array" function to test for an edge in a slice of edges.
|
||||
func EdgeContains(needle Edge, haystack []Edge) bool {
|
||||
for _, v := range haystack {
|
||||
if needle == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Reverse reverses a list of vertices.
|
||||
func Reverse(vs []Vertex) []Vertex {
|
||||
out := []Vertex{}
|
||||
l := len(vs)
|
||||
for i := range vs {
|
||||
out = append(out, vs[l-i-1])
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Sort returns a sorted copy of the list of vertices without modifying the input.
|
||||
func Sort(vs []Vertex) []Vertex {
|
||||
vertices := []Vertex{}
|
||||
for _, v := range vs { // copy
|
||||
vertices = append(vertices, v)
|
||||
}
|
||||
sort.Sort(VertexSlice(vertices))
|
||||
return vertices
|
||||
// sort.Sort(VertexSlice(vs)) // this is wrong, it would modify input!
|
||||
//return vs
|
||||
}
|
||||
741
pgraph/pgraph_test.go
Normal file
@@ -0,0 +1,741 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package pgraph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// vertex is a test struct to test the library.
|
||||
type vertex struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// String is a required method of the Vertex interface that we must fulfill.
|
||||
func (v *vertex) String() string {
|
||||
return v.name
|
||||
}
|
||||
|
||||
// NV is a helper function to make testing easier. It creates a new noop vertex.
|
||||
func NV(s string) Vertex {
|
||||
return &vertex{s}
|
||||
}
|
||||
|
||||
// edge is a test struct to test the library.
|
||||
type edge struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// String is a required method of the Edge interface that we must fulfill.
|
||||
func (e *edge) String() string {
|
||||
return e.name
|
||||
}
|
||||
|
||||
// NE is a helper function to make testing easier. It creates a new noop edge.
|
||||
func NE(s string) Edge {
|
||||
return &edge{s}
|
||||
}
|
||||
|
||||
func TestPgraphT1(t *testing.T) {
|
||||
|
||||
G := &Graph{}
|
||||
|
||||
if i := G.NumVertices(); i != 0 {
|
||||
t.Errorf("should have 0 vertices instead of: %d", i)
|
||||
}
|
||||
|
||||
if i := G.NumEdges(); i != 0 {
|
||||
t.Errorf("should have 0 edges instead of: %d", i)
|
||||
}
|
||||
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
e1 := NE("e1")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
|
||||
if i := G.NumVertices(); i != 2 {
|
||||
t.Errorf("should have 2 vertices instead of: %d", i)
|
||||
}
|
||||
|
||||
if i := G.NumEdges(); i != 1 {
|
||||
t.Errorf("should have 1 edges instead of: %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT2(t *testing.T) {
|
||||
|
||||
G := &Graph{Name: "g2"}
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
v4 := NV("v4")
|
||||
v5 := NV("v5")
|
||||
v6 := NV("v6")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
e4 := NE("e4")
|
||||
e5 := NE("e5")
|
||||
//e6 := NE("e6")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
G.AddEdge(v2, v3, e2)
|
||||
G.AddEdge(v3, v1, e3)
|
||||
|
||||
G.AddEdge(v4, v5, e4)
|
||||
G.AddEdge(v5, v6, e5)
|
||||
|
||||
if i := G.NumVertices(); i != 6 {
|
||||
t.Errorf("should have 6 vertices instead of: %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT3(t *testing.T) {
|
||||
|
||||
G, _ := NewGraph("g3")
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
v4 := NV("v4")
|
||||
v5 := NV("v5")
|
||||
v6 := NV("v6")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
e4 := NE("e4")
|
||||
e5 := NE("e5")
|
||||
//e6 := NE("e6")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
G.AddEdge(v2, v3, e2)
|
||||
G.AddEdge(v3, v1, e3)
|
||||
|
||||
G.AddEdge(v4, v5, e4)
|
||||
G.AddEdge(v5, v6, e5)
|
||||
//G.AddEdge(v6, v4, e6)
|
||||
out1 := G.DFS(v1)
|
||||
if i := len(out1); i != 3 {
|
||||
t.Errorf("should have 3 vertices instead of: %d", i)
|
||||
t.Errorf("found: %v", out1)
|
||||
for _, v := range out1 {
|
||||
t.Errorf("value: %s", v)
|
||||
}
|
||||
}
|
||||
|
||||
out2 := G.DFS(v4)
|
||||
if i := len(out2); i != 3 {
|
||||
t.Errorf("should have 3 vertices instead of: %d", i)
|
||||
t.Errorf("found: %v", out1)
|
||||
for _, v := range out1 {
|
||||
t.Errorf("value: %s", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT4(t *testing.T) {
|
||||
|
||||
G, _ := NewGraph("g4")
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
G.AddEdge(v2, v3, e2)
|
||||
G.AddEdge(v3, v1, e3)
|
||||
|
||||
out := G.DFS(v1)
|
||||
if i := len(out); i != 3 {
|
||||
t.Errorf("should have 3 vertices instead of: %d", i)
|
||||
t.Errorf("found: %v", out)
|
||||
for _, v := range out {
|
||||
t.Errorf("value: %s", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT5(t *testing.T) {
|
||||
G, _ := NewGraph("g5")
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
v4 := NV("v4")
|
||||
v5 := NV("v5")
|
||||
v6 := NV("v6")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
e4 := NE("e4")
|
||||
e5 := NE("e5")
|
||||
//e6 := NE("e6")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
G.AddEdge(v2, v3, e2)
|
||||
G.AddEdge(v3, v1, e3)
|
||||
|
||||
G.AddEdge(v4, v5, e4)
|
||||
G.AddEdge(v5, v6, e5)
|
||||
//G.AddEdge(v6, v4, e6)
|
||||
|
||||
save := []Vertex{v1, v2, v3}
|
||||
out, err := G.FilterGraph("new g5", save)
|
||||
if err != nil {
|
||||
t.Errorf("failed with: %v", err)
|
||||
}
|
||||
|
||||
if i := out.NumVertices(); i != 3 {
|
||||
t.Errorf("should have 3 vertices instead of: %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT6(t *testing.T) {
|
||||
G, _ := NewGraph("g6")
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
v4 := NV("v4")
|
||||
v5 := NV("v5")
|
||||
v6 := NV("v6")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
e4 := NE("e4")
|
||||
e5 := NE("e5")
|
||||
//e6 := NE("e6")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
G.AddEdge(v2, v3, e2)
|
||||
G.AddEdge(v3, v1, e3)
|
||||
|
||||
G.AddEdge(v4, v5, e4)
|
||||
G.AddEdge(v5, v6, e5)
|
||||
//G.AddEdge(v6, v4, e6)
|
||||
|
||||
graphs, err := G.DisconnectedGraphs()
|
||||
if err != nil {
|
||||
t.Errorf("failed with: %v", err)
|
||||
}
|
||||
|
||||
if i := len(graphs); i != 2 {
|
||||
t.Errorf("should have 2 graphs instead of: %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT7(t *testing.T) {
|
||||
|
||||
G, _ := NewGraph("g7")
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
G.AddEdge(v2, v3, e2)
|
||||
G.AddEdge(v3, v1, e3)
|
||||
|
||||
if i := G.NumVertices(); i != 3 {
|
||||
t.Errorf("should have 3 vertices instead of: %d", i)
|
||||
}
|
||||
|
||||
G.DeleteVertex(v2)
|
||||
|
||||
if i := G.NumVertices(); i != 2 {
|
||||
t.Errorf("should have 2 vertices instead of: %d", i)
|
||||
}
|
||||
|
||||
G.DeleteVertex(v1)
|
||||
|
||||
if i := G.NumVertices(); i != 1 {
|
||||
t.Errorf("should have 1 vertex instead of: %d", i)
|
||||
}
|
||||
|
||||
G.DeleteVertex(v3)
|
||||
|
||||
if i := G.NumVertices(); i != 0 {
|
||||
t.Errorf("should have 0 vertices instead of: %d", i)
|
||||
}
|
||||
|
||||
G.DeleteVertex(v2) // duplicate deletes don't error...
|
||||
|
||||
if i := G.NumVertices(); i != 0 {
|
||||
t.Errorf("should have 0 vertices instead of: %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT8(t *testing.T) {
|
||||
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
if VertexContains(v1, []Vertex{v1, v2, v3}) != true {
|
||||
t.Errorf("should be true instead of false.")
|
||||
}
|
||||
|
||||
v4 := NV("v4")
|
||||
v5 := NV("v5")
|
||||
v6 := NV("v6")
|
||||
if VertexContains(v4, []Vertex{v5, v6}) != false {
|
||||
t.Errorf("should be false instead of true.")
|
||||
}
|
||||
|
||||
v7 := NV("v7")
|
||||
v8 := NV("v8")
|
||||
v9 := NV("v9")
|
||||
if VertexContains(v8, []Vertex{v7, v8, v9}) != true {
|
||||
t.Errorf("should be true instead of false.")
|
||||
}
|
||||
|
||||
v1b := NV("v1") // same value, different objects
|
||||
if VertexContains(v1b, []Vertex{v1, v2, v3}) != false {
|
||||
t.Errorf("should be false instead of true.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT9(t *testing.T) {
|
||||
|
||||
G, _ := NewGraph("g9")
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
v4 := NV("v4")
|
||||
v5 := NV("v5")
|
||||
v6 := NV("v6")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
e4 := NE("e4")
|
||||
e5 := NE("e5")
|
||||
e6 := NE("e6")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
G.AddEdge(v1, v3, e2)
|
||||
G.AddEdge(v2, v4, e3)
|
||||
G.AddEdge(v3, v4, e4)
|
||||
|
||||
G.AddEdge(v4, v5, e5)
|
||||
G.AddEdge(v5, v6, e6)
|
||||
|
||||
indegree := G.InDegree() // map[Vertex]int
|
||||
if i := indegree[v1]; i != 0 {
|
||||
t.Errorf("indegree of v1 should be 0 instead of: %d", i)
|
||||
}
|
||||
if i := indegree[v2]; i != 1 {
|
||||
t.Errorf("indegree of v2 should be 1 instead of: %d", i)
|
||||
}
|
||||
if i := indegree[v3]; i != 1 {
|
||||
t.Errorf("indegree of v3 should be 1 instead of: %d", i)
|
||||
}
|
||||
if i := indegree[v4]; i != 2 {
|
||||
t.Errorf("indegree of v4 should be 2 instead of: %d", i)
|
||||
}
|
||||
if i := indegree[v5]; i != 1 {
|
||||
t.Errorf("indegree of v5 should be 1 instead of: %d", i)
|
||||
}
|
||||
if i := indegree[v6]; i != 1 {
|
||||
t.Errorf("indegree of v6 should be 1 instead of: %d", i)
|
||||
}
|
||||
|
||||
outdegree := G.OutDegree() // map[Vertex]int
|
||||
if i := outdegree[v1]; i != 2 {
|
||||
t.Errorf("outdegree of v1 should be 2 instead of: %d", i)
|
||||
}
|
||||
if i := outdegree[v2]; i != 1 {
|
||||
t.Errorf("outdegree of v2 should be 1 instead of: %d", i)
|
||||
}
|
||||
if i := outdegree[v3]; i != 1 {
|
||||
t.Errorf("outdegree of v3 should be 1 instead of: %d", i)
|
||||
}
|
||||
if i := outdegree[v4]; i != 1 {
|
||||
t.Errorf("outdegree of v4 should be 1 instead of: %d", i)
|
||||
}
|
||||
if i := outdegree[v5]; i != 1 {
|
||||
t.Errorf("outdegree of v5 should be 1 instead of: %d", i)
|
||||
}
|
||||
if i := outdegree[v6]; i != 0 {
|
||||
t.Errorf("outdegree of v6 should be 0 instead of: %d", i)
|
||||
}
|
||||
|
||||
s, err := G.TopologicalSort()
|
||||
// either possibility is a valid toposort
|
||||
match := reflect.DeepEqual(s, []Vertex{v1, v2, v3, v4, v5, v6}) || reflect.DeepEqual(s, []Vertex{v1, v3, v2, v4, v5, v6})
|
||||
if err != nil || !match {
|
||||
t.Errorf("topological sort failed, error: %v", err)
|
||||
str := "Found:"
|
||||
for _, v := range s {
|
||||
str += " " + v.String()
|
||||
}
|
||||
t.Errorf(str)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPgraphT10(t *testing.T) {
|
||||
|
||||
G, _ := NewGraph("g10")
|
||||
v1 := NV("v1")
|
||||
v2 := NV("v2")
|
||||
v3 := NV("v3")
|
||||
v4 := NV("v4")
|
||||
v5 := NV("v5")
|
||||
v6 := NV("v6")
|
||||
e1 := NE("e1")
|
||||
e2 := NE("e2")
|
||||
e3 := NE("e3")
|
||||
e4 := NE("e4")
|
||||
e5 := NE("e5")
|
||||
e6 := NE("e6")
|
||||
G.AddEdge(v1, v2, e1)
|
||||
G.AddEdge(v2, v3, e2)
|
||||
G.AddEdge(v3, v4, e3)
|
||||
G.AddEdge(v4, v5, e4)
|
||||
G.AddEdge(v5, v6, e5)
|
||||
G.AddEdge(v4, v2, e6) // cycle
|
||||
|
||||
if _, err := G.TopologicalSort(); err == nil {
|
||||
t.Errorf("topological sort passed, but graph is cyclic")
|
||||
}
|
||||
}
|
||||
|
// empty
func TestPgraphReachability0(t *testing.T) {
	{
		G, _ := NewGraph("g")
		result := G.Reachability(nil, nil)
		if result != nil {
			t.Logf("reachability failed")
			str := "Got:"
			for _, v := range result {
				str += " " + v.String()
			}
			t.Errorf(str)
		}
	}
	{
		G, _ := NewGraph("g")
		v1 := NV("v1")
		v6 := NV("v6")

		result := G.Reachability(v1, v6)
		expected := []Vertex{}

		if !reflect.DeepEqual(result, expected) {
			t.Logf("reachability failed")
			str := "Got:"
			for _, v := range result {
				str += " " + v.String()
			}
			t.Errorf(str)
		}
	}
	{
		G, _ := NewGraph("g")
		v1 := NV("v1")
		v2 := NV("v2")
		v3 := NV("v3")
		v4 := NV("v4")
		v5 := NV("v5")
		v6 := NV("v6")
		e1 := NE("e1")
		e2 := NE("e2")
		e3 := NE("e3")
		e4 := NE("e4")
		e5 := NE("e5")
		G.AddEdge(v1, v2, e1)
		G.AddEdge(v2, v3, e2)
		G.AddEdge(v1, v4, e3)
		G.AddEdge(v3, v4, e4)
		G.AddEdge(v3, v5, e5)

		result := G.Reachability(v1, v6)
		expected := []Vertex{}

		if !reflect.DeepEqual(result, expected) {
			t.Logf("reachability failed")
			str := "Got:"
			for _, v := range result {
				str += " " + v.String()
			}
			t.Errorf(str)
		}
	}
}

// simple linear path
func TestPgraphReachability1(t *testing.T) {
	G, _ := NewGraph("g")
	v1 := NV("v1")
	v2 := NV("v2")
	v3 := NV("v3")
	v4 := NV("v4")
	v5 := NV("v5")
	v6 := NV("v6")
	e1 := NE("e1")
	e2 := NE("e2")
	e3 := NE("e3")
	e4 := NE("e4")
	e5 := NE("e5")
	//e6 := NE("e6")
	G.AddEdge(v1, v2, e1)
	G.AddEdge(v2, v3, e2)
	G.AddEdge(v3, v4, e3)
	G.AddEdge(v4, v5, e4)
	G.AddEdge(v5, v6, e5)

	result := G.Reachability(v1, v6)
	expected := []Vertex{v1, v2, v3, v4, v5, v6}

	if !reflect.DeepEqual(result, expected) {
		t.Logf("reachability failed")
		str := "Got:"
		for _, v := range result {
			str += " " + v.String()
		}
		t.Errorf(str)
	}
}

// pick one of two correct paths
func TestPgraphReachability2(t *testing.T) {
	G, _ := NewGraph("g")
	v1 := NV("v1")
	v2 := NV("v2")
	v3 := NV("v3")
	v4 := NV("v4")
	v5 := NV("v5")
	v6 := NV("v6")
	e1 := NE("e1")
	e2 := NE("e2")
	e3 := NE("e3")
	e4 := NE("e4")
	e5 := NE("e5")
	e6 := NE("e6")
	G.AddEdge(v1, v2, e1)
	G.AddEdge(v1, v3, e2)
	G.AddEdge(v2, v4, e3)
	G.AddEdge(v3, v4, e4)
	G.AddEdge(v4, v5, e5)
	G.AddEdge(v5, v6, e6)

	result := G.Reachability(v1, v6)
	expected1 := []Vertex{v1, v2, v4, v5, v6}
	expected2 := []Vertex{v1, v3, v4, v5, v6}

	// !xor test
	if reflect.DeepEqual(result, expected1) == reflect.DeepEqual(result, expected2) {
		t.Logf("reachability failed")
		str := "Got:"
		for _, v := range result {
			str += " " + v.String()
		}
		t.Errorf(str)
	}
}

// pick shortest path
func TestPgraphReachability3(t *testing.T) {
	G, _ := NewGraph("g")
	v1 := NV("v1")
	v2 := NV("v2")
	v3 := NV("v3")
	v4 := NV("v4")
	v5 := NV("v5")
	v6 := NV("v6")
	e1 := NE("e1")
	e2 := NE("e2")
	e3 := NE("e3")
	e4 := NE("e4")
	e5 := NE("e5")
	e6 := NE("e6")
	G.AddEdge(v1, v2, e1)
	G.AddEdge(v2, v3, e2)
	G.AddEdge(v3, v4, e3)
	G.AddEdge(v4, v5, e4)
	G.AddEdge(v1, v5, e5)
	G.AddEdge(v5, v6, e6)

	result := G.Reachability(v1, v6)
	expected := []Vertex{v1, v5, v6}

	if !reflect.DeepEqual(result, expected) {
		t.Logf("reachability failed")
		str := "Got:"
		for _, v := range result {
			str += " " + v.String()
		}
		t.Errorf(str)
	}
}

// direct path
func TestPgraphReachability4(t *testing.T) {
	G, _ := NewGraph("g")
	v1 := NV("v1")
	v2 := NV("v2")
	v3 := NV("v3")
	v4 := NV("v4")
	v5 := NV("v5")
	v6 := NV("v6")
	e1 := NE("e1")
	e2 := NE("e2")
	e3 := NE("e3")
	e4 := NE("e4")
	e5 := NE("e5")
	e6 := NE("e6")
	G.AddEdge(v1, v2, e1)
	G.AddEdge(v2, v3, e2)
	G.AddEdge(v3, v4, e3)
	G.AddEdge(v4, v5, e4)
	G.AddEdge(v5, v6, e5)
	G.AddEdge(v1, v6, e6)

	result := G.Reachability(v1, v6)
	expected := []Vertex{v1, v6}

	if !reflect.DeepEqual(result, expected) {
		t.Logf("reachability failed")
		str := "Got:"
		for _, v := range result {
			str += " " + v.String()
		}
		t.Errorf(str)
	}
}

func TestPgraphT11(t *testing.T) {
	v1 := NV("v1")
	v2 := NV("v2")
	v3 := NV("v3")
	v4 := NV("v4")
	v5 := NV("v5")
	v6 := NV("v6")

	if rev := Reverse([]Vertex{}); !reflect.DeepEqual(rev, []Vertex{}) {
		t.Errorf("reverse of vertex slice failed (empty)")
	}

	if rev := Reverse([]Vertex{v1}); !reflect.DeepEqual(rev, []Vertex{v1}) {
		t.Errorf("reverse of vertex slice failed (single)")
	}

	if rev := Reverse([]Vertex{v1, v2, v3, v4, v5, v6}); !reflect.DeepEqual(rev, []Vertex{v6, v5, v4, v3, v2, v1}) {
		t.Errorf("reverse of vertex slice failed (1..6)")
	}

	if rev := Reverse([]Vertex{v6, v5, v4, v3, v2, v1}); !reflect.DeepEqual(rev, []Vertex{v1, v2, v3, v4, v5, v6}) {
		t.Errorf("reverse of vertex slice failed (6..1)")
	}
}

func TestPgraphCopy1(t *testing.T) {
	g1 := &Graph{}
	g2 := g1.Copy() // check this doesn't panic
	if !reflect.DeepEqual(g1.String(), g2.String()) {
		t.Errorf("graph copy failed")
	}
}

func TestPgraphDelete1(t *testing.T) {
	G := &Graph{}
	v1 := NV("v1")
	G.DeleteVertex(v1) // check this doesn't panic

	if i := G.NumVertices(); i != 0 {
		t.Errorf("should have 0 vertices instead of: %d", i)
	}
}

func vertexCmpFn(v1, v2 Vertex) (bool, error) {
	if v1.String() == "" || v2.String() == "" {
		return false, fmt.Errorf("oops, empty vertex")
	}
	return v1.String() == v2.String(), nil
}

func edgeCmpFn(e1, e2 Edge) (bool, error) {
	if e1.String() == "" || e2.String() == "" {
		return false, fmt.Errorf("oops, empty edge")
	}
	return e1.String() == e2.String(), nil
}

func TestPgraphGraphCmp1(t *testing.T) {
	g1 := &Graph{}
	g2 := &Graph{}
	g3 := &Graph{}
	g3.AddVertex(NV("v1"))
	g4 := &Graph{}
	g4.AddVertex(NV("v2"))

	if err := g1.GraphCmp(g2, vertexCmpFn, edgeCmpFn); err != nil {
		t.Errorf("should have no error during GraphCmp, but got: %v", err)
	}

	if err := g1.GraphCmp(g3, vertexCmpFn, edgeCmpFn); err == nil {
		t.Errorf("should have error during GraphCmp, but got nil")
	}

	if err := g3.GraphCmp(g4, vertexCmpFn, edgeCmpFn); err == nil {
		t.Errorf("should have error during GraphCmp, but got nil")
	}
}

func TestPgraphSort0(t *testing.T) {
	vs := []Vertex{}
	s := Sort(vs)

	if !reflect.DeepEqual(s, []Vertex{}) {
		t.Errorf("sort failed!")
		if s == nil {
			t.Logf("output is nil!")
		} else {
			str := "Got:"
			for _, v := range s {
				str += " " + v.String()
			}
			t.Errorf(str)
		}
	}
}

func TestPgraphSort1(t *testing.T) {
	v1 := NV("v1")
	v2 := NV("v2")
	v3 := NV("v3")
	v4 := NV("v4")
	v5 := NV("v5")
	v6 := NV("v6")

	vs := []Vertex{v3, v2, v6, v1, v5, v4}
	s := Sort(vs)

	if !reflect.DeepEqual(s, []Vertex{v1, v2, v3, v4, v5, v6}) {
		t.Errorf("sort failed!")
		str := "Got:"
		for _, v := range s {
			str += " " + v.String()
		}
		t.Errorf(str)
	}

	if !reflect.DeepEqual(vs, []Vertex{v3, v2, v6, v1, v5, v4}) {
		t.Errorf("sort modified input!")
		str := "Got:"
		for _, v := range vs {
			str += " " + v.String()
		}
		t.Errorf(str)
	}
}

pgraph/subgraph.go (new file, +106 lines)
@@ -0,0 +1,106 @@
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package pgraph

// AddGraph adds the set of edges and vertices of a graph to the existing graph.
func (g *Graph) AddGraph(graph *Graph) {
	g.addEdgeVertexGraphHelper(nil, graph, nil, false, false)
}

// AddEdgeVertexGraph adds a directed edge to the graph from a vertex.
// This is useful for flattening the relationship between a subgraph and an
// existing graph, without having to run the subgraph recursively. It adds the
// maximum number of edges, creating a relationship to every vertex.
func (g *Graph) AddEdgeVertexGraph(vertex Vertex, graph *Graph, edgeGenFn func(v1, v2 Vertex) Edge) {
	g.addEdgeVertexGraphHelper(vertex, graph, edgeGenFn, false, false)
}

// AddEdgeVertexGraphLight adds a directed edge to the graph from a vertex.
// This is useful for flattening the relationship between a subgraph and an
// existing graph, without having to run the subgraph recursively. It adds the
// minimum number of edges, creating a relationship to the vertices with
// indegree equal to zero.
func (g *Graph) AddEdgeVertexGraphLight(vertex Vertex, graph *Graph, edgeGenFn func(v1, v2 Vertex) Edge) {
	g.addEdgeVertexGraphHelper(vertex, graph, edgeGenFn, false, true)
}

// AddEdgeGraphVertex adds a directed edge to the vertex from a graph.
// This is useful for flattening the relationship between a subgraph and an
// existing graph, without having to run the subgraph recursively. It adds the
// maximum number of edges, creating a relationship from every vertex.
func (g *Graph) AddEdgeGraphVertex(graph *Graph, vertex Vertex, edgeGenFn func(v1, v2 Vertex) Edge) {
	g.addEdgeVertexGraphHelper(vertex, graph, edgeGenFn, true, false)
}

// AddEdgeGraphVertexLight adds a directed edge to the vertex from a graph.
// This is useful for flattening the relationship between a subgraph and an
// existing graph, without having to run the subgraph recursively. It adds the
// minimum number of edges, creating a relationship from the vertices with
// outdegree equal to zero.
func (g *Graph) AddEdgeGraphVertexLight(graph *Graph, vertex Vertex, edgeGenFn func(v1, v2 Vertex) Edge) {
	g.addEdgeVertexGraphHelper(vertex, graph, edgeGenFn, true, true)
}
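
The four wrappers above differ only in direction (vertex-to-graph vs. graph-to-vertex) and in how many edges they generate (every vertex vs. only the degree-zero ones). As a rough orientation aid, here is a minimal editorial sketch (not part of this commit) that assumes the NV/NE constructors used in the test file above, plus an arbitrary edge-naming scheme inside edgeGenFn:

// editorial sketch, not in the commit: flatten a two-branch subgraph under v0
func flattenForwardSketch() {
	g, _ := NewGraph("main")
	v0 := NV("v0")
	g.AddVertex(v0)

	sub, _ := NewGraph("sub")
	a, b, c := NV("a"), NV("b"), NV("c")
	sub.AddEdge(a, b, NE("a->b")) // a is the only indegree-zero vertex in sub
	sub.AddEdge(a, c, NE("a->c"))

	edgeGenFn := func(v1, v2 Vertex) Edge {
		return NE(v1.String() + "," + v2.String()) // any unique name works here
	}

	// full variant: v0 gains an edge to every vertex of sub (a, b and c)
	g.AddEdgeVertexGraph(v0, sub, edgeGenFn)

	// Light variant (alternative call): v0 would only gain an edge to a, since
	// b and c are already reachable through sub's own edges, which get copied
	// into g as well
	//g.AddEdgeVertexGraphLight(v0, sub, edgeGenFn)
}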

// addEdgeVertexGraphHelper is a helper function that adds directed edges to
// the graph from a vertex, or vice-versa. It operates in the reverse direction
// by specifying the reverse argument as true. It is useful for flattening the
// relationship between a subgraph and an existing graph, without having to run
// the subgraph recursively. It adds the maximum number of edges, creating a
// relationship to or from every vertex if the light argument is false; if it
// is true, it adds the minimum number of edges, creating a relationship to or
// from only the vertices with an indegree or outdegree equal to zero, depending
// on whether reverse was specified.
func (g *Graph) addEdgeVertexGraphHelper(vertex Vertex, graph *Graph, edgeGenFn func(v1, v2 Vertex) Edge, reverse, light bool) {

	var degree map[Vertex]int // compute all of the in/outdegrees if needed
	if light && reverse {
		degree = graph.OutDegree()
	} else if light { // && !reverse
		degree = graph.InDegree()
	}
	for _, v := range graph.VerticesSorted() { // sort to help out edgeGenFn

		// forward:
		// we only want to add edges to indegree == 0, because every
		// other vertex is a dependency of at least one of those

		// reverse:
		// we only want to add edges to outdegree == 0, because every
		// other vertex is a prerequisite to at least one of these
		if light && degree[v] != 0 {
			continue
		}

		g.AddVertex(v) // ensure vertex is part of the graph

		if vertex != nil && reverse {
			edge := edgeGenFn(v, vertex) // generate a new unique edge
			g.AddEdge(v, vertex, edge)
		} else if vertex != nil { // && !reverse
			edge := edgeGenFn(vertex, v)
			g.AddEdge(vertex, v, edge)
		}
	}

	// also remember to suck in all of the graph's edges too!
	for v1 := range graph.Adjacency() {
		for v2, e := range graph.Adjacency()[v1] {
			g.AddEdge(v1, v2, e)
		}
	}
}
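
For the reverse direction the same reasoning applies with outdegree in place of indegree. The following test-style sketch (again editorial, not part of this commit; it reuses the NV/NE helpers and the OutDegree accessor exercised in the tests above) shows what the Light form generates:

// editorial sketch, not in the commit: in the reverse Light variant, only the
// outdegree-zero vertices of the subgraph get an edge to the target vertex
func TestSubgraphFlattenReverseSketch(t *testing.T) {
	g, _ := NewGraph("main")
	v0 := NV("v0")
	g.AddVertex(v0)

	sub, _ := NewGraph("sub")
	a, b, c := NV("a"), NV("b"), NV("c")
	sub.AddEdge(a, b, NE("e1")) // b and c have outdegree zero in sub
	sub.AddEdge(a, c, NE("e2"))

	edgeGenFn := func(v1, v2 Vertex) Edge {
		return NE(v1.String() + "," + v2.String())
	}
	g.AddEdgeGraphVertexLight(sub, v0, edgeGenFn)

	// only b->v0 and c->v0 are generated; a->v0 is not, and sub's own edges
	// (a->b, a->c) are copied in, so a keeps outdegree 2
	outdegree := g.OutDegree()
	if i := outdegree[a]; i != 2 {
		t.Errorf("outdegree of a should be 2 instead of: %d", i)
	}
	if i := outdegree[b]; i != 1 {
		t.Errorf("outdegree of b should be 1 instead of: %d", i)
	}
	if i := outdegree[v0]; i != 0 {
		t.Errorf("outdegree of v0 should be 0 instead of: %d", i)
	}
}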