Compare commits
67 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2e2658ab6f | ||
|
|
1370f2a76b | ||
|
|
75dedf391a | ||
|
|
7b5c640d05 | ||
|
|
aa9a21b4d0 | ||
|
|
71de8014d5 | ||
|
|
80476d19f9 | ||
|
|
15103d18ef | ||
|
|
0dbd2004ad | ||
|
|
8c92566889 | ||
|
|
fb9449038b | ||
|
|
e06c4a873d | ||
|
|
c4c28c6c82 | ||
|
|
42ff9b803a | ||
|
|
3831e9739c | ||
|
|
f196e5cca2 | ||
|
|
d3af9105ee | ||
|
|
6d685ae4d6 | ||
|
|
8381d8246a | ||
|
|
b26322fc20 | ||
|
|
1c1e8127d8 | ||
|
|
1b3b4406ff | ||
|
|
cf0b77518a | ||
|
|
afdbf44e23 | ||
|
|
ec87781956 | ||
|
|
a6ae958be7 | ||
|
|
312103ef1b | ||
|
|
c2911bb2b7 | ||
|
|
8ca5e38121 | ||
|
|
4b8ad3a8a7 | ||
|
|
f219c2649d | ||
|
|
cfde54261b | ||
|
|
71a82b0a34 | ||
|
|
b7bd2d2664 | ||
|
|
cd26a0770d | ||
|
|
46893e84c3 | ||
|
|
567dcaf79d | ||
|
|
9368c7e05f | ||
|
|
654b3e9dbe | ||
|
|
f09db490f0 | ||
|
|
30d93cfde7 | ||
|
|
41b3db7d6b | ||
|
|
2a60debceb | ||
|
|
eb30642b6f | ||
|
|
ea85e2af6b | ||
|
|
ef979a0839 | ||
|
|
e0107b1dda | ||
|
|
ccc00f913d | ||
|
|
ad3c6bdc88 | ||
|
|
8fe3891ea9 | ||
|
|
63f21952f4 | ||
|
|
361d643ce7 | ||
|
|
abe1ffaab6 | ||
|
|
fc24c91dde | ||
|
|
53cabd5ee4 | ||
|
|
2b1e8cdbee | ||
|
|
9715146495 | ||
|
|
22b0b89949 | ||
|
|
2ebc23a777 | ||
|
|
0199285319 | ||
|
|
277ab2fe44 | ||
|
|
8a96dfdc8a | ||
|
|
66fbbb940a | ||
|
|
716ea1bb3c | ||
|
|
3d701d3daa | ||
|
|
598c74657c | ||
|
|
4bd53d5ab0 |
9
.gitmodules
vendored
@@ -1,3 +1,12 @@
|
|||||||
[submodule "vendor/github.com/coreos/etcd"]
|
[submodule "vendor/github.com/coreos/etcd"]
|
||||||
path = vendor/github.com/coreos/etcd
|
path = vendor/github.com/coreos/etcd
|
||||||
url = https://github.com/coreos/etcd/
|
url = https://github.com/coreos/etcd/
|
||||||
|
[submodule "vendor/google.golang.org/grpc"]
|
||||||
|
path = vendor/google.golang.org/grpc
|
||||||
|
url = https://github.com/grpc/grpc-go
|
||||||
|
[submodule "vendor/github.com/grpc-ecosystem/grpc-gateway"]
|
||||||
|
path = vendor/github.com/grpc-ecosystem/grpc-gateway
|
||||||
|
url = https://github.com/grpc-ecosystem/grpc-gateway
|
||||||
|
[submodule "vendor/gopkg.in/fsnotify.v1"]
|
||||||
|
path = vendor/gopkg.in/fsnotify.v1
|
||||||
|
url = https://gopkg.in/fsnotify.v1
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
language: go
|
language: go
|
||||||
go:
|
go:
|
||||||
- 1.4.3
|
|
||||||
- 1.5.3
|
|
||||||
- 1.6
|
- 1.6
|
||||||
|
- 1.7
|
||||||
- tip
|
- tip
|
||||||
sudo: false
|
sudo: true
|
||||||
|
dist: trusty
|
||||||
before_install: 'git fetch --unshallow'
|
before_install: 'git fetch --unshallow'
|
||||||
install: 'make deps'
|
install: 'make deps'
|
||||||
script: 'make test'
|
script: 'make test'
|
||||||
@@ -12,8 +12,7 @@ matrix:
|
|||||||
fast_finish: true
|
fast_finish: true
|
||||||
allow_failures:
|
allow_failures:
|
||||||
- go: tip
|
- go: tip
|
||||||
- go: 1.4.3
|
- go: 1.7
|
||||||
- go: 1.6
|
|
||||||
notifications:
|
notifications:
|
||||||
irc:
|
irc:
|
||||||
channels:
|
channels:
|
||||||
|
|||||||
178
DOCUMENTATION.md
@@ -35,13 +35,16 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|||||||
* [Autogrouping - Automatic resource grouping](#autogrouping)
|
* [Autogrouping - Automatic resource grouping](#autogrouping)
|
||||||
* [Automatic clustering - Automatic cluster management](#automatic-clustering)
|
* [Automatic clustering - Automatic cluster management](#automatic-clustering)
|
||||||
* [Remote mode - Remote "agent-less" execution](#remote-agent-less-mode)
|
* [Remote mode - Remote "agent-less" execution](#remote-agent-less-mode)
|
||||||
5. [Usage/FAQ - Notes on usage and frequently asked questions](#usage-and-frequently-asked-questions)
|
* [Puppet support - write manifest code for mgmt](#puppet-support)
|
||||||
6. [Reference - Detailed reference](#reference)
|
5. [Resources - All built-in primitives](#resources)
|
||||||
|
6. [Usage/FAQ - Notes on usage and frequently asked questions](#usage-and-frequently-asked-questions)
|
||||||
|
7. [Reference - Detailed reference](#reference)
|
||||||
|
* [Meta parameters](#meta-parameters)
|
||||||
* [Graph definition file](#graph-definition-file)
|
* [Graph definition file](#graph-definition-file)
|
||||||
* [Command line](#command-line)
|
* [Command line](#command-line)
|
||||||
7. [Examples - Example configurations](#examples)
|
8. [Examples - Example configurations](#examples)
|
||||||
8. [Development - Background on module development and reporting bugs](#development)
|
9. [Development - Background on module development and reporting bugs](#development)
|
||||||
9. [Authors - Authors and contact information](#authors)
|
10. [Authors - Authors and contact information](#authors)
|
||||||
|
|
||||||
##Overview
|
##Overview
|
||||||
|
|
||||||
@@ -67,7 +70,7 @@ Older videos and other material [is available](https://github.com/purpleidea/mgm
|
|||||||
##Setup
|
##Setup
|
||||||
|
|
||||||
During this prototype phase, the tool can be run out of the source directory.
|
During this prototype phase, the tool can be run out of the source directory.
|
||||||
You'll probably want to use ```./run.sh run --file examples/graph1.yaml``` to
|
You'll probably want to use ```./run.sh run --yaml examples/graph1.yaml``` to
|
||||||
get started. Beware that this _can_ cause data loss. Understand what you're
|
get started. Beware that this _can_ cause data loss. Understand what you're
|
||||||
doing first, or perform these actions in a virtual environment such as the one
|
doing first, or perform these actions in a virtual environment such as the one
|
||||||
provided by [Oh-My-Vagrant](https://github.com/purpleidea/oh-my-vagrant).
|
provided by [Oh-My-Vagrant](https://github.com/purpleidea/oh-my-vagrant).
|
||||||
@@ -167,7 +170,127 @@ which need to exchange information that is only available at run time.
|
|||||||
|
|
||||||
####Blog post
|
####Blog post
|
||||||
|
|
||||||
An introductory blog post about this topic will follow soon.
|
You can read the introductory blog post about this topic here:
|
||||||
|
[https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/)
|
||||||
|
|
||||||
|
###Puppet support
|
||||||
|
|
||||||
|
You can supply a Puppet manifest instead of creating the (YAML) graph manually.
|
||||||
|
Puppet must be installed and in `mgmt`'s search path. You also need the
|
||||||
|
[ffrank-mgmtgraph Puppet module](https://forge.puppet.com/ffrank/mgmtgraph).
|
||||||
|
|
||||||
|
Invoke `mgmt` with the `--puppet` switch, which supports 3 variants:
|
||||||
|
|
||||||
|
1. Request the configuration from the Puppet Master (like `puppet agent` does)
|
||||||
|
|
||||||
|
mgmt run --puppet agent
|
||||||
|
|
||||||
|
2. Compile a local manifest file (like `puppet apply`)
|
||||||
|
|
||||||
|
mgmt run --puppet /path/to/my/manifest.pp
|
||||||
|
|
||||||
|
3. Compile an ad hoc manifest from the commandline (like `puppet apply -e`)
|
||||||
|
|
||||||
|
mgmt run --puppet 'file { "/etc/ntp.conf": ensure => file }'
|
||||||
|
|
||||||
|
For more details and caveats see [Puppet.md](Puppet.md).
|
||||||
|
|
||||||
|
####Blog post
|
||||||
|
|
||||||
|
An introductory post on the Puppet support is on
|
||||||
|
[Felix's blog](http://ffrank.github.io/features/2016/06/19/puppet-powered-mgmt/).
|
||||||
|
|
||||||
|
##Resources
|
||||||
|
|
||||||
|
This section lists all the built-in resources and their properties. The
|
||||||
|
resource primitives in `mgmt` are typically more powerful than resources in
|
||||||
|
other configuration management systems because they can be event based which
|
||||||
|
lets them respond in real-time to converge to the desired state. This property
|
||||||
|
allows you to build more complex resources that you probably hadn't considered
|
||||||
|
in the past.
|
||||||
|
|
||||||
|
In addition to the resource specific properties, there are resource properties
|
||||||
|
(otherwise known as parameters) which can apply to every resource. These are
|
||||||
|
called [meta parameters](#meta-parameters) and are listed separately. Certain
|
||||||
|
meta parameters aren't very useful when combined with certain resources, but
|
||||||
|
in general, it should be fairly obvious, such as when combining the `noop` meta
|
||||||
|
parameter with the [Noop](#Noop) resource.
|
||||||
|
|
||||||
|
* [Exec](#Exec): Execute shell commands on the system.
|
||||||
|
* [File](#File): Manage files and directories.
|
||||||
|
* [Msg](#Msg): Send log messages.
|
||||||
|
* [Noop](#Noop): A simple resource that does nothing.
|
||||||
|
* [Pkg](#Pkg): Manage system packages with PackageKit.
|
||||||
|
* [Svc](#Svc): Manage system systemd services.
|
||||||
|
* [Timer](#Timer): Manage system systemd services.
|
||||||
|
* [Virt](#Virt): Manage virtual machines with libvirt.
|
||||||
|
|
||||||
|
###Exec
|
||||||
|
|
||||||
|
The exec resource can execute commands on your system.
|
||||||
|
|
||||||
|
###File
|
||||||
|
|
||||||
|
The file resource manages files and directories. In `mgmt`, directories are
|
||||||
|
identified by a trailing slash in their path name. File have no such slash.
|
||||||
|
|
||||||
|
####Path
|
||||||
|
|
||||||
|
The path property specifies the file or directory that we are managing.
|
||||||
|
|
||||||
|
####Content
|
||||||
|
|
||||||
|
The content property is a string that specifies the desired file contents.
|
||||||
|
|
||||||
|
####Source
|
||||||
|
|
||||||
|
The source property points to a source file or directory path that we wish to
|
||||||
|
copy over and use as the desired contents for our resource.
|
||||||
|
|
||||||
|
####State
|
||||||
|
|
||||||
|
The state property describes the action we'd like to apply for the resource. The
|
||||||
|
possible values are: `exists` and `absent`.
|
||||||
|
|
||||||
|
####Recurse
|
||||||
|
|
||||||
|
The recurse property limits whether file resource operations should recurse into
|
||||||
|
and monitor directory contents with a depth greater than one.
|
||||||
|
|
||||||
|
####Force
|
||||||
|
|
||||||
|
The force property is required if we want the file resource to be able to change
|
||||||
|
a file into a directory or vice-versa. If such a change is needed, but the force
|
||||||
|
property is not set to `true`, then this file resource will error.
|
||||||
|
|
||||||
|
###Msg
|
||||||
|
|
||||||
|
The msg resource sends messages to the main log, or an external service such
|
||||||
|
as systemd's journal.
|
||||||
|
|
||||||
|
###Noop
|
||||||
|
|
||||||
|
The noop resource does absolutely nothing. It does have some utility in testing
|
||||||
|
`mgmt` and also as a placeholder in the resource graph.
|
||||||
|
|
||||||
|
###Pkg
|
||||||
|
|
||||||
|
The pkg resource is used to manage system packages. This resource works on many
|
||||||
|
different distributions because it uses the underlying packagekit facility which
|
||||||
|
supports different backends for different environments. This ensures that we
|
||||||
|
have great Debian (deb/dpkg) and Fedora (rpm/dnf) support simultaneously.
|
||||||
|
|
||||||
|
###Svc
|
||||||
|
|
||||||
|
The service resource is still very WIP. Please help us my improving it!
|
||||||
|
|
||||||
|
###Timer
|
||||||
|
|
||||||
|
This resource needs better documentation. Please help us my improving it!
|
||||||
|
|
||||||
|
###Virt
|
||||||
|
|
||||||
|
The virt resource can manage virtual machines via libvirt.
|
||||||
|
|
||||||
##Usage and frequently asked questions
|
##Usage and frequently asked questions
|
||||||
(Send your questions as a patch to this FAQ! I'll review it, merge it, and
|
(Send your questions as a patch to this FAQ! I'll review it, merge it, and
|
||||||
@@ -216,7 +339,7 @@ starting up, and as a result, a default endpoint never gets added. The solution
|
|||||||
is to either reconcile the mistake, and if there is no important data saved, you
|
is to either reconcile the mistake, and if there is no important data saved, you
|
||||||
can remove the etcd dataDir. This is typically `/var/lib/mgmt/etcd/member/`.
|
can remove the etcd dataDir. This is typically `/var/lib/mgmt/etcd/member/`.
|
||||||
|
|
||||||
###Why do resources have both a `Compare` method and an `IFF` (on the UUID) method?
|
###Why do resources have both a `Compare` method and an `IFF` (on the UID) method?
|
||||||
|
|
||||||
The `Compare()` methods are for determining if two resources are effectively the
|
The `Compare()` methods are for determining if two resources are effectively the
|
||||||
same, which is used to make graph change delta's efficient. This is when we want
|
same, which is used to make graph change delta's efficient. This is when we want
|
||||||
@@ -225,9 +348,9 @@ vertices. Since we want to make this process efficient, we only update the parts
|
|||||||
that are different, and leave everything else alone. This `Compare()` method can
|
that are different, and leave everything else alone. This `Compare()` method can
|
||||||
tell us if two resources are the same.
|
tell us if two resources are the same.
|
||||||
|
|
||||||
The `IFF()` method is part of the whole UUID system, which is for discerning if
|
The `IFF()` method is part of the whole UID system, which is for discerning if a
|
||||||
a resource meets the requirements another expects for an automatic edge. This is
|
resource meets the requirements another expects for an automatic edge. This is
|
||||||
because the automatic edge system assumes a unified UUID pattern to test for
|
because the automatic edge system assumes a unified UID pattern to test for
|
||||||
equality. In the future it might be helpful or sane to merge the two similar
|
equality. In the future it might be helpful or sane to merge the two similar
|
||||||
comparison functions although for now they are separate because they are
|
comparison functions although for now they are separate because they are
|
||||||
actually answer different questions.
|
actually answer different questions.
|
||||||
@@ -256,9 +379,40 @@ information on these options, please view the source at:
|
|||||||
If you feel that a well used option needs documenting here, please patch it!
|
If you feel that a well used option needs documenting here, please patch it!
|
||||||
|
|
||||||
###Overview of reference
|
###Overview of reference
|
||||||
|
* [Meta parameters](#meta-parameters): List of available resource meta parameters.
|
||||||
* [Graph definition file](#graph-definition-file): Main graph definition file.
|
* [Graph definition file](#graph-definition-file): Main graph definition file.
|
||||||
* [Command line](#command-line): Command line parameters.
|
* [Command line](#command-line): Command line parameters.
|
||||||
|
|
||||||
|
###Meta parameters
|
||||||
|
These meta parameters are special parameters (or properties) which can apply to
|
||||||
|
any resource. The usefulness of doing so will depend on the particular meta
|
||||||
|
parameter and resource combination.
|
||||||
|
|
||||||
|
####AutoEdge
|
||||||
|
Boolean. Should we generate auto edges for this resource?
|
||||||
|
|
||||||
|
####AutoGroup
|
||||||
|
Boolean. Should we attempt to automatically group this resource with others?
|
||||||
|
|
||||||
|
####Noop
|
||||||
|
Boolean. Should the Apply portion of the CheckApply method of the resource
|
||||||
|
make any changes? Noop is a concatenation of no-operation.
|
||||||
|
|
||||||
|
####Retry
|
||||||
|
Integer. The number of times to retry running the resource on error. Use -1 for
|
||||||
|
infinite. This currently applies for both the Watch operation (which can fail)
|
||||||
|
and for the CheckApply operation. While they could have separate values, I've
|
||||||
|
decided to use the same ones for both until there's a proper reason to want to
|
||||||
|
do something differently for the Watch errors.
|
||||||
|
|
||||||
|
####Delay
|
||||||
|
Integer. Number of milliseconds to wait between retries. The same value is
|
||||||
|
shared between the Watch and CheckApply retries. This currently applies for both
|
||||||
|
the Watch operation (which can fail) and for the CheckApply operation. While
|
||||||
|
they could have separate values, I've decided to use the same ones for both
|
||||||
|
until there's a proper reason to want to do something differently for the Watch
|
||||||
|
errors.
|
||||||
|
|
||||||
###Graph definition file
|
###Graph definition file
|
||||||
graph.yaml is the compiled graph definition file. The format is currently
|
graph.yaml is the compiled graph definition file. The format is currently
|
||||||
undocumented, but by looking through the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples)
|
undocumented, but by looking through the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples)
|
||||||
@@ -268,7 +422,7 @@ you can probably figure out most of it, as it's fairly intuitive.
|
|||||||
The main interface to the `mgmt` tool is the command line. For the most recent
|
The main interface to the `mgmt` tool is the command line. For the most recent
|
||||||
documentation, please run `mgmt --help`.
|
documentation, please run `mgmt --help`.
|
||||||
|
|
||||||
####`--file <graph.yaml>`
|
####`--yaml <graph.yaml>`
|
||||||
Point to a graph file to run.
|
Point to a graph file to run.
|
||||||
|
|
||||||
####`--converged-timeout <seconds>`
|
####`--converged-timeout <seconds>`
|
||||||
|
|||||||
47
Makefile
@@ -15,8 +15,8 @@
|
|||||||
# You should have received a copy of the GNU Affero General Public License
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
SHELL = /bin/bash
|
SHELL = /usr/bin/env bash
|
||||||
.PHONY: all version program path deps run race generate build clean test gofmt yamlfmt format docs rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms copr
|
.PHONY: all art cleanart version program path deps run race generate build clean test gofmt yamlfmt format docs rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms copr
|
||||||
.SILENT: clean
|
.SILENT: clean
|
||||||
|
|
||||||
SVERSION := $(or $(SVERSION),$(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --dirty --always))
|
SVERSION := $(or $(SVERSION),$(shell git describe --match '[0-9]*\.[0-9]*\.[0-9]*' --tags --dirty --always))
|
||||||
@@ -38,6 +38,42 @@ USERNAME := $(shell cat ~/.config/copr 2>/dev/null | grep username | awk -F '='
|
|||||||
SERVER = 'dl.fedoraproject.org'
|
SERVER = 'dl.fedoraproject.org'
|
||||||
REMOTE_PATH = 'pub/alt/$(USERNAME)/$(PROGRAM)'
|
REMOTE_PATH = 'pub/alt/$(USERNAME)/$(PROGRAM)'
|
||||||
|
|
||||||
|
#
|
||||||
|
# art
|
||||||
|
#
|
||||||
|
art: art/mgmt_logo_default_symbol.png art/mgmt_logo_default_tall.png art/mgmt_logo_default_wide.png art/mgmt_logo_reversed_symbol.png art/mgmt_logo_reversed_tall.png art/mgmt_logo_reversed_wide.png art/mgmt_logo_white_symbol.png art/mgmt_logo_white_tall.png art/mgmt_logo_white_wide.png
|
||||||
|
|
||||||
|
cleanart:
|
||||||
|
rm -f art/mgmt_logo_default_symbol.png art/mgmt_logo_default_tall.png art/mgmt_logo_default_wide.png art/mgmt_logo_reversed_symbol.png art/mgmt_logo_reversed_tall.png art/mgmt_logo_reversed_wide.png art/mgmt_logo_white_symbol.png art/mgmt_logo_white_tall.png art/mgmt_logo_white_wide.png
|
||||||
|
|
||||||
|
# NOTE: the widths are arbitrary
|
||||||
|
art/mgmt_logo_default_symbol.png: art/mgmt_logo_default_symbol.svg
|
||||||
|
inkscape --export-background='#ffffff' --without-gui --export-png "$@" --export-width 300 $(@:png=svg)
|
||||||
|
|
||||||
|
art/mgmt_logo_default_tall.png: art/mgmt_logo_default_tall.svg
|
||||||
|
inkscape --export-background='#ffffff' --without-gui --export-png "$@" --export-width 400 $(@:png=svg)
|
||||||
|
|
||||||
|
art/mgmt_logo_default_wide.png: art/mgmt_logo_default_wide.svg
|
||||||
|
inkscape --export-background='#ffffff' --without-gui --export-png "$@" --export-width 800 $(@:png=svg)
|
||||||
|
|
||||||
|
art/mgmt_logo_reversed_symbol.png: art/mgmt_logo_reversed_symbol.svg
|
||||||
|
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 300 $(@:png=svg)
|
||||||
|
|
||||||
|
art/mgmt_logo_reversed_tall.png: art/mgmt_logo_reversed_tall.svg
|
||||||
|
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 400 $(@:png=svg)
|
||||||
|
|
||||||
|
art/mgmt_logo_reversed_wide.png: art/mgmt_logo_reversed_wide.svg
|
||||||
|
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 800 $(@:png=svg)
|
||||||
|
|
||||||
|
art/mgmt_logo_white_symbol.png: art/mgmt_logo_white_symbol.svg
|
||||||
|
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 300 $(@:png=svg)
|
||||||
|
|
||||||
|
art/mgmt_logo_white_tall.png: art/mgmt_logo_white_tall.svg
|
||||||
|
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 400 $(@:png=svg)
|
||||||
|
|
||||||
|
art/mgmt_logo_white_wide.png: art/mgmt_logo_white_wide.svg
|
||||||
|
inkscape --export-background='#231f20' --without-gui --export-png "$@" --export-width 800 $(@:png=svg)
|
||||||
|
|
||||||
all: docs $(PROGRAM).static
|
all: docs $(PROGRAM).static
|
||||||
|
|
||||||
# show the current version
|
# show the current version
|
||||||
@@ -86,7 +122,8 @@ endif
|
|||||||
|
|
||||||
clean:
|
clean:
|
||||||
[ ! -e $(PROGRAM) ] || rm $(PROGRAM)
|
[ ! -e $(PROGRAM) ] || rm $(PROGRAM)
|
||||||
#rm -f *_stringer.go # generated by `go generate`
|
rm -f *_stringer.go # generated by `go generate`
|
||||||
|
rm -f *_mock.go # generated by `go generate`
|
||||||
|
|
||||||
test:
|
test:
|
||||||
./test.sh
|
./test.sh
|
||||||
@@ -95,7 +132,7 @@ gofmt:
|
|||||||
find . -maxdepth 3 -type f -name '*.go' -not -path './old/*' -not -path './tmp/*' -exec gofmt -w {} \;
|
find . -maxdepth 3 -type f -name '*.go' -not -path './old/*' -not -path './tmp/*' -exec gofmt -w {} \;
|
||||||
|
|
||||||
yamlfmt:
|
yamlfmt:
|
||||||
find . -type f -name '*.yaml' -not -path './old/*' -not -path './tmp/*' -not -path './omv.yaml' -exec ruby -e "require 'yaml'; x=YAML.load_file('{}').to_yaml.each_line.map(&:rstrip).join(10.chr)+10.chr; File.open('{}', 'w').write x" \;
|
find . -maxdepth 3 -type f -name '*.yaml' -not -path './old/*' -not -path './tmp/*' -not -path './omv.yaml' -exec ruby -e "require 'yaml'; x=YAML.load_file('{}').to_yaml.each_line.map(&:rstrip).join(10.chr)+10.chr; File.open('{}', 'w').write x" \;
|
||||||
|
|
||||||
format: gofmt yamlfmt
|
format: gofmt yamlfmt
|
||||||
|
|
||||||
@@ -147,7 +184,7 @@ $(SRPM): $(SPEC) $(SOURCE)
|
|||||||
$(SPEC): rpmbuild/ spec.in
|
$(SPEC): rpmbuild/ spec.in
|
||||||
@echo Running templater...
|
@echo Running templater...
|
||||||
#cat spec.in > $(SPEC)
|
#cat spec.in > $(SPEC)
|
||||||
sed -e s/__PROGRAM__/$(PROGRAM)/ -e s/__VERSION__/$(VERSION)/ -e s/__RELEASE__/$(RELEASE)/ < spec.in > $(SPEC)
|
sed -e s/__PROGRAM__/$(PROGRAM)/g -e s/__VERSION__/$(VERSION)/g -e s/__RELEASE__/$(RELEASE)/g < spec.in > $(SPEC)
|
||||||
# append a changelog to the .spec file
|
# append a changelog to the .spec file
|
||||||
git log --format="* %cd %aN <%aE>%n- (%h) %s%d%n" --date=local | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //' >> $(SPEC)
|
git log --format="* %cd %aN <%aE>%n- (%h) %s%d%n" --date=local | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //' >> $(SPEC)
|
||||||
|
|
||||||
|
|||||||
163
Puppet.md
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
#mgmt Puppet support
|
||||||
|
|
||||||
|
1. [Prerequisites](#prerequisites)
|
||||||
|
* [Testing the Puppet side](#testing-the-puppet-side)
|
||||||
|
2. [Writing a suitable manifest](#writing-a-suitable-manifest)
|
||||||
|
* [Unsupported attributes](#unsupported-attributes)
|
||||||
|
* [Unsupported resources](#unsupported-resources)
|
||||||
|
* [Avoiding common warnings](#avoiding-common-warnings)
|
||||||
|
3. [Configuring Puppet](#configuring-puppet)
|
||||||
|
4. [Caveats](#caveats)
|
||||||
|
|
||||||
|
`mgmt` can use Puppet as its source for the configuration graph.
|
||||||
|
This document goes into detail on how this works, and lists
|
||||||
|
some pitfalls and limitations.
|
||||||
|
|
||||||
|
For basic instructions on how to use the Puppet support, see
|
||||||
|
the [main documentation](DOCUMENTATION.md#puppet-support).
|
||||||
|
|
||||||
|
##Prerequisites
|
||||||
|
|
||||||
|
You need Puppet installed in your system. It is not important how you
|
||||||
|
get it. On the most common Linux distributions, you can use packages
|
||||||
|
from the OS maintainer, or upstream Puppet repositories. An alternative
|
||||||
|
that will also work on OSX is the `puppet` Ruby gem. It also has the
|
||||||
|
advantage that you can install any desired version in your home directory
|
||||||
|
or any other location.
|
||||||
|
|
||||||
|
Any release of Puppet's 3.x and 4.x series should be suitable for use with
|
||||||
|
`mgmt`. Most importantly, make sure to install the `ffrank-mgmtgraph` Puppet
|
||||||
|
module (referred to below as "the translator module").
|
||||||
|
|
||||||
|
puppet module install ffrank-mgmtgraph
|
||||||
|
|
||||||
|
Please note that the module is not required on your Puppet master (if you
|
||||||
|
use a master/agent setup). It's needed on the machine that runs `mgmt`.
|
||||||
|
You can install the module on the master anyway, so that it gets distributed
|
||||||
|
to your agents through Puppet's `pluginsync` mechanism.
|
||||||
|
|
||||||
|
###Testing the Puppet side
|
||||||
|
|
||||||
|
The following command should run successfully and print a YAML hash on your
|
||||||
|
terminal:
|
||||||
|
|
||||||
|
```puppet
|
||||||
|
puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": ensure => present }'
|
||||||
|
```
|
||||||
|
|
||||||
|
You can use this CLI to test any manifests before handing them straight
|
||||||
|
to `mgmt`.
|
||||||
|
|
||||||
|
##Writing a suitable manifest
|
||||||
|
|
||||||
|
###Unsupported attributes
|
||||||
|
|
||||||
|
`mgmt` inherited its resource module from Puppet, so by and large, it's quite
|
||||||
|
possible to express `mgmt` graphs in terms of Puppet manifests. However,
|
||||||
|
there isn't (and likely never will be) full feature parity between the
|
||||||
|
respective resource types. In consequence, a manifest can have semantics that
|
||||||
|
cannot be transferred to `mgmt`.
|
||||||
|
|
||||||
|
For example, at the time of writing this, the `file` type in `mgmt` had no
|
||||||
|
notion of permissions (the file `mode`) yet. This lead to the following
|
||||||
|
warning (among others that will be discussed below):
|
||||||
|
|
||||||
|
$ puppet mgmtgraph print --code 'file { "/tmp/foo": mode => "0600" }'
|
||||||
|
Warning: cannot translate: File[/tmp/foo] { mode => "600" } (attribute is ignored)
|
||||||
|
|
||||||
|
This is a heads-up for the user, because the resulting `mgmt` graph will
|
||||||
|
in fact not pass this information to the `/tmp/foo` file resource, and
|
||||||
|
`mgmt` will ignore this file's permissions. Including such attributes in
|
||||||
|
manifests that are written expressly for `mgmt` is not sensible and should
|
||||||
|
be avoided.
|
||||||
|
|
||||||
|
###Unsupported resources
|
||||||
|
|
||||||
|
Puppet has a fairly large number of
|
||||||
|
[built-in types](https://docs.puppet.com/puppet/latest/reference/type.html),
|
||||||
|
and countless more are available through
|
||||||
|
[modules](https://forge.puppet.com/). It's unlikely that all of them will
|
||||||
|
eventually receive native counterparts in `mgmt`.
|
||||||
|
|
||||||
|
When encountering an unknown resource, the translator module will replace
|
||||||
|
it with an `exec` resource in its output. This resource will run the equivalent
|
||||||
|
of a `puppet resource` command to make Puppet apply the original resource
|
||||||
|
itself. This has quite abysmal performance, because processing such a
|
||||||
|
resource requires the forking of at least one Puppet process (two if it
|
||||||
|
is found to be out of sync). This comes with considerable overhead.
|
||||||
|
On most systems, starting up any Puppet command takes several seconds.
|
||||||
|
Compared to the split second that the actual work usually takes,
|
||||||
|
this overhead can amount to several orders of magnitude.
|
||||||
|
|
||||||
|
Avoid Puppet types that `mgmt` does not implement (yet).
|
||||||
|
|
||||||
|
###Avoiding common warnings
|
||||||
|
|
||||||
|
Many resource parameters in Puppet take default values. For the most part,
|
||||||
|
the translator module just ignores them. However, there are cases in which
|
||||||
|
Puppet will default to convenient behavior that `mgmt` cannot quite replicate.
|
||||||
|
For example, translating a plain `file` resource will lead to a warning message:
|
||||||
|
|
||||||
|
$ puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": }'
|
||||||
|
Warning: File[/tmp/mgmt-test] uses the 'puppet' file bucket, which mgmt cannot do. There will be no backup copies!
|
||||||
|
|
||||||
|
The reason is that per default, Puppet assumes the following parameter value
|
||||||
|
(among others)
|
||||||
|
|
||||||
|
```puppet
|
||||||
|
file { "/tmp/mgmt-test":
|
||||||
|
backup => 'puppet',
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
To avoid this, specify the parameter explicitly:
|
||||||
|
|
||||||
|
$ puppet mgmtgraph print --code 'file { "/tmp/mgmt-test": backup => false }'
|
||||||
|
|
||||||
|
This is tedious in a more complex manifest. A good simplification is the
|
||||||
|
following [resource default](https://docs.puppet.com/puppet/latest/reference/lang_defaults.html)
|
||||||
|
anywhere on the top scope of your manifest:
|
||||||
|
|
||||||
|
```puppet
|
||||||
|
File { backup => false }
|
||||||
|
```
|
||||||
|
|
||||||
|
If you encounter similar warnings from other types and/or parameters,
|
||||||
|
use the same approach to silence them if possible.
|
||||||
|
|
||||||
|
##Configuring Puppet
|
||||||
|
|
||||||
|
Since `mgmt` uses an actual Puppet CLI behind the scenes, you might
|
||||||
|
need to tweak some of Puppet's runtime options in order to make it
|
||||||
|
do what you want. Reasons for this could be among the following:
|
||||||
|
|
||||||
|
* You use the `--puppet agent` variant and need to configure
|
||||||
|
`servername`, `certname` and other master/agent-related options.
|
||||||
|
* You don't want runtime information to end up in the `vardir`
|
||||||
|
that is used by your regular `puppet agent`.
|
||||||
|
* You install specific Puppet modules for `mgmt` in a non-standard
|
||||||
|
location.
|
||||||
|
|
||||||
|
`mgmt` exposes only one Puppet option in order to allow you to
|
||||||
|
control all of them, through its `--puppet-conf` option. It allows
|
||||||
|
you to specify which `puppet.conf` file should be used during
|
||||||
|
translation.
|
||||||
|
|
||||||
|
mgmt run --puppet /opt/my-manifest.pp --puppet-conf /etc/mgmt/puppet.conf
|
||||||
|
|
||||||
|
Within this file, you can just specify any needed options in the
|
||||||
|
`[main]` section:
|
||||||
|
|
||||||
|
[main]
|
||||||
|
server=mgmt-master.example.net
|
||||||
|
vardir=/var/lib/mgmt/puppet
|
||||||
|
|
||||||
|
##Caveats
|
||||||
|
|
||||||
|
Please see the [README](https://github.com/ffrank/puppet-mgmtgraph/blob/master/README.md)
|
||||||
|
of the translator module for the current state of supported and unsupported
|
||||||
|
language features.
|
||||||
|
|
||||||
|
You should probably make sure to always use the latest release of
|
||||||
|
both `ffrank-mgmtgraph` and `ffrank-yamlresource` (the latter is
|
||||||
|
getting pulled in as a dependency of the former).
|
||||||
63
README.md
@@ -1,25 +1,47 @@
|
|||||||
# *mgmt*: This is: mgmt!
|
# *mgmt*: next generation config management!
|
||||||
|
|
||||||
|
[](art/)
|
||||||
|
|
||||||
[](https://goreportcard.com/report/github.com/purpleidea/mgmt)
|
[](https://goreportcard.com/report/github.com/purpleidea/mgmt)
|
||||||
[](http://travis-ci.org/purpleidea/mgmt)
|
[](http://travis-ci.org/purpleidea/mgmt)
|
||||||
[](DOCUMENTATION.md)
|
[](DOCUMENTATION.md)
|
||||||
|
[](https://godoc.org/github.com/purpleidea/mgmt)
|
||||||
[](https://webchat.freenode.net/?channels=#mgmtconfig)
|
[](https://webchat.freenode.net/?channels=#mgmtconfig)
|
||||||
[](https://ci.centos.org/job/purpleidea-mgmt/)
|
[](https://ci.centos.org/job/purpleidea-mgmt/)
|
||||||
[](https://copr.fedoraproject.org/coprs/purpleidea/mgmt/)
|
[](https://copr.fedoraproject.org/coprs/purpleidea/mgmt/)
|
||||||
|
[](https://aur.archlinux.org/packages/mgmt/)
|
||||||
|
|
||||||
## Community:
|
## Community:
|
||||||
Come join us on IRC in [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) on Freenode!
|
Come join us on IRC in [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) on Freenode!
|
||||||
You may like the [#mgmtconfig](https://twitter.com/hashtag/mgmtconfig) hashtag if you're on [Twitter](https://twitter.com/#!/purpleidea).
|
You may like the [#mgmtconfig](https://twitter.com/hashtag/mgmtconfig) hashtag if you're on [Twitter](https://twitter.com/#!/purpleidea).
|
||||||
|
|
||||||
|
## Status:
|
||||||
|
Mgmt is a fairly new project.
|
||||||
|
We're working towards being minimally useful for production environments.
|
||||||
|
We aren't feature complete for what we'd consider a 1.x release yet.
|
||||||
|
With your help you'll be able to influence our design and get us there sooner!
|
||||||
|
|
||||||
## Questions:
|
## Questions:
|
||||||
Please join the [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) IRC community!
|
Please join the [#mgmtconfig](https://webchat.freenode.net/?channels=#mgmtconfig) IRC community!
|
||||||
If you have a well phrased question that might benefit others, consider asking it by sending a patch to the documentation [FAQ](https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md#usage-and-frequently-asked-questions) section. I'll merge your question, and a patch with the answer!
|
If you have a well phrased question that might benefit others, consider asking it by sending a patch to the documentation [FAQ](https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md#usage-and-frequently-asked-questions) section. I'll merge your question, and a patch with the answer!
|
||||||
|
|
||||||
## Quick start:
|
## Quick start:
|
||||||
* Clone the repository recursively, eg: `git clone --recursive https://github.com/purpleidea/mgmt/`.
|
* Make sure you have golang version 1.6 or greater installed.
|
||||||
* Get the remaining golang dependencies on your own, or run `make deps` if you're comfortable with how we install them.
|
* If you do not have a GOPATH yet, create one and export it:
|
||||||
|
```
|
||||||
|
mkdir $HOME/gopath
|
||||||
|
export GOPATH=$HOME/gopath
|
||||||
|
```
|
||||||
|
* You might also want to add the GOPATH to your `~/.bashrc` or `~/.profile`.
|
||||||
|
* For more information you can read the [GOPATH documentation](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable).
|
||||||
|
* Next download the mgmt code base, and switch to that directory:
|
||||||
|
```
|
||||||
|
go get -u github.com/purpleidea/mgmt
|
||||||
|
cd $GOPATH/src/github.com/purpleidea/mgmt
|
||||||
|
```
|
||||||
|
* Get the remaining golang deps with `go get ./...`, or run `make deps` if you're comfortable with how we install them.
|
||||||
* Run `make build` to get a freshly built `mgmt` binary.
|
* Run `make build` to get a freshly built `mgmt` binary.
|
||||||
* Run `time ./mgmt run --file examples/graph0.yaml --converged-timeout=1` to try out a very simple example!
|
* Run `time ./mgmt run --yaml examples/graph0.yaml --converged-timeout=5 --tmp-prefix` to try out a very simple example!
|
||||||
* To run continuously in the default mode of operation, omit the `--converged-timeout` option.
|
* To run continuously in the default mode of operation, omit the `--converged-timeout` option.
|
||||||
* Have fun hacking on our future technology!
|
* Have fun hacking on our future technology!
|
||||||
|
|
||||||
@@ -27,7 +49,7 @@ If you have a well phrased question that might benefit others, consider asking i
|
|||||||
Please look in the [examples/](examples/) folder for more examples!
|
Please look in the [examples/](examples/) folder for more examples!
|
||||||
|
|
||||||
## Documentation:
|
## Documentation:
|
||||||
Please see: [DOCUMENTATION.md](DOCUMENTATION.md) or [PDF](https://pdfdoc-purpleidea.rhcloud.com/pdf/https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md).
|
Please see: the manually created [DOCUMENTATION.md](DOCUMENTATION.md) (also available as [PDF](https://pdfdoc-purpleidea.rhcloud.com/pdf/https://github.com/purpleidea/mgmt/blob/master/DOCUMENTATION.md)) and the automatically generated [GoDoc documentation](https://godoc.org/github.com/purpleidea/mgmt).
|
||||||
|
|
||||||
## Roadmap:
|
## Roadmap:
|
||||||
Please see: [TODO.md](TODO.md) for a list of upcoming work and TODO items.
|
Please see: [TODO.md](TODO.md) for a list of upcoming work and TODO items.
|
||||||
@@ -40,21 +62,22 @@ Bonus points if you provide a [shell](https://github.com/purpleidea/mgmt/tree/ma
|
|||||||
Feel free to read my article on [debugging golang programs](https://ttboj.wordpress.com/2016/02/15/debugging-golang-programs/).
|
Feel free to read my article on [debugging golang programs](https://ttboj.wordpress.com/2016/02/15/debugging-golang-programs/).
|
||||||
|
|
||||||
## Dependencies:
|
## Dependencies:
|
||||||
* golang 1.4 or higher (required, available in most distros)
|
* golang 1.6 or higher (required, available in most distros)
|
||||||
* golang libraries (required, available with `go get`)
|
* golang libraries (required, available with `go get`)
|
||||||
|
```
|
||||||
go get github.com/coreos/etcd/client
|
go get github.com/coreos/etcd/client
|
||||||
go get gopkg.in/yaml.v2
|
go get gopkg.in/yaml.v2
|
||||||
go get gopkg.in/fsnotify.v1
|
go get gopkg.in/fsnotify.v1
|
||||||
go get github.com/codegangsta/cli
|
go get github.com/urfave/cli
|
||||||
go get github.com/coreos/go-systemd/dbus
|
go get github.com/coreos/go-systemd/dbus
|
||||||
go get github.com/coreos/go-systemd/util
|
go get github.com/coreos/go-systemd/util
|
||||||
go get github.com/coreos/pkg/capnslog
|
go get github.com/coreos/pkg/capnslog
|
||||||
|
go get github.com/rgbkrk/libvirt-go
|
||||||
* stringer (required for building), available as a package on some platforms, otherwise via `go get`
|
```
|
||||||
|
* stringer (optional for building), available as a package on some platforms, otherwise via `go get`
|
||||||
go get golang.org/x/tools/cmd/stringer
|
```
|
||||||
|
go get golang.org/x/tools/cmd/stringer
|
||||||
|
```
|
||||||
* pandoc (optional, for building a pdf of the documentation)
|
* pandoc (optional, for building a pdf of the documentation)
|
||||||
* graphviz (optional, for building a visual representation of the graph)
|
* graphviz (optional, for building a visual representation of the graph)
|
||||||
|
|
||||||
@@ -79,6 +102,8 @@ We'd love to have your patches! Please send them by email, or as a pull request.
|
|||||||
* James Shubin; video: [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) ([Slides](https://annex.debconf.org//debconf-share/debconf16/slides/15-next-generation-config-mgmt.pdf))
|
* James Shubin; video: [Recording from DebConf16](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) ([Slides](https://annex.debconf.org//debconf-share/debconf16/slides/15-next-generation-config-mgmt.pdf))
|
||||||
* Felix Frank; blog: [Edging It All In (puppet and mgmt edges)](https://ffrank.github.io/features/2016/07/12/edging-it-all-in/)
|
* Felix Frank; blog: [Edging It All In (puppet and mgmt edges)](https://ffrank.github.io/features/2016/07/12/edging-it-all-in/)
|
||||||
* Felix Frank; blog: [Translating All The Things (puppet to mgmt translation warnings)](https://ffrank.github.io/features/2016/08/19/translating-all-the-things/)
|
* Felix Frank; blog: [Translating All The Things (puppet to mgmt translation warnings)](https://ffrank.github.io/features/2016/08/19/translating-all-the-things/)
|
||||||
|
* James Shubin; video: [Recording from systemd.conf 2016](https://www.youtube.com/watch?v=jB992Zb3nH0&html5=1)
|
||||||
|
* James Shubin; blog: [Remote execution in mgmt](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/)
|
||||||
|
|
||||||
##
|
##
|
||||||
|
|
||||||
|
|||||||
36
TODO.md
@@ -9,24 +9,48 @@ Let us know if you're working on one of the items.
|
|||||||
- [ ] install signal blocker [bug](https://github.com/hughsie/PackageKit/issues/109)
|
- [ ] install signal blocker [bug](https://github.com/hughsie/PackageKit/issues/109)
|
||||||
|
|
||||||
## File resource [bug](https://github.com/purpleidea/mgmt/issues/13) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
## File resource [bug](https://github.com/purpleidea/mgmt/issues/13) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
- [ ] ability to make/delete folders
|
- [ ] chown/chmod support [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
- [ ] recursive argument (can recursively watch/modify contents)
|
- [ ] user/group support [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
- [ ] force argument (can cause switch from file <-> folder)
|
- [ ] recurse limit support [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
- [ ] fanotify support [bug](https://github.com/go-fsnotify/fsnotify/issues/114)
|
- [ ] fanotify support [bug](https://github.com/go-fsnotify/fsnotify/issues/114)
|
||||||
|
|
||||||
|
## Svc resource
|
||||||
|
- [ ] base resource improvements
|
||||||
|
|
||||||
## Exec resource
|
## Exec resource
|
||||||
- [ ] base resource improvements
|
- [ ] base resource improvements
|
||||||
|
|
||||||
## Timer resource
|
## Timer resource
|
||||||
- [ ] base resource [bug](https://github.com/purpleidea/mgmt/issues/15) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
|
||||||
- [ ] reset on recompile
|
- [ ] reset on recompile
|
||||||
- [ ] increment algorithm (linear, exponential, etc...)
|
- [ ] increment algorithm (linear, exponential, etc...) [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
|
|
||||||
|
## User/Group resource
|
||||||
|
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
|
- [ ] automatic edges to file resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
|
|
||||||
## Virt (libvirt) resource
|
## Virt (libvirt) resource
|
||||||
- [ ] base resource [bug](https://github.com/purpleidea/mgmt/issues/25)
|
- [ ] base resource [bug](https://github.com/purpleidea/mgmt/issues/25)
|
||||||
|
|
||||||
|
## Net (systemd-networkd) resource
|
||||||
|
- [ ] base resource
|
||||||
|
|
||||||
|
## Nspawn (systemd-nspawn) resource
|
||||||
|
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
|
|
||||||
|
## Mount (systemd-mount) resource
|
||||||
|
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
|
|
||||||
|
## Cron (systemd-timer) resource
|
||||||
|
- [ ] base resource [:heart:](https://github.com/purpleidea/mgmt/labels/mgmtlove)
|
||||||
|
|
||||||
|
## Http resource
|
||||||
|
- [ ] base resource
|
||||||
|
|
||||||
## Etcd improvements
|
## Etcd improvements
|
||||||
- [ ] embedded etcd master
|
- [ ] fix embedded etcd master race
|
||||||
|
|
||||||
|
## Torrent/dht file transfer
|
||||||
|
- [ ] base plumbing
|
||||||
|
|
||||||
## Language improvements
|
## Language improvements
|
||||||
- [ ] language design
|
- [ ] language design
|
||||||
|
|||||||
2
art/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
*.png
|
||||||
|
misc/
|
||||||
BIN
art/mgmt.png
Normal file
|
After Width: | Height: | Size: 24 KiB |
94
art/mgmt_logo_default_symbol.svg
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 120 107.1" style="enable-background:new 0 0 120 107.1;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st10" x1="29.2" y1="24.1" x2="52.1" y2="42.8"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="27.7,27.8 24.9,20.5 32.6,21.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st5" cx="16.1" cy="12.2" r="12.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st10" x1="52.1" y1="42.1" x2="74.1" y2="80"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="70.1,80.9 76.9,84.8 76.8,77.1 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st10" x1="69.4" y1="46.7" x2="95.8" y2="52.4"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="69.7,50.7 63.9,45.6 71.3,43.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st10" x1="52.1" y1="42.8" x2="71.9" y2="30.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="73.1,34 76.6,27.1 68.9,27.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st10" x1="16.8" y1="49.6" x2="34.8" y2="46.5"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="34.3,50.5 40.3,45.6 33,42.9 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st10" x1="92.3" y1="79.5" x2="107.8" y2="54.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="96.1,80.6 89.3,84.3 89.5,76.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st10" x1="97.3" y1="36.5" x2="107.8" y2="54.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="94.6,39.4 94.5,31.7 101.2,35.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st3" cx="52.1" cy="42.8" r="12.1"/>
|
||||||
|
<circle class="st4" cx="12.2" cy="50.8" r="12.1"/>
|
||||||
|
<circle class="st7" cx="87.5" cy="21.7" r="12.1"/>
|
||||||
|
<circle class="st8" cx="83.5" cy="95" r="12.1"/>
|
||||||
|
<circle class="st6" cx="107.8" cy="54.2" r="12.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 2.8 KiB |
132
art/mgmt_logo_default_tall.svg
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 168.3 131.6" style="enable-background:new 0 0 168.3 131.6;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<path class="st3" d="M4.7,105l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9V124h-5v-12.1c0-1.1-0.2-1.9-0.5-2.4c-0.3-0.5-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6V124H9v-12.1
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4c-0.3-0.5-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4V124H0v-19H4.7z"/>
|
||||||
|
<path class="st3" d="M26.4,113.9c0-3.1,0.6-5.4,1.7-7s2.7-2.3,4.7-2.3c1.7,0,3.1,0.7,4,2L37,105h4.5v19c0,2.4-0.7,4.3-2,5.6
|
||||||
|
c-1.4,1.3-3.3,1.9-5.9,1.9c-1,0-2.1-0.2-3.3-0.6s-2-0.9-2.6-1.6l1.7-3.4c0.5,0.5,1.1,0.9,1.8,1.2c0.7,0.3,1.5,0.5,2.1,0.5
|
||||||
|
c1.1,0,1.9-0.3,2.4-0.8s0.7-1.4,0.7-2.6v-1.6c-0.9,1.2-2.2,1.9-3.7,1.9c-2,0-3.6-0.8-4.7-2.4s-1.7-3.8-1.7-6.7V113.9z
|
||||||
|
M31.4,115.2c0,1.8,0.2,3,0.7,3.8s1.2,1.2,2.2,1.2c1,0,1.8-0.4,2.3-1.1V110c-0.5-0.8-1.3-1.2-2.2-1.2c-1,0-1.7,0.4-2.2,1.2
|
||||||
|
s-0.7,2.1-0.7,3.9V115.2z"/>
|
||||||
|
<path class="st3" d="M50.1,105l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9V124h-5v-12.1c0-1.1-0.2-1.9-0.5-2.4s-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6V124h-5v-12.1
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4s-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4V124h-5v-19H50.1z"/>
|
||||||
|
<path class="st3" d="M78.2,100.3v4.7h2.5v3.7h-2.5v9.5c0,0.8,0.1,1.3,0.3,1.5c0.2,0.3,0.6,0.4,1.2,0.4c0.5,0,0.9,0,1.2-0.1
|
||||||
|
l0,3.9c-0.8,0.3-1.8,0.5-2.7,0.5c-3.2,0-4.8-1.8-4.9-5.5v-10.1h-2.2V105h2.2v-4.7H78.2z"/>
|
||||||
|
<path class="st4" d="M90.6,122.6c1.4,0,2.4-0.4,3.1-1.1c0.7-0.8,1.1-1.9,1.2-3.3h1.9c-0.1,1.9-0.7,3.5-1.9,4.6
|
||||||
|
c-1.1,1.1-2.6,1.7-4.3,1.7c-2.3,0-4-0.7-5.1-2.2s-1.7-3.6-1.8-6.4v-2.3c0-2.9,0.6-5.1,1.7-6.6s2.9-2.2,5.1-2.2
|
||||||
|
c1.9,0,3.4,0.6,4.5,1.8s1.7,2.9,1.7,5H95c-0.1-1.6-0.5-2.8-1.2-3.6s-1.8-1.3-3.1-1.3c-1.7,0-2.9,0.6-3.7,1.7s-1.2,2.9-1.2,5.2
|
||||||
|
v2.2c0,2.4,0.4,4.2,1.2,5.3S89,122.6,90.6,122.6z"/>
|
||||||
|
<path class="st4" d="M100.5,113.6c0-2.7,0.6-4.9,1.9-6.5s2.9-2.4,5.1-2.4c2.2,0,3.9,0.8,5.1,2.4s1.9,3.7,1.9,6.5v2
|
||||||
|
c0,2.8-0.6,5-1.9,6.5s-2.9,2.3-5.1,2.3c-2.1,0-3.8-0.8-5-2.3s-1.9-3.6-1.9-6.3V113.6z M102.5,115.5c0,2.2,0.4,3.9,1.3,5.2
|
||||||
|
c0.9,1.3,2.1,1.9,3.7,1.9c1.6,0,2.8-0.6,3.7-1.8s1.3-2.9,1.3-5.2v-2c0-2.2-0.4-3.9-1.3-5.2c-0.9-1.3-2.1-1.9-3.7-1.9
|
||||||
|
c-1.5,0-2.7,0.6-3.6,1.8s-1.3,2.9-1.4,5.1V115.5z"/>
|
||||||
|
<path class="st4" d="M121.1,105l0.1,3c0.6-1,1.3-1.9,2.2-2.5s1.9-0.9,3-0.9c3.3,0,5,2.2,5.1,6.6V124h-1.9v-12.5
|
||||||
|
c0-1.7-0.3-3-0.9-3.8c-0.6-0.8-1.5-1.2-2.8-1.2c-1,0-1.9,0.4-2.8,1.2s-1.4,1.8-1.9,3.2V124h-2v-19H121.1z"/>
|
||||||
|
<path class="st4" d="M138.2,124v-17.3h-2.6V105h2.6v-2.5c0-1.9,0.5-3.3,1.3-4.3s2-1.5,3.5-1.5c0.7,0,1.3,0.1,1.9,0.3l-0.1,1.8
|
||||||
|
c-0.5-0.1-1-0.2-1.6-0.2c-0.9,0-1.7,0.4-2.2,1.1s-0.8,1.7-0.8,3v2.3h3.7v1.8h-3.7V124H138.2z"/>
|
||||||
|
<path class="st4" d="M148,99.5c0-0.4,0.1-0.7,0.3-1s0.5-0.4,0.9-0.4s0.7,0.1,1,0.4s0.3,0.6,0.3,1s-0.1,0.7-0.3,1s-0.5,0.4-1,0.4
|
||||||
|
s-0.7-0.1-0.9-0.4S148,99.9,148,99.5z M150.2,124h-2v-19h2V124z"/>
|
||||||
|
<path class="st4" d="M155.3,113.6c0-3,0.5-5.2,1.5-6.7s2.6-2.2,4.6-2.2c2.2,0,3.8,1,4.9,3l0.1-2.7h1.8v19.6c0,2.3-0.6,4-1.6,5.2
|
||||||
|
c-1.1,1.2-2.7,1.8-4.7,1.8c-1,0-2.1-0.3-3.2-0.8s-1.9-1.1-2.4-1.9l0.9-1.4c1.3,1.5,2.8,2.2,4.5,2.2c1.6,0,2.7-0.4,3.5-1.3
|
||||||
|
c0.7-0.8,1.1-2.1,1.1-3.8v-3c-1.1,1.8-2.7,2.7-4.9,2.7c-2,0-3.5-0.7-4.5-2.2s-1.6-3.6-1.6-6.5V113.6z M157.2,115.4
|
||||||
|
c0,2.4,0.4,4.2,1.1,5.4s1.9,1.7,3.5,1.7c2.1,0,3.6-1,4.5-3v-9.7c-0.9-2.2-2.4-3.3-4.4-3.3c-1.6,0-2.8,0.6-3.5,1.7
|
||||||
|
s-1.1,2.9-1.1,5.3V115.4z"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st11" x1="58.9" y1="20.1" x2="77.9" y2="35.6"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="57.6,23.2 55.3,17.1 61.7,18.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st5" cx="48" cy="10.1" r="10.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st11" x1="77.9" y1="35.1" x2="96.2" y2="66.7"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="93,67.5 98.6,70.7 98.6,64.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st11" x1="92.3" y1="38.9" x2="114.4" y2="43.6"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="92.6,42.3 87.8,38 93.9,36 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st11" x1="77.9" y1="35.6" x2="94.5" y2="25.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="95.4,28.3 98.4,22.6 91.9,22.9 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st11" x1="48.5" y1="41.3" x2="63.5" y2="38.7"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="63.1,42.1 68.1,38 62,35.7 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st11" x1="111.4" y1="66.3" x2="124.4" y2="45.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="114.6,67.2 109,70.2 109.1,63.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st11" x1="115.6" y1="30.4" x2="124.4" y2="45.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="113.3,32.8 113.3,26.4 118.9,29.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st3" cx="77.9" cy="35.6" r="10.1"/>
|
||||||
|
<circle class="st4" cx="44.6" cy="42.4" r="10.1"/>
|
||||||
|
<circle class="st7" cx="107.4" cy="18.1" r="10.1"/>
|
||||||
|
<circle class="st8" cx="104.1" cy="79.1" r="10.1"/>
|
||||||
|
<circle class="st6" cx="124.4" cy="45.2" r="10.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 6.2 KiB |
132
art/mgmt_logo_default_wide.svg
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 260.4 71.4" style="enable-background:new 0 0 260.4 71.4;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<path class="st3" d="M96.7,25.7l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9v12.5h-5V32.6c0-1.1-0.2-1.9-0.5-2.4s-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6v12.9h-5V32.6
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4s-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4v13.8h-5v-19H96.7z"/>
|
||||||
|
<path class="st3" d="M118.5,34.6c0-3.1,0.6-5.4,1.7-7s2.7-2.3,4.7-2.3c1.7,0,3.1,0.7,4,2l0.2-1.7h4.5v19c0,2.4-0.7,4.3-2,5.6
|
||||||
|
c-1.4,1.3-3.3,1.9-5.9,1.9c-1,0-2.1-0.2-3.3-0.6s-2-0.9-2.6-1.6l1.7-3.4c0.5,0.5,1.1,0.9,1.8,1.2c0.7,0.3,1.5,0.5,2.1,0.5
|
||||||
|
c1.1,0,1.9-0.3,2.4-0.8s0.7-1.4,0.7-2.6v-1.6c-0.9,1.2-2.2,1.9-3.7,1.9c-2,0-3.6-0.8-4.7-2.4s-1.7-3.8-1.7-6.7V34.6z
|
||||||
|
M123.5,35.9c0,1.8,0.2,3,0.7,3.8s1.2,1.2,2.2,1.2c1,0,1.8-0.4,2.3-1.1v-9.1c-0.5-0.8-1.3-1.2-2.2-1.2c-1,0-1.7,0.4-2.2,1.2
|
||||||
|
s-0.7,2.1-0.7,3.9V35.9z"/>
|
||||||
|
<path class="st3" d="M142.2,25.7l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9v12.5h-5V32.6c0-1.1-0.2-1.9-0.5-2.4c-0.3-0.5-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6v12.9h-5V32.6
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4c-0.3-0.5-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4v13.8h-5v-19H142.2z"/>
|
||||||
|
<path class="st3" d="M170.3,21v4.7h2.5v3.7h-2.5v9.5c0,0.8,0.1,1.3,0.3,1.5c0.2,0.3,0.6,0.4,1.2,0.4c0.5,0,0.9,0,1.2-0.1l0,3.9
|
||||||
|
c-0.8,0.3-1.8,0.5-2.7,0.5c-3.2,0-4.8-1.8-4.9-5.5V29.4h-2.2v-3.7h2.2V21H170.3z"/>
|
||||||
|
<path class="st4" d="M182.7,43.2c1.4,0,2.4-0.4,3.1-1.1s1.1-1.8,1.2-3.3h1.9c-0.1,1.9-0.7,3.5-1.9,4.6s-2.6,1.7-4.3,1.7
|
||||||
|
c-2.3,0-4-0.7-5.1-2.2s-1.7-3.6-1.8-6.4v-2.3c0-2.9,0.6-5.1,1.7-6.6s2.9-2.2,5.1-2.2c1.9,0,3.4,0.6,4.5,1.8s1.7,2.9,1.7,5H187
|
||||||
|
c-0.1-1.6-0.5-2.8-1.2-3.6s-1.8-1.3-3.1-1.3c-1.7,0-2.9,0.6-3.7,1.7c-0.8,1.1-1.2,2.9-1.2,5.2v2.2c0,2.4,0.4,4.2,1.2,5.3
|
||||||
|
S181,43.2,182.7,43.2z"/>
|
||||||
|
<path class="st4" d="M192.6,34.2c0-2.7,0.6-4.9,1.9-6.5s2.9-2.4,5.1-2.4c2.2,0,3.9,0.8,5.1,2.4c1.2,1.6,1.9,3.7,1.9,6.5v2
|
||||||
|
c0,2.8-0.6,5-1.9,6.5s-2.9,2.3-5.1,2.3s-3.8-0.8-5-2.3s-1.9-3.6-1.9-6.3V34.2z M194.6,36.2c0,2.2,0.4,3.9,1.3,5.2
|
||||||
|
c0.9,1.3,2.1,1.9,3.7,1.9c1.6,0,2.8-0.6,3.7-1.8c0.8-1.2,1.3-2.9,1.3-5.2v-2c0-2.2-0.4-3.9-1.3-5.2c-0.9-1.3-2.1-1.9-3.7-1.9
|
||||||
|
c-1.5,0-2.7,0.6-3.6,1.8c-0.9,1.2-1.3,2.9-1.4,5.1V36.2z"/>
|
||||||
|
<path class="st4" d="M213.2,25.7l0.1,3c0.6-1,1.3-1.9,2.2-2.5s1.9-0.9,3-0.9c3.3,0,5,2.2,5.1,6.6v12.7h-1.9V32.2
|
||||||
|
c0-1.7-0.3-3-0.9-3.8c-0.6-0.8-1.5-1.2-2.8-1.2c-1,0-1.9,0.4-2.8,1.2s-1.4,1.8-1.9,3.2v13.2h-2v-19H213.2z"/>
|
||||||
|
<path class="st4" d="M230.3,44.7V27.4h-2.6v-1.8h2.6v-2.5c0-1.9,0.5-3.3,1.3-4.3s2-1.5,3.5-1.5c0.7,0,1.3,0.1,1.9,0.3l-0.1,1.8
|
||||||
|
c-0.5-0.1-1-0.2-1.6-0.2c-0.9,0-1.7,0.4-2.2,1.1s-0.8,1.7-0.8,3v2.3h3.7v1.8h-3.7v17.3H230.3z"/>
|
||||||
|
<path class="st4" d="M240.1,20.2c0-0.4,0.1-0.7,0.3-1s0.5-0.4,0.9-0.4s0.7,0.1,1,0.4s0.3,0.6,0.3,1s-0.1,0.7-0.3,1
|
||||||
|
s-0.5,0.4-1,0.4s-0.7-0.1-0.9-0.4S240.1,20.6,240.1,20.2z M242.3,44.7h-2v-19h2V44.7z"/>
|
||||||
|
<path class="st4" d="M247.4,34.3c0-3,0.5-5.2,1.5-6.7s2.6-2.2,4.6-2.2c2.2,0,3.8,1,4.9,3l0.1-2.7h1.8v19.6c0,2.3-0.6,4-1.6,5.2
|
||||||
|
s-2.7,1.8-4.7,1.8c-1,0-2.1-0.3-3.2-0.8s-1.9-1.1-2.4-1.9l0.9-1.4c1.3,1.5,2.8,2.2,4.5,2.2c1.6,0,2.7-0.4,3.5-1.3
|
||||||
|
c0.7-0.8,1.1-2.1,1.1-3.8v-3c-1.1,1.8-2.7,2.7-4.9,2.7c-2,0-3.5-0.7-4.5-2.2s-1.6-3.6-1.6-6.5V34.3z M249.3,36.1
|
||||||
|
c0,2.4,0.4,4.2,1.1,5.4s1.9,1.7,3.5,1.7c2.1,0,3.6-1,4.5-3v-9.7c-0.9-2.2-2.4-3.3-4.4-3.3c-1.6,0-2.8,0.6-3.5,1.7
|
||||||
|
s-1.1,2.9-1.1,5.3V36.1z"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st12" x1="19.5" y1="16" x2="34.7" y2="28.5"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="18.4,18.5 16.6,13.7 21.7,14.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st5" cx="10.8" cy="8.1" r="8.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st12" x1="34.7" y1="28" x2="49.4" y2="53.3"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="46.8,54 51.2,56.5 51.2,51.4 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st12" x1="46.2" y1="31.1" x2="63.9" y2="34.9"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="46.4,33.8 42.6,30.4 47.5,28.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st12" x1="34.7" y1="28.5" x2="47.9" y2="20.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="48.7,22.7 51.1,18.1 45.9,18.3 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st12" x1="11.2" y1="33.1" x2="23.2" y2="31"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="22.9,33.7 26.8,30.4 22,28.6 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st12" x1="61.5" y1="53" x2="71.9" y2="36.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="64.1,53.7 59.5,56.2 59.7,51 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st12" x1="64.2" y1="24.2" x2="70.7" y2="33.9"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st9" points="62.5,26.3 62.2,21.1 66.8,23.4 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st3" cx="34.7" cy="28.5" r="8.1"/>
|
||||||
|
<circle class="st4" cx="8.1" cy="33.9" r="8.1"/>
|
||||||
|
<circle class="st7" cx="58.3" cy="14.5" r="8.1"/>
|
||||||
|
<circle class="st8" cx="55.7" cy="63.3" r="8.1"/>
|
||||||
|
<circle class="st6" cx="71.9" cy="36.1" r="8.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 6.2 KiB |
94
art/mgmt_logo_reversed_symbol.svg
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 120 107.1" style="enable-background:new 0 0 120 107.1;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st13" x1="29.2" y1="24.1" x2="52.1" y2="42.8"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="27.7,27.8 24.9,20.5 32.6,21.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st3" cx="16.1" cy="12.2" r="12.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st13" x1="52.1" y1="42.1" x2="74.1" y2="80"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="70.1,80.9 76.9,84.8 76.8,77.1 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st13" x1="69.4" y1="46.7" x2="95.8" y2="52.4"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="69.7,50.7 63.9,45.6 71.3,43.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st13" x1="52.1" y1="42.8" x2="71.9" y2="30.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="73.1,34 76.6,27.1 68.9,27.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st13" x1="16.8" y1="49.6" x2="34.8" y2="46.5"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="34.3,50.5 40.3,45.6 33,42.9 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st13" x1="92.3" y1="79.5" x2="107.8" y2="54.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="96.1,80.6 89.3,84.3 89.5,76.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st13" x1="97.2" y1="36.3" x2="107.8" y2="54.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="94.4,39.3 94.3,31.5 101.1,35.3 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st5" cx="52.1" cy="42.8" r="12.1"/>
|
||||||
|
<circle class="st4" cx="12.2" cy="50.8" r="12.1"/>
|
||||||
|
<circle class="st7" cx="87.5" cy="21.7" r="12.1"/>
|
||||||
|
<circle class="st8" cx="83.5" cy="95" r="12.1"/>
|
||||||
|
<circle class="st6" cx="107.8" cy="54.2" r="12.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 2.8 KiB |
132
art/mgmt_logo_reversed_tall.svg
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 168.3 133" style="enable-background:new 0 0 168.3 133;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<path class="st0" d="M4.7,106.4l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9v12.5h-5v-12.1c0-1.1-0.2-1.9-0.5-2.4c-0.3-0.5-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6v12.9H9v-12.1
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4c-0.3-0.5-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4v13.8H0v-19H4.7z"/>
|
||||||
|
<path class="st0" d="M26.4,115.3c0-3.1,0.6-5.4,1.7-7s2.7-2.3,4.7-2.3c1.7,0,3.1,0.7,4,2l0.2-1.7h4.5v19c0,2.4-0.7,4.3-2,5.6
|
||||||
|
c-1.4,1.3-3.3,1.9-5.9,1.9c-1,0-2.1-0.2-3.3-0.6s-2-0.9-2.6-1.6l1.7-3.4c0.5,0.5,1.1,0.9,1.8,1.2c0.7,0.3,1.5,0.5,2.1,0.5
|
||||||
|
c1.1,0,1.9-0.3,2.4-0.8c0.5-0.5,0.7-1.4,0.7-2.6v-1.6c-0.9,1.2-2.2,1.9-3.7,1.9c-2,0-3.6-0.8-4.7-2.4s-1.7-3.8-1.7-6.7V115.3z
|
||||||
|
M31.4,116.6c0,1.8,0.2,3,0.7,3.8s1.2,1.2,2.2,1.2c1,0,1.8-0.4,2.3-1.1v-9.1c-0.5-0.8-1.3-1.2-2.2-1.2c-1,0-1.7,0.4-2.2,1.2
|
||||||
|
s-0.7,2.1-0.7,3.9V116.6z"/>
|
||||||
|
<path class="st0" d="M50.1,106.4l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9v12.5h-5v-12.1c0-1.1-0.2-1.9-0.5-2.4s-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6v12.9h-5v-12.1
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4s-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4v13.8h-5v-19H50.1z"/>
|
||||||
|
<path class="st0" d="M78.2,101.7v4.7h2.5v3.7h-2.5v9.5c0,0.8,0.1,1.3,0.3,1.5c0.2,0.3,0.6,0.4,1.2,0.4c0.5,0,0.9,0,1.2-0.1
|
||||||
|
l0,3.9c-0.8,0.3-1.8,0.5-2.7,0.5c-3.2,0-4.8-1.8-4.9-5.5v-10.1h-2.2v-3.7h2.2v-4.7H78.2z"/>
|
||||||
|
<path class="st4" d="M90.6,124c1.4,0,2.4-0.4,3.1-1.1c0.7-0.8,1.1-1.9,1.2-3.3h1.9c-0.1,1.9-0.7,3.5-1.9,4.6
|
||||||
|
c-1.1,1.1-2.6,1.7-4.3,1.7c-2.3,0-4-0.7-5.1-2.2s-1.7-3.6-1.8-6.4v-2.3c0-2.9,0.6-5.1,1.7-6.6s2.9-2.2,5.1-2.2
|
||||||
|
c1.9,0,3.4,0.6,4.5,1.8s1.7,2.9,1.7,5H95c-0.1-1.6-0.5-2.8-1.2-3.6s-1.8-1.3-3.1-1.3c-1.7,0-2.9,0.6-3.7,1.7s-1.2,2.9-1.2,5.2
|
||||||
|
v2.2c0,2.4,0.4,4.2,1.2,5.3S89,124,90.6,124z"/>
|
||||||
|
<path class="st4" d="M100.5,115c0-2.7,0.6-4.9,1.9-6.5s2.9-2.4,5.1-2.4c2.2,0,3.9,0.8,5.1,2.4s1.9,3.7,1.9,6.5v2
|
||||||
|
c0,2.8-0.6,5-1.9,6.5s-2.9,2.3-5.1,2.3c-2.1,0-3.8-0.8-5-2.3s-1.9-3.6-1.9-6.3V115z M102.5,116.9c0,2.2,0.4,3.9,1.3,5.2
|
||||||
|
c0.9,1.3,2.1,1.9,3.7,1.9c1.6,0,2.8-0.6,3.7-1.8s1.3-2.9,1.3-5.2v-2c0-2.2-0.4-3.9-1.3-5.2c-0.9-1.3-2.1-1.9-3.7-1.9
|
||||||
|
c-1.5,0-2.7,0.6-3.6,1.8s-1.3,2.9-1.4,5.1V116.9z"/>
|
||||||
|
<path class="st4" d="M121.1,106.4l0.1,3c0.6-1,1.3-1.9,2.2-2.5s1.9-0.9,3-0.9c3.3,0,5,2.2,5.1,6.6v12.7h-1.9v-12.5
|
||||||
|
c0-1.7-0.3-3-0.9-3.8c-0.6-0.8-1.5-1.2-2.8-1.2c-1,0-1.9,0.4-2.8,1.2s-1.4,1.8-1.9,3.2v13.2h-2v-19H121.1z"/>
|
||||||
|
<path class="st4" d="M138.2,125.4v-17.3h-2.6v-1.8h2.6v-2.5c0-1.9,0.5-3.3,1.3-4.3s2-1.5,3.5-1.5c0.7,0,1.3,0.1,1.9,0.3
|
||||||
|
l-0.1,1.8c-0.5-0.1-1-0.2-1.6-0.2c-0.9,0-1.7,0.4-2.2,1.1s-0.8,1.7-0.8,3v2.3h3.7v1.8h-3.7v17.3H138.2z"/>
|
||||||
|
<path class="st4" d="M148,100.9c0-0.4,0.1-0.7,0.3-1s0.5-0.4,0.9-0.4s0.7,0.1,1,0.4s0.3,0.6,0.3,1s-0.1,0.7-0.3,1
|
||||||
|
s-0.5,0.4-1,0.4s-0.7-0.1-0.9-0.4S148,101.3,148,100.9z M150.2,125.4h-2v-19h2V125.4z"/>
|
||||||
|
<path class="st4" d="M155.3,115c0-3,0.5-5.2,1.5-6.7s2.6-2.2,4.6-2.2c2.2,0,3.8,1,4.9,3l0.1-2.7h1.8V126c0,2.3-0.6,4-1.6,5.2
|
||||||
|
c-1.1,1.2-2.7,1.8-4.7,1.8c-1,0-2.1-0.3-3.2-0.8s-1.9-1.1-2.4-1.9l0.9-1.4c1.3,1.5,2.8,2.2,4.5,2.2c1.6,0,2.7-0.4,3.5-1.3
|
||||||
|
c0.7-0.8,1.1-2.1,1.1-3.8v-3c-1.1,1.8-2.7,2.7-4.9,2.7c-2,0-3.5-0.7-4.5-2.2s-1.6-3.6-1.6-6.5V115z M157.2,116.8
|
||||||
|
c0,2.4,0.4,4.2,1.1,5.4s1.9,1.7,3.5,1.7c2.1,0,3.6-1,4.5-3v-9.7c-0.9-2.2-2.4-3.3-4.4-3.3c-1.6,0-2.8,0.6-3.5,1.7
|
||||||
|
s-1.1,2.9-1.1,5.3V116.8z"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st16" x1="58.9" y1="20.1" x2="77.9" y2="35.6"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="57.6,23.2 55.3,17.1 61.7,18.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st3" cx="48" cy="10.1" r="10.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st16" x1="77.9" y1="35.1" x2="96.2" y2="66.7"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="93,67.5 98.6,70.7 98.6,64.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st16" x1="92.3" y1="38.9" x2="114.4" y2="43.6"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="92.6,42.3 87.8,38 93.9,36 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st16" x1="77.9" y1="35.6" x2="94.5" y2="25.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="95.4,28.3 98.4,22.6 91.9,22.9 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st16" x1="48.5" y1="41.3" x2="63.5" y2="38.7"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="63.1,42.1 68.1,38 62,35.7 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st16" x1="111.4" y1="66.3" x2="124.4" y2="45.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="114.6,67.2 109,70.2 109.1,63.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st16" x1="115.5" y1="30.5" x2="124.4" y2="45.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="113.2,32.9 113.1,26.5 118.8,29.6 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st5" cx="77.9" cy="35.6" r="10.1"/>
|
||||||
|
<circle class="st4" cx="44.6" cy="42.4" r="10.1"/>
|
||||||
|
<circle class="st7" cx="107.4" cy="18.1" r="10.1"/>
|
||||||
|
<circle class="st8" cx="104.1" cy="79.1" r="10.1"/>
|
||||||
|
<circle class="st6" cx="124.4" cy="45.2" r="10.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 6.2 KiB |
132
art/mgmt_logo_reversed_wide.svg
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 260.4 71.4" style="enable-background:new 0 0 260.4 71.4;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<path class="st0" d="M96.7,27.6l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9v12.5h-5V34.4c0-1.1-0.2-1.9-0.5-2.4c-0.3-0.5-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6v12.9h-5V34.5
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4c-0.3-0.5-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4v13.8h-5v-19H96.7z"/>
|
||||||
|
<path class="st0" d="M118.5,36.5c0-3.1,0.6-5.4,1.7-7s2.7-2.3,4.7-2.3c1.7,0,3.1,0.7,4,2l0.2-1.7h4.5v19c0,2.4-0.7,4.3-2,5.6
|
||||||
|
c-1.4,1.3-3.3,1.9-5.9,1.9c-1,0-2.1-0.2-3.3-0.6s-2-0.9-2.6-1.6l1.7-3.4c0.5,0.5,1.1,0.9,1.8,1.2c0.7,0.3,1.5,0.5,2.1,0.5
|
||||||
|
c1.1,0,1.9-0.3,2.4-0.8s0.7-1.4,0.7-2.6v-1.6c-0.9,1.2-2.2,1.9-3.7,1.9c-2,0-3.6-0.8-4.7-2.4s-1.7-3.8-1.7-6.7V36.5z
|
||||||
|
M123.5,37.7c0,1.8,0.2,3,0.7,3.8s1.2,1.2,2.2,1.2c1,0,1.8-0.4,2.3-1.1v-9.1c-0.5-0.8-1.3-1.2-2.2-1.2c-1,0-1.7,0.4-2.2,1.2
|
||||||
|
c-0.5,0.8-0.7,2.1-0.7,3.9V37.7z"/>
|
||||||
|
<path class="st0" d="M142.2,27.6l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9v12.5h-5V34.4c0-1.1-0.2-1.9-0.5-2.4c-0.3-0.5-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6v12.9h-5V34.5
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4c-0.3-0.5-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4v13.8h-5v-19H142.2z"/>
|
||||||
|
<path class="st0" d="M170.3,22.9v4.7h2.5v3.7h-2.5v9.5c0,0.8,0.1,1.3,0.3,1.5c0.2,0.3,0.6,0.4,1.2,0.4c0.5,0,0.9,0,1.2-0.1
|
||||||
|
l0,3.9c-0.8,0.3-1.8,0.5-2.7,0.5c-3.2,0-4.8-1.8-4.9-5.5V31.3h-2.2v-3.7h2.2v-4.7H170.3z"/>
|
||||||
|
<path class="st4" d="M182.7,45.1c1.4,0,2.4-0.4,3.1-1.1c0.7-0.8,1.1-1.9,1.2-3.3h1.9c-0.1,1.9-0.7,3.5-1.9,4.6s-2.6,1.7-4.3,1.7
|
||||||
|
c-2.3,0-4-0.7-5.1-2.2s-1.7-3.6-1.8-6.4V36c0-2.9,0.6-5.1,1.7-6.6s2.9-2.2,5.1-2.2c1.9,0,3.4,0.6,4.5,1.8s1.7,2.9,1.7,5H187
|
||||||
|
c-0.1-1.6-0.5-2.8-1.2-3.6s-1.8-1.3-3.1-1.3c-1.7,0-2.9,0.6-3.7,1.7s-1.2,2.9-1.2,5.2v2.2c0,2.4,0.4,4.2,1.2,5.3
|
||||||
|
S181,45.1,182.7,45.1z"/>
|
||||||
|
<path class="st4" d="M192.6,36.1c0-2.7,0.6-4.9,1.9-6.5s2.9-2.4,5.1-2.4c2.2,0,3.9,0.8,5.1,2.4c1.2,1.6,1.9,3.7,1.9,6.5v2
|
||||||
|
c0,2.8-0.6,5-1.9,6.5s-2.9,2.3-5.1,2.3s-3.8-0.8-5-2.3s-1.9-3.6-1.9-6.3V36.1z M194.6,38.1c0,2.2,0.4,3.9,1.3,5.2
|
||||||
|
c0.9,1.3,2.1,1.9,3.7,1.9c1.6,0,2.8-0.6,3.7-1.8c0.8-1.2,1.3-2.9,1.3-5.2v-2c0-2.2-0.4-3.9-1.3-5.2c-0.9-1.3-2.1-1.9-3.7-1.9
|
||||||
|
c-1.5,0-2.7,0.6-3.6,1.8c-0.9,1.2-1.3,2.9-1.4,5.1V38.1z"/>
|
||||||
|
<path class="st4" d="M213.2,27.6l0.1,3c0.6-1,1.3-1.9,2.2-2.5s1.9-0.9,3-0.9c3.3,0,5,2.2,5.1,6.6v12.7h-1.9V34.1
|
||||||
|
c0-1.7-0.3-3-0.9-3.8c-0.6-0.8-1.5-1.2-2.8-1.2c-1,0-1.9,0.4-2.8,1.2s-1.4,1.8-1.9,3.2v13.2h-2v-19H213.2z"/>
|
||||||
|
<path class="st4" d="M230.3,46.6V29.3h-2.6v-1.8h2.6v-2.5c0-1.9,0.5-3.3,1.3-4.3s2-1.5,3.5-1.5c0.7,0,1.3,0.1,1.9,0.3l-0.1,1.8
|
||||||
|
c-0.5-0.1-1-0.2-1.6-0.2c-0.9,0-1.7,0.4-2.2,1.1s-0.8,1.7-0.8,3v2.3h3.7v1.8h-3.7v17.3H230.3z"/>
|
||||||
|
<path class="st4" d="M240.1,22.1c0-0.4,0.1-0.7,0.3-1s0.5-0.4,0.9-0.4s0.7,0.1,1,0.4s0.3,0.6,0.3,1s-0.1,0.7-0.3,1
|
||||||
|
s-0.5,0.4-1,0.4s-0.7-0.1-0.9-0.4S240.1,22.5,240.1,22.1z M242.3,46.6h-2v-19h2V46.6z"/>
|
||||||
|
<path class="st4" d="M247.4,36.2c0-3,0.5-5.2,1.5-6.7s2.6-2.2,4.6-2.2c2.2,0,3.8,1,4.9,3l0.1-2.7h1.8v19.6c0,2.3-0.6,4-1.6,5.2
|
||||||
|
s-2.7,1.8-4.7,1.8c-1,0-2.1-0.3-3.2-0.8s-1.9-1.1-2.4-1.9l0.9-1.4c1.3,1.5,2.8,2.2,4.5,2.2c1.6,0,2.7-0.4,3.5-1.3
|
||||||
|
c0.7-0.8,1.1-2.1,1.1-3.8v-3c-1.1,1.8-2.7,2.7-4.9,2.7c-2,0-3.5-0.7-4.5-2.2s-1.6-3.6-1.6-6.5V36.2z M249.3,38
|
||||||
|
c0,2.4,0.4,4.2,1.1,5.4s1.9,1.7,3.5,1.7c2.1,0,3.6-1,4.5-3v-9.7c-0.9-2.2-2.4-3.3-4.4-3.3c-1.6,0-2.8,0.6-3.5,1.7
|
||||||
|
s-1.1,2.9-1.1,5.3V38z"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st15" x1="19.5" y1="16" x2="34.7" y2="28.5"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="18.4,18.5 16.6,13.7 21.7,14.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st3" cx="10.8" cy="8.1" r="8.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st15" x1="34.7" y1="28" x2="49.4" y2="53.3"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="46.8,54 51.2,56.5 51.2,51.4 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st15" x1="46.2" y1="31.1" x2="63.9" y2="34.9"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="46.4,33.8 42.6,30.4 47.5,28.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st15" x1="34.7" y1="28.5" x2="47.9" y2="20.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="48.7,22.7 51.1,18.1 45.9,18.3 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st15" x1="11.2" y1="33.1" x2="23.2" y2="31"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="22.9,33.7 26.8,30.4 22,28.6 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st15" x1="61.5" y1="53" x2="71.9" y2="36.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="64.1,53.7 59.5,56.2 59.7,51 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st15" x1="64.8" y1="24.4" x2="71.9" y2="36.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st14" points="63,26.4 62.9,21.2 67.4,23.7 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st5" cx="34.7" cy="28.5" r="8.1"/>
|
||||||
|
<circle class="st4" cx="8.1" cy="33.9" r="8.1"/>
|
||||||
|
<circle class="st7" cx="58.3" cy="14.5" r="8.1"/>
|
||||||
|
<circle class="st8" cx="55.7" cy="63.3" r="8.1"/>
|
||||||
|
<circle class="st6" cx="71.9" cy="36.1" r="8.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 6.2 KiB |
94
art/mgmt_logo_white_symbol.svg
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 120 107.1" style="enable-background:new 0 0 120 107.1;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st17" x1="29.2" y1="24.1" x2="52.1" y2="42.8"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="27.7,27.8 24.9,20.5 32.6,21.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st0" cx="16.1" cy="12.2" r="12.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st17" x1="52.1" y1="42.1" x2="74.1" y2="80"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="70.1,80.9 76.9,84.8 76.8,77.1 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st17" x1="69.4" y1="46.7" x2="95.8" y2="52.4"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="69.7,50.7 63.9,45.6 71.3,43.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st17" x1="52.1" y1="42.8" x2="71.9" y2="30.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="73.1,34 76.6,27.1 68.9,27.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st17" x1="16.8" y1="49.6" x2="34.8" y2="46.5"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="34.3,50.5 40.3,45.6 33,42.9 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st17" x1="92.3" y1="79.5" x2="107.8" y2="54.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="96.1,80.6 89.3,84.3 89.5,76.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st17" x1="97.2" y1="36.6" x2="107.8" y2="54.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="94.4,39.5 94.3,31.8 101.1,35.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st0" cx="52.1" cy="42.8" r="12.1"/>
|
||||||
|
<circle class="st0" cx="12.2" cy="50.8" r="12.1"/>
|
||||||
|
<circle class="st0" cx="87.5" cy="21.7" r="12.1"/>
|
||||||
|
<circle class="st0" cx="83.5" cy="95" r="12.1"/>
|
||||||
|
<circle class="st0" cx="107.8" cy="54.2" r="12.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 2.8 KiB |
132
art/mgmt_logo_white_tall.svg
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 168.3 131.4" style="enable-background:new 0 0 168.3 131.4;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<path class="st0" d="M4.7,104.8l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9v12.5h-5v-12.1c0-1.1-0.2-1.9-0.5-2.4c-0.3-0.5-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6v12.9H9v-12.1
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4c-0.3-0.5-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4v13.8H0v-19H4.7z"/>
|
||||||
|
<path class="st0" d="M26.4,113.8c0-3.1,0.6-5.4,1.7-7s2.7-2.3,4.7-2.3c1.7,0,3.1,0.7,4,2l0.2-1.7h4.5v19c0,2.4-0.7,4.3-2,5.6
|
||||||
|
c-1.4,1.3-3.3,1.9-5.9,1.9c-1,0-2.1-0.2-3.3-0.6s-2-0.9-2.6-1.6l1.7-3.4c0.5,0.5,1.1,0.9,1.8,1.2c0.7,0.3,1.5,0.5,2.1,0.5
|
||||||
|
c1.1,0,1.9-0.3,2.4-0.8s0.7-1.4,0.7-2.6v-1.6c-0.9,1.2-2.2,1.9-3.7,1.9c-2,0-3.6-0.8-4.7-2.4s-1.7-3.8-1.7-6.7V113.8z M31.4,115
|
||||||
|
c0,1.8,0.2,3,0.7,3.8s1.2,1.2,2.2,1.2c1,0,1.8-0.4,2.3-1.1v-9.1c-0.5-0.8-1.3-1.2-2.2-1.2c-1,0-1.7,0.4-2.2,1.2
|
||||||
|
s-0.7,2.1-0.7,3.9V115z"/>
|
||||||
|
<path class="st0" d="M50.1,104.8l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9v12.5h-5v-12.1c0-1.1-0.2-1.9-0.5-2.4s-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6v12.9h-5v-12.1
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4s-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4v13.8h-5v-19H50.1z"/>
|
||||||
|
<path class="st0" d="M78.2,100.2v4.7h2.5v3.7h-2.5v9.5c0,0.8,0.1,1.3,0.3,1.5c0.2,0.3,0.6,0.4,1.2,0.4c0.5,0,0.9,0,1.2-0.1
|
||||||
|
l0,3.9c-0.8,0.3-1.8,0.5-2.7,0.5c-3.2,0-4.8-1.8-4.9-5.5v-10.1h-2.2v-3.7h2.2v-4.7H78.2z"/>
|
||||||
|
<path class="st0" d="M90.6,122.4c1.4,0,2.4-0.4,3.1-1.1c0.7-0.8,1.1-1.8,1.2-3.3h1.9c-0.1,1.9-0.7,3.5-1.9,4.6
|
||||||
|
c-1.1,1.1-2.6,1.7-4.3,1.7c-2.3,0-4-0.7-5.1-2.2s-1.7-3.6-1.8-6.4v-2.3c0-2.9,0.6-5.1,1.7-6.6s2.9-2.2,5.1-2.2
|
||||||
|
c1.9,0,3.4,0.6,4.5,1.8s1.7,2.9,1.7,5H95c-0.1-1.6-0.5-2.8-1.2-3.6s-1.8-1.3-3.1-1.3c-1.7,0-2.9,0.6-3.7,1.7
|
||||||
|
c-0.8,1.1-1.2,2.9-1.2,5.2v2.2c0,2.4,0.4,4.2,1.2,5.3S89,122.4,90.6,122.4z"/>
|
||||||
|
<path class="st0" d="M100.5,113.4c0-2.7,0.6-4.9,1.9-6.5s2.9-2.4,5.1-2.4c2.2,0,3.9,0.8,5.1,2.4s1.9,3.7,1.9,6.5v2
|
||||||
|
c0,2.8-0.6,5-1.9,6.5s-2.9,2.3-5.1,2.3c-2.1,0-3.8-0.8-5-2.3s-1.9-3.6-1.9-6.3V113.4z M102.5,115.3c0,2.2,0.4,3.9,1.3,5.2
|
||||||
|
c0.9,1.3,2.1,1.9,3.7,1.9c1.6,0,2.8-0.6,3.7-1.8c0.8-1.2,1.3-2.9,1.3-5.2v-2c0-2.2-0.4-3.9-1.3-5.2c-0.9-1.3-2.1-1.9-3.7-1.9
|
||||||
|
c-1.5,0-2.7,0.6-3.6,1.8c-0.9,1.2-1.3,2.9-1.4,5.1V115.3z"/>
|
||||||
|
<path class="st0" d="M121.1,104.8l0.1,3c0.6-1,1.3-1.9,2.2-2.5s1.9-0.9,3-0.9c3.3,0,5,2.2,5.1,6.6v12.7h-1.9v-12.5
|
||||||
|
c0-1.7-0.3-3-0.9-3.8c-0.6-0.8-1.5-1.2-2.8-1.2c-1,0-1.9,0.4-2.8,1.2s-1.4,1.8-1.9,3.2v13.2h-2v-19H121.1z"/>
|
||||||
|
<path class="st0" d="M138.2,123.8v-17.3h-2.6v-1.8h2.6v-2.5c0-1.9,0.5-3.3,1.3-4.3s2-1.5,3.5-1.5c0.7,0,1.3,0.1,1.9,0.3
|
||||||
|
l-0.1,1.8c-0.5-0.1-1-0.2-1.6-0.2c-0.9,0-1.7,0.4-2.2,1.1s-0.8,1.7-0.8,3v2.3h3.7v1.8h-3.7v17.3H138.2z"/>
|
||||||
|
<path class="st0" d="M148,99.3c0-0.4,0.1-0.7,0.3-1s0.5-0.4,0.9-0.4s0.7,0.1,1,0.4s0.3,0.6,0.3,1s-0.1,0.7-0.3,1s-0.5,0.4-1,0.4
|
||||||
|
s-0.7-0.1-0.9-0.4S148,99.7,148,99.3z M150.2,123.8h-2v-19h2V123.8z"/>
|
||||||
|
<path class="st0" d="M155.3,113.5c0-3,0.5-5.2,1.5-6.7s2.6-2.2,4.6-2.2c2.2,0,3.8,1,4.9,3l0.1-2.7h1.8v19.6c0,2.3-0.6,4-1.6,5.2
|
||||||
|
c-1.1,1.2-2.7,1.8-4.7,1.8c-1,0-2.1-0.3-3.2-0.8s-1.9-1.1-2.4-1.9l0.9-1.4c1.3,1.5,2.8,2.2,4.5,2.2c1.6,0,2.7-0.4,3.5-1.3
|
||||||
|
c0.7-0.8,1.1-2.1,1.1-3.8v-3c-1.1,1.8-2.7,2.7-4.9,2.7c-2,0-3.5-0.7-4.5-2.2s-1.6-3.6-1.6-6.5V113.5z M157.2,115.2
|
||||||
|
c0,2.4,0.4,4.2,1.1,5.4s1.9,1.7,3.5,1.7c2.1,0,3.6-1,4.5-3v-9.7c-0.9-2.2-2.4-3.3-4.4-3.3c-1.6,0-2.8,0.6-3.5,1.7
|
||||||
|
s-1.1,2.9-1.1,5.3V115.2z"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st19" x1="58.9" y1="20.1" x2="77.9" y2="35.6"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="57.6,23.2 55.3,17.1 61.7,18.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st0" cx="48" cy="10.1" r="10.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st19" x1="77.9" y1="35.1" x2="96.2" y2="66.7"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="93,67.5 98.6,70.7 98.6,64.2 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st19" x1="92.3" y1="38.9" x2="114.4" y2="43.6"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="92.6,42.3 87.8,38 93.9,36 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st19" x1="77.9" y1="35.6" x2="94.5" y2="25.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="95.4,28.3 98.4,22.6 91.9,22.9 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st19" x1="48.5" y1="41.3" x2="63.5" y2="38.7"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="63.1,42.1 68.1,38 62,35.7 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st19" x1="111.4" y1="66.3" x2="124.4" y2="45.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="114.6,67.2 109,70.2 109.1,63.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st19" x1="115.4" y1="30.4" x2="124.4" y2="45.2"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="113.2,32.8 113,26.4 118.7,29.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st0" cx="77.9" cy="35.6" r="10.1"/>
|
||||||
|
<circle class="st0" cx="44.6" cy="42.4" r="10.1"/>
|
||||||
|
<circle class="st0" cx="107.4" cy="18.1" r="10.1"/>
|
||||||
|
<circle class="st0" cx="104.1" cy="79.1" r="10.1"/>
|
||||||
|
<circle class="st0" cx="124.4" cy="45.2" r="10.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 6.2 KiB |
132
art/mgmt_logo_white_wide.svg
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
|
viewBox="0 0 260.4 71.4" style="enable-background:new 0 0 260.4 71.4;" xml:space="preserve">
|
||||||
|
<style type="text/css">
|
||||||
|
.st0{fill:#FFFFFF;}
|
||||||
|
.st1{fill:#E22434;}
|
||||||
|
.st2{display:none;}
|
||||||
|
.st3{fill:#1B3663;}
|
||||||
|
.st4{fill:#00B1D1;}
|
||||||
|
.st5{fill:#BFE6EF;}
|
||||||
|
.st6{fill:#69CBE0;}
|
||||||
|
.st7{fill:#0080BD;}
|
||||||
|
.st8{fill:#005DAB;}
|
||||||
|
.st9{fill:#183660;}
|
||||||
|
.st10{fill:none;stroke:#183660;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st11{fill:none;stroke:#183660;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st12{fill:none;stroke:#183660;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st13{fill:none;stroke:#C0E6EF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st14{fill:#C0E6EF;}
|
||||||
|
.st15{fill:none;stroke:#C0E6EF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st16{fill:none;stroke:#C0E6EF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
.st17{fill:none;stroke:#FFFFFF;stroke-width:1.9441;stroke-miterlimit:10;}
|
||||||
|
.st18{fill:none;stroke:#FFFFFF;stroke-width:1.2961;stroke-miterlimit:10;}
|
||||||
|
.st19{fill:none;stroke:#FFFFFF;stroke-width:1.6201;stroke-miterlimit:10;}
|
||||||
|
</style>
|
||||||
|
<g id="Layer_2">
|
||||||
|
</g>
|
||||||
|
<g id="Layer_1">
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<path class="st0" d="M96.7,26l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9V45h-5V32.9c0-1.1-0.2-1.9-0.5-2.4s-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6V45h-5V32.9
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4s-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4V45h-5V26H96.7z"/>
|
||||||
|
<path class="st0" d="M118.5,34.9c0-3.1,0.6-5.4,1.7-7s2.7-2.3,4.7-2.3c1.7,0,3.1,0.7,4,2l0.2-1.7h4.5v19c0,2.4-0.7,4.3-2,5.6
|
||||||
|
c-1.4,1.3-3.3,1.9-5.9,1.9c-1,0-2.1-0.2-3.3-0.6s-2-0.9-2.6-1.6l1.7-3.4c0.5,0.5,1.1,0.9,1.8,1.2c0.7,0.3,1.5,0.5,2.1,0.5
|
||||||
|
c1.1,0,1.9-0.3,2.4-0.8s0.7-1.4,0.7-2.6v-1.6c-0.9,1.2-2.2,1.9-3.7,1.9c-2,0-3.6-0.8-4.7-2.4s-1.7-3.8-1.7-6.7V34.9z
|
||||||
|
M123.5,36.2c0,1.8,0.2,3,0.7,3.8s1.2,1.2,2.2,1.2c1,0,1.8-0.4,2.3-1.1V31c-0.5-0.8-1.3-1.2-2.2-1.2c-1,0-1.7,0.4-2.2,1.2
|
||||||
|
s-0.7,2.1-0.7,3.9V36.2z"/>
|
||||||
|
<path class="st0" d="M142.2,26l0.1,1.8c1.1-1.4,2.6-2.1,4.4-2.1c1.9,0,3.2,0.9,4,2.6c1.1-1.7,2.6-2.6,4.7-2.6
|
||||||
|
c3.3,0,5,2.3,5.1,6.9V45h-5V32.9c0-1.1-0.2-1.9-0.5-2.4c-0.3-0.5-0.8-0.7-1.5-0.7c-0.9,0-1.6,0.6-2.1,1.7l0,0.6V45h-5V32.9
|
||||||
|
c0-1.1-0.1-1.9-0.4-2.4c-0.3-0.5-0.8-0.7-1.6-0.7c-0.9,0-1.5,0.5-2,1.4V45h-5V26H142.2z"/>
|
||||||
|
<path class="st0" d="M170.3,21.3V26h2.5v3.7h-2.5v9.5c0,0.8,0.1,1.3,0.3,1.5c0.2,0.3,0.6,0.4,1.2,0.4c0.5,0,0.9,0,1.2-0.1l0,3.9
|
||||||
|
c-0.8,0.3-1.8,0.5-2.7,0.5c-3.2,0-4.8-1.8-4.9-5.5V29.7h-2.2V26h2.2v-4.7H170.3z"/>
|
||||||
|
<path class="st0" d="M182.7,43.5c1.4,0,2.4-0.4,3.1-1.1c0.7-0.8,1.1-1.9,1.2-3.3h1.9c-0.1,1.9-0.7,3.5-1.9,4.6s-2.6,1.7-4.3,1.7
|
||||||
|
c-2.3,0-4-0.7-5.1-2.2s-1.7-3.6-1.8-6.4v-2.3c0-2.9,0.6-5.1,1.7-6.6s2.9-2.2,5.1-2.2c1.9,0,3.4,0.6,4.5,1.8s1.7,2.8,1.7,5H187
|
||||||
|
c-0.1-1.6-0.5-2.8-1.2-3.6s-1.8-1.3-3.1-1.3c-1.7,0-2.9,0.6-3.7,1.7c-0.8,1.1-1.2,2.9-1.2,5.2v2.2c0,2.4,0.4,4.2,1.2,5.3
|
||||||
|
C179.8,43,181,43.5,182.7,43.5z"/>
|
||||||
|
<path class="st0" d="M192.6,34.5c0-2.7,0.6-4.9,1.9-6.5s2.9-2.4,5.1-2.4c2.2,0,3.9,0.8,5.1,2.4c1.2,1.6,1.9,3.7,1.9,6.5v2
|
||||||
|
c0,2.8-0.6,5-1.9,6.5s-2.9,2.3-5.1,2.3s-3.8-0.8-5-2.3s-1.9-3.6-1.9-6.3V34.5z M194.6,36.5c0,2.2,0.4,3.9,1.3,5.2
|
||||||
|
c0.9,1.3,2.1,1.9,3.7,1.9c1.6,0,2.8-0.6,3.7-1.8c0.8-1.2,1.3-2.9,1.3-5.2v-2c0-2.2-0.4-3.9-1.3-5.2c-0.9-1.3-2.1-1.9-3.7-1.9
|
||||||
|
c-1.5,0-2.7,0.6-3.6,1.8c-0.9,1.2-1.3,2.9-1.4,5.1V36.5z"/>
|
||||||
|
<path class="st0" d="M213.2,26l0.1,3c0.6-1,1.3-1.9,2.2-2.5s1.9-0.9,3-0.9c3.3,0,5,2.2,5.1,6.6V45h-1.9V32.5
|
||||||
|
c0-1.7-0.3-3-0.9-3.8c-0.6-0.8-1.5-1.2-2.8-1.2c-1,0-1.9,0.4-2.8,1.2s-1.4,1.8-1.9,3.2V45h-2V26H213.2z"/>
|
||||||
|
<path class="st0" d="M230.3,45V27.7h-2.6V26h2.6v-2.5c0-1.9,0.5-3.3,1.3-4.3s2-1.5,3.5-1.5c0.7,0,1.3,0.1,1.9,0.3l-0.1,1.8
|
||||||
|
c-0.5-0.1-1-0.2-1.6-0.2c-0.9,0-1.7,0.4-2.2,1.1s-0.8,1.7-0.8,3V26h3.7v1.8h-3.7V45H230.3z"/>
|
||||||
|
<path class="st0" d="M240.1,20.5c0-0.4,0.1-0.7,0.3-1s0.5-0.4,0.9-0.4s0.7,0.1,1,0.4s0.3,0.6,0.3,1s-0.1,0.7-0.3,1
|
||||||
|
s-0.5,0.4-1,0.4s-0.7-0.1-0.9-0.4S240.1,20.9,240.1,20.5z M242.3,45h-2V26h2V45z"/>
|
||||||
|
<path class="st0" d="M247.4,34.6c0-3,0.5-5.2,1.5-6.7s2.6-2.2,4.6-2.2c2.2,0,3.8,1,4.9,3l0.1-2.7h1.8v19.6c0,2.3-0.6,4-1.6,5.2
|
||||||
|
s-2.7,1.8-4.7,1.8c-1,0-2.1-0.3-3.2-0.8s-1.9-1.1-2.4-1.9l0.9-1.4c1.3,1.5,2.8,2.2,4.5,2.2c1.6,0,2.7-0.4,3.5-1.3
|
||||||
|
c0.7-0.8,1.1-2.1,1.1-3.8v-3c-1.1,1.8-2.7,2.7-4.9,2.7c-2,0-3.5-0.7-4.5-2.2s-1.6-3.6-1.6-6.5V34.6z M249.3,36.4
|
||||||
|
c0,2.4,0.4,4.2,1.1,5.4s1.9,1.7,3.5,1.7c2.1,0,3.6-1,4.5-3v-9.7c-0.9-2.2-2.4-3.3-4.4-3.3c-1.6,0-2.8,0.6-3.5,1.7
|
||||||
|
s-1.1,2.9-1.1,5.3V36.4z"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st18" x1="19.5" y1="16" x2="34.7" y2="28.5"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="18.4,18.5 16.6,13.7 21.7,14.5 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st0" cx="10.8" cy="8.1" r="8.1"/>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st18" x1="34.7" y1="28" x2="49.4" y2="53.3"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="46.8,54 51.2,56.5 51.2,51.4 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st18" x1="46.2" y1="31.1" x2="63.9" y2="34.9"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="46.4,33.8 42.6,30.4 47.5,28.8 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st18" x1="34.7" y1="28.5" x2="47.9" y2="20.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="48.7,22.7 51.1,18.1 45.9,18.3 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st18" x1="11.2" y1="33.1" x2="23.2" y2="31"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="22.9,33.7 26.8,30.4 22,28.6 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st18" x1="61.5" y1="53" x2="71.9" y2="36.1"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="64.1,53.7 59.5,56.2 59.7,51 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g>
|
||||||
|
<g>
|
||||||
|
<line class="st18" x1="64.7" y1="24.4" x2="71.9" y2="36.6"/>
|
||||||
|
<g>
|
||||||
|
<polygon class="st0" points="62.9,26.4 62.9,21.2 67.3,23.7 "/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<circle class="st0" cx="34.7" cy="28.5" r="8.1"/>
|
||||||
|
<circle class="st0" cx="8.1" cy="33.9" r="8.1"/>
|
||||||
|
<circle class="st0" cx="58.3" cy="14.5" r="8.1"/>
|
||||||
|
<circle class="st0" cx="55.7" cy="63.3" r="8.1"/>
|
||||||
|
<circle class="st0" cx="71.9" cy="36.1" r="8.1"/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 6.2 KiB |
566
config.go
@@ -1,566 +0,0 @@
|
|||||||
// Mgmt
|
|
||||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
|
||||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
|
||||||
//
|
|
||||||
// This program is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Affero General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// This program is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Affero General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Affero General Public License
|
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"gopkg.in/yaml.v2"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type collectorResConfig struct {
|
|
||||||
Kind string `yaml:"kind"`
|
|
||||||
Pattern string `yaml:"pattern"` // XXX: Not Implemented
|
|
||||||
}
|
|
||||||
|
|
||||||
type vertexConfig struct {
|
|
||||||
Kind string `yaml:"kind"`
|
|
||||||
Name string `yaml:"name"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type edgeConfig struct {
|
|
||||||
Name string `yaml:"name"`
|
|
||||||
From vertexConfig `yaml:"from"`
|
|
||||||
To vertexConfig `yaml:"to"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GraphConfig is the data structure that describes a single graph to run.
|
|
||||||
type GraphConfig struct {
|
|
||||||
Graph string `yaml:"graph"`
|
|
||||||
Resources struct {
|
|
||||||
Noop []*NoopRes `yaml:"noop"`
|
|
||||||
Pkg []*PkgRes `yaml:"pkg"`
|
|
||||||
File []*FileRes `yaml:"file"`
|
|
||||||
Svc []*SvcRes `yaml:"svc"`
|
|
||||||
Exec []*ExecRes `yaml:"exec"`
|
|
||||||
Timer []*TimerRes `yaml:"timer"`
|
|
||||||
} `yaml:"resources"`
|
|
||||||
Collector []collectorResConfig `yaml:"collect"`
|
|
||||||
Edges []edgeConfig `yaml:"edges"`
|
|
||||||
Comment string `yaml:"comment"`
|
|
||||||
Hostname string `yaml:"hostname"` // uuid for the host
|
|
||||||
Remote string `yaml:"remote"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse parses a data stream into the graph structure.
|
|
||||||
func (c *GraphConfig) Parse(data []byte) error {
|
|
||||||
if err := yaml.Unmarshal(data, c); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if c.Graph == "" {
|
|
||||||
return errors.New("Graph config: invalid `graph`")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseConfigFromFile takes a filename and returns the graph config structure.
|
|
||||||
func ParseConfigFromFile(filename string) *GraphConfig {
|
|
||||||
data, err := ioutil.ReadFile(filename)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Config: Error: ParseConfigFromFile: File: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var config GraphConfig
|
|
||||||
if err := config.Parse(data); err != nil {
|
|
||||||
log.Printf("Config: Error: ParseConfigFromFile: Parse: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &config
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGraphFromConfig returns a new graph from existing input, such as from the
|
|
||||||
// existing graph, and a GraphConfig struct.
|
|
||||||
func (g *Graph) NewGraphFromConfig(config *GraphConfig, embdEtcd *EmbdEtcd, noop bool) (*Graph, error) {
|
|
||||||
if config.Hostname == "" {
|
|
||||||
return nil, fmt.Errorf("Config: Error: Hostname can't be empty!")
|
|
||||||
}
|
|
||||||
|
|
||||||
var graph *Graph // new graph to return
|
|
||||||
if g == nil { // FIXME: how can we check for an empty graph?
|
|
||||||
graph = NewGraph("Graph") // give graph a default name
|
|
||||||
} else {
|
|
||||||
graph = g.Copy() // same vertices, since they're pointers!
|
|
||||||
}
|
|
||||||
|
|
||||||
var lookup = make(map[string]map[string]*Vertex)
|
|
||||||
|
|
||||||
//log.Printf("%+v", config) // debug
|
|
||||||
|
|
||||||
// TODO: if defined (somehow)...
|
|
||||||
graph.SetName(config.Graph) // set graph name
|
|
||||||
|
|
||||||
var keep []*Vertex // list of vertex which are the same in new graph
|
|
||||||
var resources []Res // list of resources to export
|
|
||||||
// use reflection to avoid duplicating code... better options welcome!
|
|
||||||
value := reflect.Indirect(reflect.ValueOf(config.Resources))
|
|
||||||
vtype := value.Type()
|
|
||||||
for i := 0; i < vtype.NumField(); i++ { // number of fields in struct
|
|
||||||
name := vtype.Field(i).Name // string of field name
|
|
||||||
field := value.FieldByName(name)
|
|
||||||
iface := field.Interface() // interface type of value
|
|
||||||
slice := reflect.ValueOf(iface)
|
|
||||||
// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
|
|
||||||
kind := FirstToUpper(name)
|
|
||||||
if DEBUG {
|
|
||||||
log.Printf("Config: Processing: %v...", kind)
|
|
||||||
}
|
|
||||||
for j := 0; j < slice.Len(); j++ { // loop through resources of same kind
|
|
||||||
x := slice.Index(j).Interface()
|
|
||||||
res, ok := x.(Res) // convert to Res type
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("Config: Error: Can't convert: %v of type: %T to Res.", x, x)
|
|
||||||
}
|
|
||||||
if noop {
|
|
||||||
res.Meta().Noop = noop
|
|
||||||
}
|
|
||||||
if _, exists := lookup[kind]; !exists {
|
|
||||||
lookup[kind] = make(map[string]*Vertex)
|
|
||||||
}
|
|
||||||
// XXX: should we export based on a @@ prefix, or a metaparam
|
|
||||||
// like exported => true || exported => (host pattern)||(other pattern?)
|
|
||||||
if !strings.HasPrefix(res.GetName(), "@@") { // not exported resource
|
|
||||||
// XXX: we don't have a way of knowing if any of the
|
|
||||||
// metaparams are undefined, and as a result to set the
|
|
||||||
// defaults that we want! I hate the go yaml parser!!!
|
|
||||||
v := graph.GetVertexMatch(res)
|
|
||||||
if v == nil { // no match found
|
|
||||||
res.Init()
|
|
||||||
v = NewVertex(res)
|
|
||||||
graph.AddVertex(v) // call standalone in case not part of an edge
|
|
||||||
}
|
|
||||||
lookup[kind][res.GetName()] = v // used for constructing edges
|
|
||||||
keep = append(keep, v) // append
|
|
||||||
|
|
||||||
} else if !noop { // do not export any resources if noop
|
|
||||||
// store for addition to etcd storage...
|
|
||||||
res.SetName(res.GetName()[2:]) //slice off @@
|
|
||||||
res.setKind(kind) // cheap init
|
|
||||||
resources = append(resources, res)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// store in etcd
|
|
||||||
if err := EtcdSetResources(embdEtcd, config.Hostname, resources); err != nil {
|
|
||||||
return nil, fmt.Errorf("Config: Could not export resources: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lookup from etcd
|
|
||||||
var hostnameFilter []string // empty to get from everyone
|
|
||||||
kindFilter := []string{}
|
|
||||||
for _, t := range config.Collector {
|
|
||||||
// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
|
|
||||||
kind := FirstToUpper(t.Kind)
|
|
||||||
kindFilter = append(kindFilter, kind)
|
|
||||||
}
|
|
||||||
// do all the graph look ups in one single step, so that if the etcd
|
|
||||||
// database changes, we don't have a partial state of affairs...
|
|
||||||
if len(kindFilter) > 0 { // if kindFilter is empty, don't need to do lookups!
|
|
||||||
var err error
|
|
||||||
resources, err = EtcdGetResources(embdEtcd, hostnameFilter, kindFilter)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Config: Could not collect resources: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, res := range resources {
|
|
||||||
matched := false
|
|
||||||
// see if we find a collect pattern that matches
|
|
||||||
for _, t := range config.Collector {
|
|
||||||
// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
|
|
||||||
kind := FirstToUpper(t.Kind)
|
|
||||||
// use t.Kind and optionally t.Pattern to collect from etcd storage
|
|
||||||
log.Printf("Collect: %v; Pattern: %v", kind, t.Pattern)
|
|
||||||
|
|
||||||
// XXX: expand to more complex pattern matching here...
|
|
||||||
if res.Kind() != kind {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if matched {
|
|
||||||
// we've already matched this resource, should we match again?
|
|
||||||
log.Printf("Config: Warning: Matching %v[%v] again!", kind, res.GetName())
|
|
||||||
}
|
|
||||||
matched = true
|
|
||||||
|
|
||||||
// collect resources but add the noop metaparam
|
|
||||||
if noop {
|
|
||||||
res.Meta().Noop = noop
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.Pattern != "" { // XXX: simplistic for now
|
|
||||||
res.CollectPattern(t.Pattern) // res.Dirname = t.Pattern
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("Collect: %v[%v]: collected!", kind, res.GetName())
|
|
||||||
|
|
||||||
// XXX: similar to other resource add code:
|
|
||||||
if _, exists := lookup[kind]; !exists {
|
|
||||||
lookup[kind] = make(map[string]*Vertex)
|
|
||||||
}
|
|
||||||
v := graph.GetVertexMatch(res)
|
|
||||||
if v == nil { // no match found
|
|
||||||
res.Init() // initialize go channels or things won't work!!!
|
|
||||||
v = NewVertex(res)
|
|
||||||
graph.AddVertex(v) // call standalone in case not part of an edge
|
|
||||||
}
|
|
||||||
lookup[kind][res.GetName()] = v // used for constructing edges
|
|
||||||
keep = append(keep, v) // append
|
|
||||||
|
|
||||||
//break // let's see if another resource even matches
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// get rid of any vertices we shouldn't "keep" (that aren't in new graph)
|
|
||||||
for _, v := range graph.GetVertices() {
|
|
||||||
if !VertexContains(v, keep) {
|
|
||||||
// wait for exit before starting new graph!
|
|
||||||
v.SendEvent(eventExit, true, false)
|
|
||||||
graph.DeleteVertex(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, e := range config.Edges {
|
|
||||||
if _, ok := lookup[FirstToUpper(e.From.Kind)]; !ok {
|
|
||||||
return nil, fmt.Errorf("Can't find 'from' resource!")
|
|
||||||
}
|
|
||||||
if _, ok := lookup[FirstToUpper(e.To.Kind)]; !ok {
|
|
||||||
return nil, fmt.Errorf("Can't find 'to' resource!")
|
|
||||||
}
|
|
||||||
if _, ok := lookup[FirstToUpper(e.From.Kind)][e.From.Name]; !ok {
|
|
||||||
return nil, fmt.Errorf("Can't find 'from' name!")
|
|
||||||
}
|
|
||||||
if _, ok := lookup[FirstToUpper(e.To.Kind)][e.To.Name]; !ok {
|
|
||||||
return nil, fmt.Errorf("Can't find 'to' name!")
|
|
||||||
}
|
|
||||||
graph.AddEdge(lookup[FirstToUpper(e.From.Kind)][e.From.Name], lookup[FirstToUpper(e.To.Kind)][e.To.Name], NewEdge(e.Name))
|
|
||||||
}
|
|
||||||
|
|
||||||
return graph, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// add edges to the vertex in a graph based on if it matches a uuid list
|
|
||||||
func (g *Graph) addEdgesByMatchingUUIDS(v *Vertex, uuids []ResUUID) []bool {
|
|
||||||
// search for edges and see what matches!
|
|
||||||
var result []bool
|
|
||||||
|
|
||||||
// loop through each uuid, and see if it matches any vertex
|
|
||||||
for _, uuid := range uuids {
|
|
||||||
var found = false
|
|
||||||
// uuid is a ResUUID object
|
|
||||||
for _, vv := range g.GetVertices() { // search
|
|
||||||
if v == vv { // skip self
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if DEBUG {
|
|
||||||
log.Printf("Compile: AutoEdge: Match: %v[%v] with UUID: %v[%v]", vv.Kind(), vv.GetName(), uuid.Kind(), uuid.GetName())
|
|
||||||
}
|
|
||||||
// we must match to an effective UUID for the resource,
|
|
||||||
// that is to say, the name value of a res is a helpful
|
|
||||||
// handle, but it is not necessarily a unique identity!
|
|
||||||
// remember, resources can return multiple UUID's each!
|
|
||||||
if UUIDExistsInUUIDs(uuid, vv.GetUUIDs()) {
|
|
||||||
// add edge from: vv -> v
|
|
||||||
if uuid.Reversed() {
|
|
||||||
txt := fmt.Sprintf("AutoEdge: %v[%v] -> %v[%v]", vv.Kind(), vv.GetName(), v.Kind(), v.GetName())
|
|
||||||
log.Printf("Compile: Adding %v", txt)
|
|
||||||
g.AddEdge(vv, v, NewEdge(txt))
|
|
||||||
} else { // edges go the "normal" way, eg: pkg resource
|
|
||||||
txt := fmt.Sprintf("AutoEdge: %v[%v] -> %v[%v]", v.Kind(), v.GetName(), vv.Kind(), vv.GetName())
|
|
||||||
log.Printf("Compile: Adding %v", txt)
|
|
||||||
g.AddEdge(v, vv, NewEdge(txt))
|
|
||||||
}
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
result = append(result, found)
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// AutoEdges adds the automatic edges to the graph.
|
|
||||||
func (g *Graph) AutoEdges() {
|
|
||||||
log.Println("Compile: Adding AutoEdges...")
|
|
||||||
for _, v := range g.GetVertices() { // for each vertexes autoedges
|
|
||||||
if !v.Meta().AutoEdge { // is the metaparam true?
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
autoEdgeObj := v.AutoEdges()
|
|
||||||
if autoEdgeObj == nil {
|
|
||||||
log.Printf("%v[%v]: Config: No auto edges were found!", v.Kind(), v.GetName())
|
|
||||||
continue // next vertex
|
|
||||||
}
|
|
||||||
|
|
||||||
for { // while the autoEdgeObj has more uuids to add...
|
|
||||||
uuids := autoEdgeObj.Next() // get some!
|
|
||||||
if uuids == nil {
|
|
||||||
log.Printf("%v[%v]: Config: The auto edge list is empty!", v.Kind(), v.GetName())
|
|
||||||
break // inner loop
|
|
||||||
}
|
|
||||||
if DEBUG {
|
|
||||||
log.Println("Compile: AutoEdge: UUIDS:")
|
|
||||||
for i, u := range uuids {
|
|
||||||
log.Printf("Compile: AutoEdge: UUID%d: %v", i, u)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// match and add edges
|
|
||||||
result := g.addEdgesByMatchingUUIDS(v, uuids)
|
|
||||||
|
|
||||||
// report back, and find out if we should continue
|
|
||||||
if !autoEdgeObj.Test(result) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AutoGrouper is the required interface to implement for an autogroup algorithm
|
|
||||||
type AutoGrouper interface {
|
|
||||||
// listed in the order these are typically called in...
|
|
||||||
name() string // friendly identifier
|
|
||||||
init(*Graph) error // only call once
|
|
||||||
vertexNext() (*Vertex, *Vertex, error) // mostly algorithmic
|
|
||||||
vertexCmp(*Vertex, *Vertex) error // can we merge these ?
|
|
||||||
vertexMerge(*Vertex, *Vertex) (*Vertex, error) // vertex merge fn to use
|
|
||||||
edgeMerge(*Edge, *Edge) *Edge // edge merge fn to use
|
|
||||||
vertexTest(bool) (bool, error) // call until false
|
|
||||||
}
|
|
||||||
|
|
||||||
// baseGrouper is the base type for implementing the AutoGrouper interface
|
|
||||||
type baseGrouper struct {
|
|
||||||
graph *Graph // store a pointer to the graph
|
|
||||||
vertices []*Vertex // cached list of vertices
|
|
||||||
i int
|
|
||||||
j int
|
|
||||||
done bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// name provides a friendly name for the logs to see
|
|
||||||
func (ag *baseGrouper) name() string {
|
|
||||||
return "baseGrouper"
|
|
||||||
}
|
|
||||||
|
|
||||||
// init is called only once and before using other AutoGrouper interface methods
|
|
||||||
// the name method is the only exception: call it any time without side effects!
|
|
||||||
func (ag *baseGrouper) init(g *Graph) error {
|
|
||||||
if ag.graph != nil {
|
|
||||||
return fmt.Errorf("The init method has already been called!")
|
|
||||||
}
|
|
||||||
ag.graph = g // pointer
|
|
||||||
ag.vertices = ag.graph.GetVerticesSorted() // cache in deterministic order!
|
|
||||||
ag.i = 0
|
|
||||||
ag.j = 0
|
|
||||||
if len(ag.vertices) == 0 { // empty graph
|
|
||||||
ag.done = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// vertexNext is a simple iterator that loops through vertex (pair) combinations
|
|
||||||
// an intelligent algorithm would selectively offer only valid pairs of vertices
|
|
||||||
// these should satisfy logical grouping requirements for the autogroup designs!
|
|
||||||
// the desired algorithms can override, but keep this method as a base iterator!
|
|
||||||
func (ag *baseGrouper) vertexNext() (v1, v2 *Vertex, err error) {
|
|
||||||
// this does a for v... { for w... { return v, w }} but stepwise!
|
|
||||||
l := len(ag.vertices)
|
|
||||||
if ag.i < l {
|
|
||||||
v1 = ag.vertices[ag.i]
|
|
||||||
}
|
|
||||||
if ag.j < l {
|
|
||||||
v2 = ag.vertices[ag.j]
|
|
||||||
}
|
|
||||||
|
|
||||||
// in case the vertex was deleted
|
|
||||||
if !ag.graph.HasVertex(v1) {
|
|
||||||
v1 = nil
|
|
||||||
}
|
|
||||||
if !ag.graph.HasVertex(v2) {
|
|
||||||
v2 = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// two nested loops...
|
|
||||||
if ag.j < l {
|
|
||||||
ag.j++
|
|
||||||
}
|
|
||||||
if ag.j == l {
|
|
||||||
ag.j = 0
|
|
||||||
if ag.i < l {
|
|
||||||
ag.i++
|
|
||||||
}
|
|
||||||
if ag.i == l {
|
|
||||||
ag.done = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ag *baseGrouper) vertexCmp(v1, v2 *Vertex) error {
|
|
||||||
if v1 == nil || v2 == nil {
|
|
||||||
return fmt.Errorf("Vertex is nil!")
|
|
||||||
}
|
|
||||||
if v1 == v2 { // skip yourself
|
|
||||||
return fmt.Errorf("Vertices are the same!")
|
|
||||||
}
|
|
||||||
if v1.Kind() != v2.Kind() { // we must group similar kinds
|
|
||||||
// TODO: maybe future resources won't need this limitation?
|
|
||||||
return fmt.Errorf("The two resources aren't the same kind!")
|
|
||||||
}
|
|
||||||
// someone doesn't want to group!
|
|
||||||
if !v1.Meta().AutoGroup || !v2.Meta().AutoGroup {
|
|
||||||
return fmt.Errorf("One of the autogroup flags is false!")
|
|
||||||
}
|
|
||||||
if v1.Res.IsGrouped() { // already grouped!
|
|
||||||
return fmt.Errorf("Already grouped!")
|
|
||||||
}
|
|
||||||
if len(v2.Res.GetGroup()) > 0 { // already has children grouped!
|
|
||||||
return fmt.Errorf("Already has groups!")
|
|
||||||
}
|
|
||||||
if !v1.Res.GroupCmp(v2.Res) { // resource groupcmp failed!
|
|
||||||
return fmt.Errorf("The GroupCmp failed!")
|
|
||||||
}
|
|
||||||
return nil // success
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ag *baseGrouper) vertexMerge(v1, v2 *Vertex) (v *Vertex, err error) {
|
|
||||||
// NOTE: it's important to use w.Res instead of w, b/c
|
|
||||||
// the w by itself is the *Vertex obj, not the *Res obj
|
|
||||||
// which is contained within it! They both satisfy the
|
|
||||||
// Res interface, which is why both will compile! :(
|
|
||||||
err = v1.Res.GroupRes(v2.Res) // GroupRes skips stupid groupings
|
|
||||||
return // success or fail, and no need to merge the actual vertices!
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ag *baseGrouper) edgeMerge(e1, e2 *Edge) *Edge {
|
|
||||||
return e1 // noop
|
|
||||||
}
|
|
||||||
|
|
||||||
// vertexTest processes the results of the grouping for the algorithm to know
|
|
||||||
// return an error if something went horribly wrong, and bool false to stop
|
|
||||||
func (ag *baseGrouper) vertexTest(b bool) (bool, error) {
|
|
||||||
// NOTE: this particular baseGrouper version doesn't track what happens
|
|
||||||
// because since we iterate over every pair, we don't care which merge!
|
|
||||||
if ag.done {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: this algorithm may not be correct in all cases. replace if needed!
|
|
||||||
type nonReachabilityGrouper struct {
|
|
||||||
baseGrouper // "inherit" what we want, and reimplement the rest
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ag *nonReachabilityGrouper) name() string {
|
|
||||||
return "nonReachabilityGrouper"
|
|
||||||
}
|
|
||||||
|
|
||||||
// this algorithm relies on the observation that if there's a path from a to b,
|
|
||||||
// then they *can't* be merged (b/c of the existing dependency) so therefore we
|
|
||||||
// merge anything that *doesn't* satisfy this condition or that of the reverse!
|
|
||||||
func (ag *nonReachabilityGrouper) vertexNext() (v1, v2 *Vertex, err error) {
|
|
||||||
for {
|
|
||||||
v1, v2, err = ag.baseGrouper.vertexNext() // get all iterable pairs
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error running autoGroup(vertexNext): %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if v1 != v2 { // ignore self cmp early (perf optimization)
|
|
||||||
// if NOT reachable, they're viable...
|
|
||||||
out1 := ag.graph.Reachability(v1, v2)
|
|
||||||
out2 := ag.graph.Reachability(v2, v1)
|
|
||||||
if len(out1) == 0 && len(out2) == 0 {
|
|
||||||
return // return v1 and v2, they're viable
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if we got here, it means we're skipping over this candidate!
|
|
||||||
if ok, err := ag.baseGrouper.vertexTest(false); err != nil {
|
|
||||||
log.Fatalf("Error running autoGroup(vertexTest): %v", err)
|
|
||||||
} else if !ok {
|
|
||||||
return nil, nil, nil // done!
|
|
||||||
}
|
|
||||||
|
|
||||||
// the vertexTest passed, so loop and try with a new pair...
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// autoGroup is the mechanical auto group "runner" that runs the interface spec
|
|
||||||
func (g *Graph) autoGroup(ag AutoGrouper) chan string {
|
|
||||||
strch := make(chan string) // output log messages here
|
|
||||||
go func(strch chan string) {
|
|
||||||
strch <- fmt.Sprintf("Compile: Grouping: Algorithm: %v...", ag.name())
|
|
||||||
if err := ag.init(g); err != nil {
|
|
||||||
log.Fatalf("Error running autoGroup(init): %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
var v, w *Vertex
|
|
||||||
v, w, err := ag.vertexNext() // get pair to compare
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error running autoGroup(vertexNext): %v", err)
|
|
||||||
}
|
|
||||||
merged := false
|
|
||||||
// save names since they change during the runs
|
|
||||||
vStr := fmt.Sprintf("%s", v) // valid even if it is nil
|
|
||||||
wStr := fmt.Sprintf("%s", w)
|
|
||||||
|
|
||||||
if err := ag.vertexCmp(v, w); err != nil { // cmp ?
|
|
||||||
if DEBUG {
|
|
||||||
strch <- fmt.Sprintf("Compile: Grouping: !GroupCmp for: %s into %s", wStr, vStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove grouped vertex and merge edges (res is safe)
|
|
||||||
} else if err := g.VertexMerge(v, w, ag.vertexMerge, ag.edgeMerge); err != nil { // merge...
|
|
||||||
strch <- fmt.Sprintf("Compile: Grouping: !VertexMerge for: %s into %s", wStr, vStr)
|
|
||||||
|
|
||||||
} else { // success!
|
|
||||||
strch <- fmt.Sprintf("Compile: Grouping: Success for: %s into %s", wStr, vStr)
|
|
||||||
merged = true // woo
|
|
||||||
}
|
|
||||||
|
|
||||||
// did these get used?
|
|
||||||
if ok, err := ag.vertexTest(merged); err != nil {
|
|
||||||
log.Fatalf("Error running autoGroup(vertexTest): %v", err)
|
|
||||||
} else if !ok {
|
|
||||||
break // done!
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
close(strch)
|
|
||||||
return
|
|
||||||
}(strch) // call function
|
|
||||||
return strch
|
|
||||||
}
|
|
||||||
|
|
||||||
// AutoGroup runs the auto grouping on the graph and prints out log messages
|
|
||||||
func (g *Graph) AutoGroup() {
|
|
||||||
// receive log messages from channel...
|
|
||||||
// this allows test cases to avoid printing them when they're unwanted!
|
|
||||||
// TODO: this algorithm may not be correct in all cases. replace if needed!
|
|
||||||
for str := range g.autoGroup(&nonReachabilityGrouper{}) {
|
|
||||||
log.Println(str)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
224
configwatch.go
@@ -1,224 +0,0 @@
|
|||||||
// Mgmt
|
|
||||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
|
||||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
|
||||||
//
|
|
||||||
// This program is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Affero General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// This program is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Affero General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Affero General Public License
|
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"gopkg.in/fsnotify.v1"
|
|
||||||
//"github.com/go-fsnotify/fsnotify" // git master of "gopkg.in/fsnotify.v1"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConfigWatcher returns events on a channel anytime one of its files events.
|
|
||||||
type ConfigWatcher struct {
|
|
||||||
ch chan string
|
|
||||||
wg sync.WaitGroup
|
|
||||||
closechan chan struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfigWatcher creates a new ConfigWatcher struct.
|
|
||||||
func NewConfigWatcher() *ConfigWatcher {
|
|
||||||
return &ConfigWatcher{
|
|
||||||
ch: make(chan string),
|
|
||||||
closechan: make(chan struct{}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Add method adds a new file path to watch for events on.
|
|
||||||
func (obj *ConfigWatcher) Add(file ...string) {
|
|
||||||
if len(file) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(file) > 1 {
|
|
||||||
for _, f := range file { // add all the files...
|
|
||||||
obj.Add(f) // recurse
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// otherwise, add the one file passed in...
|
|
||||||
obj.wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer obj.wg.Done()
|
|
||||||
ch := ConfigWatch(file[0])
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ch:
|
|
||||||
obj.ch <- file[0]
|
|
||||||
continue
|
|
||||||
case <-obj.closechan:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Events returns a channel to listen on for file events. It closes when it is
|
|
||||||
// emptied after the Close() method is called. You can test for closure with the
|
|
||||||
// f, more := <-obj.Events() pattern.
|
|
||||||
func (obj *ConfigWatcher) Events() chan string {
|
|
||||||
return obj.ch
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close shuts down the ConfigWatcher object. It closes the Events channel after
|
|
||||||
// all the currently pending events have been emptied.
|
|
||||||
func (obj *ConfigWatcher) Close() {
|
|
||||||
if obj.ch == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
close(obj.closechan)
|
|
||||||
obj.wg.Wait() // wait until everyone is done sending on obj.ch
|
|
||||||
//obj.ch <- "" // send finished message
|
|
||||||
close(obj.ch)
|
|
||||||
obj.ch = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigWatch writes on the channel everytime an event is seen for the path.
|
|
||||||
// XXX: it would be great if we could reuse code between this and the file resource
|
|
||||||
// XXX: patch this to submit it as part of go-fsnotify if they're interested...
|
|
||||||
func ConfigWatch(file string) chan bool {
|
|
||||||
ch := make(chan bool)
|
|
||||||
go func() {
|
|
||||||
var safename = path.Clean(file) // no trailing slash
|
|
||||||
|
|
||||||
watcher, err := fsnotify.NewWatcher()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer watcher.Close()
|
|
||||||
|
|
||||||
patharray := PathSplit(safename) // tokenize the path
|
|
||||||
var index = len(patharray) // starting index
|
|
||||||
var current string // current "watcher" location
|
|
||||||
var deltaDepth int // depth delta between watcher and event
|
|
||||||
var send = false // send event?
|
|
||||||
|
|
||||||
for {
|
|
||||||
current = strings.Join(patharray[0:index], "/")
|
|
||||||
if current == "" { // the empty string top is the root dir ("/")
|
|
||||||
current = "/"
|
|
||||||
}
|
|
||||||
if DEBUG {
|
|
||||||
log.Printf("Watching: %v", current) // attempting to watch...
|
|
||||||
}
|
|
||||||
// initialize in the loop so that we can reset on rm-ed handles
|
|
||||||
err = watcher.Add(current)
|
|
||||||
if err != nil {
|
|
||||||
if err == syscall.ENOENT {
|
|
||||||
index-- // usually not found, move up one dir
|
|
||||||
} else if err == syscall.ENOSPC {
|
|
||||||
// XXX: occasionally: no space left on device,
|
|
||||||
// XXX: probably due to lack of inotify watches
|
|
||||||
log.Printf("Out of inotify watches for config(%v)", file)
|
|
||||||
log.Fatal(err)
|
|
||||||
} else {
|
|
||||||
log.Printf("Unknown config(%v) error:", file)
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
index = int(math.Max(1, float64(index)))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case event := <-watcher.Events:
|
|
||||||
// the deeper you go, the bigger the deltaDepth is...
|
|
||||||
// this is the difference between what we're watching,
|
|
||||||
// and the event... doesn't mean we can't watch deeper
|
|
||||||
if current == event.Name {
|
|
||||||
deltaDepth = 0 // i was watching what i was looking for
|
|
||||||
|
|
||||||
} else if HasPathPrefix(event.Name, current) {
|
|
||||||
deltaDepth = len(PathSplit(current)) - len(PathSplit(event.Name)) // -1 or less
|
|
||||||
|
|
||||||
} else if HasPathPrefix(current, event.Name) {
|
|
||||||
deltaDepth = len(PathSplit(event.Name)) - len(PathSplit(current)) // +1 or more
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// TODO different watchers get each others events!
|
|
||||||
// https://github.com/go-fsnotify/fsnotify/issues/95
|
|
||||||
// this happened with two values such as:
|
|
||||||
// event.Name: /tmp/mgmt/f3 and current: /tmp/mgmt/f2
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
//log.Printf("The delta depth is: %v", deltaDepth)
|
|
||||||
|
|
||||||
// if we have what we wanted, awesome, send an event...
|
|
||||||
if event.Name == safename {
|
|
||||||
//log.Println("Event!")
|
|
||||||
// TODO: filter out some of the events, is Write a sufficient minimum?
|
|
||||||
if event.Op&fsnotify.Write == fsnotify.Write {
|
|
||||||
send = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// file removed, move the watch upwards
|
|
||||||
if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
|
|
||||||
//log.Println("Removal!")
|
|
||||||
watcher.Remove(current)
|
|
||||||
index--
|
|
||||||
}
|
|
||||||
|
|
||||||
// we must be a parent watcher, so descend in
|
|
||||||
if deltaDepth < 0 {
|
|
||||||
watcher.Remove(current)
|
|
||||||
index++
|
|
||||||
}
|
|
||||||
|
|
||||||
// if safename starts with event.Name, we're above, and no event should be sent
|
|
||||||
} else if HasPathPrefix(safename, event.Name) {
|
|
||||||
//log.Println("Above!")
|
|
||||||
|
|
||||||
if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
|
|
||||||
log.Println("Removal!")
|
|
||||||
watcher.Remove(current)
|
|
||||||
index--
|
|
||||||
}
|
|
||||||
|
|
||||||
if deltaDepth < 0 {
|
|
||||||
log.Println("Parent!")
|
|
||||||
if PathPrefixDelta(safename, event.Name) == 1 { // we're the parent dir
|
|
||||||
//send = true
|
|
||||||
}
|
|
||||||
watcher.Remove(current)
|
|
||||||
index++
|
|
||||||
}
|
|
||||||
|
|
||||||
// if event.Name startswith safename, send event, we're already deeper
|
|
||||||
} else if HasPathPrefix(event.Name, safename) {
|
|
||||||
//log.Println("Event2!")
|
|
||||||
//send = true
|
|
||||||
}
|
|
||||||
|
|
||||||
case err := <-watcher.Errors:
|
|
||||||
log.Printf("error: %v", err)
|
|
||||||
log.Fatal(err)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// do our event sending all together to avoid duplicate msgs
|
|
||||||
if send {
|
|
||||||
send = false
|
|
||||||
ch <- true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
//close(ch)
|
|
||||||
}()
|
|
||||||
return ch
|
|
||||||
}
|
|
||||||
@@ -15,35 +15,38 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
// Package converger is a facility for reporting the converged state.
|
||||||
|
package converger
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TODO: we could make a new function that masks out the state of certain
|
// TODO: we could make a new function that masks out the state of certain
|
||||||
// UUID's, but at the moment the new Timer code has obsoleted the need...
|
// UID's, but at the moment the new Timer code has obsoleted the need...
|
||||||
|
|
||||||
// Converger is the general interface for implementing a convergence watcher
|
// Converger is the general interface for implementing a convergence watcher
|
||||||
type Converger interface { // TODO: need a better name
|
type Converger interface { // TODO: need a better name
|
||||||
Register() ConvergerUUID
|
Register() ConvergerUID
|
||||||
IsConverged(ConvergerUUID) bool // is the UUID converged ?
|
IsConverged(ConvergerUID) bool // is the UID converged ?
|
||||||
SetConverged(ConvergerUUID, bool) error // set the converged state of the UUID
|
SetConverged(ConvergerUID, bool) error // set the converged state of the UID
|
||||||
Unregister(ConvergerUUID)
|
Unregister(ConvergerUID)
|
||||||
Start()
|
Start()
|
||||||
Pause()
|
Pause()
|
||||||
Loop(bool)
|
Loop(bool)
|
||||||
ConvergedTimer(ConvergerUUID) <-chan time.Time
|
ConvergedTimer(ConvergerUID) <-chan time.Time
|
||||||
Status() map[uint64]bool
|
Status() map[uint64]bool
|
||||||
Timeout() int // returns the timeout that this was created with
|
Timeout() int // returns the timeout that this was created with
|
||||||
SetStateFn(func(bool) error) // sets the stateFn
|
SetStateFn(func(bool) error) // sets the stateFn
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConvergerUUID is the interface resources can use to notify with if converged
|
// ConvergerUID is the interface resources can use to notify with if converged
|
||||||
// you'll need to use part of the Converger interface to Register initially too
|
// you'll need to use part of the Converger interface to Register initially too
|
||||||
type ConvergerUUID interface {
|
type ConvergerUID interface {
|
||||||
ID() uint64 // get Id
|
ID() uint64 // get Id
|
||||||
Name() string // get a friendly name
|
Name() string // get a friendly name
|
||||||
SetName(string)
|
SetName(string)
|
||||||
@@ -70,8 +73,8 @@ type converger struct {
|
|||||||
status map[uint64]bool
|
status map[uint64]bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// convergerUUID is an implementation of the ConvergerUUID interface
|
// convergerUID is an implementation of the ConvergerUID interface
|
||||||
type convergerUUID struct {
|
type convergerUID struct {
|
||||||
converger Converger
|
converger Converger
|
||||||
id uint64
|
id uint64
|
||||||
name string // user defined, friendly name
|
name string // user defined, friendly name
|
||||||
@@ -92,13 +95,13 @@ func NewConverger(timeout int, stateFn func(bool) error) *converger {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register assigns a ConvergerUUID to the caller
|
// Register assigns a ConvergerUID to the caller
|
||||||
func (obj *converger) Register() ConvergerUUID {
|
func (obj *converger) Register() ConvergerUID {
|
||||||
obj.mutex.Lock()
|
obj.mutex.Lock()
|
||||||
defer obj.mutex.Unlock()
|
defer obj.mutex.Unlock()
|
||||||
obj.lastid++
|
obj.lastid++
|
||||||
obj.status[obj.lastid] = false // initialize as not converged
|
obj.status[obj.lastid] = false // initialize as not converged
|
||||||
return &convergerUUID{
|
return &convergerUID{
|
||||||
converger: obj,
|
converger: obj,
|
||||||
id: obj.lastid,
|
id: obj.lastid,
|
||||||
name: fmt.Sprintf("%d", obj.lastid), // some default
|
name: fmt.Sprintf("%d", obj.lastid), // some default
|
||||||
@@ -107,30 +110,30 @@ func (obj *converger) Register() ConvergerUUID {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsConverged gets the converged status of a uuid
|
// IsConverged gets the converged status of a uid
|
||||||
func (obj *converger) IsConverged(uuid ConvergerUUID) bool {
|
func (obj *converger) IsConverged(uid ConvergerUID) bool {
|
||||||
if !uuid.IsValid() {
|
if !uid.IsValid() {
|
||||||
panic(fmt.Sprintf("Id of ConvergerUUID(%s) is nil!", uuid.Name()))
|
panic(fmt.Sprintf("Id of ConvergerUID(%s) is nil!", uid.Name()))
|
||||||
}
|
}
|
||||||
obj.mutex.RLock()
|
obj.mutex.RLock()
|
||||||
isConverged, found := obj.status[uuid.ID()] // lookup
|
isConverged, found := obj.status[uid.ID()] // lookup
|
||||||
obj.mutex.RUnlock()
|
obj.mutex.RUnlock()
|
||||||
if !found {
|
if !found {
|
||||||
panic("Id of ConvergerUUID is unregistered!")
|
panic("Id of ConvergerUID is unregistered!")
|
||||||
}
|
}
|
||||||
return isConverged
|
return isConverged
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetConverged updates the converger with the converged state of the UUID
|
// SetConverged updates the converger with the converged state of the UID
|
||||||
func (obj *converger) SetConverged(uuid ConvergerUUID, isConverged bool) error {
|
func (obj *converger) SetConverged(uid ConvergerUID, isConverged bool) error {
|
||||||
if !uuid.IsValid() {
|
if !uid.IsValid() {
|
||||||
return fmt.Errorf("Id of ConvergerUUID(%s) is nil!", uuid.Name())
|
return fmt.Errorf("Id of ConvergerUID(%s) is nil!", uid.Name())
|
||||||
}
|
}
|
||||||
obj.mutex.Lock()
|
obj.mutex.Lock()
|
||||||
if _, found := obj.status[uuid.ID()]; !found {
|
if _, found := obj.status[uid.ID()]; !found {
|
||||||
panic("Id of ConvergerUUID is unregistered!")
|
panic("Id of ConvergerUID is unregistered!")
|
||||||
}
|
}
|
||||||
obj.status[uuid.ID()] = isConverged // set
|
obj.status[uid.ID()] = isConverged // set
|
||||||
obj.mutex.Unlock() // unlock *before* poke or deadlock!
|
obj.mutex.Unlock() // unlock *before* poke or deadlock!
|
||||||
if isConverged != obj.converged { // only poke if it would be helpful
|
if isConverged != obj.converged { // only poke if it would be helpful
|
||||||
// run in a go routine so that we never block... just queue up!
|
// run in a go routine so that we never block... just queue up!
|
||||||
@@ -140,7 +143,7 @@ func (obj *converger) SetConverged(uuid ConvergerUUID, isConverged bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// isConverged returns true if *every* registered uuid has converged
|
// isConverged returns true if *every* registered uid has converged
|
||||||
func (obj *converger) isConverged() bool {
|
func (obj *converger) isConverged() bool {
|
||||||
obj.mutex.RLock() // take a read lock
|
obj.mutex.RLock() // take a read lock
|
||||||
defer obj.mutex.RUnlock()
|
defer obj.mutex.RUnlock()
|
||||||
@@ -152,16 +155,16 @@ func (obj *converger) isConverged() bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unregister dissociates the ConvergedUUID from the converged checking
|
// Unregister dissociates the ConvergedUID from the converged checking
|
||||||
func (obj *converger) Unregister(uuid ConvergerUUID) {
|
func (obj *converger) Unregister(uid ConvergerUID) {
|
||||||
if !uuid.IsValid() {
|
if !uid.IsValid() {
|
||||||
panic(fmt.Sprintf("Id of ConvergerUUID(%s) is nil!", uuid.Name()))
|
panic(fmt.Sprintf("Id of ConvergerUID(%s) is nil!", uid.Name()))
|
||||||
}
|
}
|
||||||
obj.mutex.Lock()
|
obj.mutex.Lock()
|
||||||
uuid.StopTimer() // ignore any errors
|
uid.StopTimer() // ignore any errors
|
||||||
delete(obj.status, uuid.ID())
|
delete(obj.status, uid.ID())
|
||||||
obj.mutex.Unlock()
|
obj.mutex.Unlock()
|
||||||
uuid.InvalidateID()
|
uid.InvalidateID()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start causes a Converger object to start or resume running
|
// Start causes a Converger object to start or resume running
|
||||||
@@ -242,18 +245,18 @@ func (obj *converger) Loop(startPaused bool) {
|
|||||||
|
|
||||||
// ConvergedTimer adds a timeout to a select call and blocks until then
|
// ConvergedTimer adds a timeout to a select call and blocks until then
|
||||||
// TODO: this means we could eventually have per resource converged timeouts
|
// TODO: this means we could eventually have per resource converged timeouts
|
||||||
func (obj *converger) ConvergedTimer(uuid ConvergerUUID) <-chan time.Time {
|
func (obj *converger) ConvergedTimer(uid ConvergerUID) <-chan time.Time {
|
||||||
// be clever: if i'm already converged, this timeout should block which
|
// be clever: if i'm already converged, this timeout should block which
|
||||||
// avoids unnecessary new signals being sent! this avoids fast loops if
|
// avoids unnecessary new signals being sent! this avoids fast loops if
|
||||||
// we have a low timeout, or in particular a timeout == 0
|
// we have a low timeout, or in particular a timeout == 0
|
||||||
if uuid.IsConverged() {
|
if uid.IsConverged() {
|
||||||
// blocks the case statement in select forever!
|
// blocks the case statement in select forever!
|
||||||
return TimeAfterOrBlock(-1)
|
return util.TimeAfterOrBlock(-1)
|
||||||
}
|
}
|
||||||
return TimeAfterOrBlock(obj.timeout)
|
return util.TimeAfterOrBlock(obj.timeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Status returns a map of the converged status of each UUID.
|
// Status returns a map of the converged status of each UID.
|
||||||
func (obj *converger) Status() map[uint64]bool {
|
func (obj *converger) Status() map[uint64]bool {
|
||||||
status := make(map[uint64]bool)
|
status := make(map[uint64]bool)
|
||||||
obj.mutex.RLock() // take a read lock
|
obj.mutex.RLock() // take a read lock
|
||||||
@@ -276,53 +279,53 @@ func (obj *converger) SetStateFn(stateFn func(bool) error) {
|
|||||||
obj.stateFn = stateFn
|
obj.stateFn = stateFn
|
||||||
}
|
}
|
||||||
|
|
||||||
// Id returns the unique id of this UUID object
|
// Id returns the unique id of this UID object
|
||||||
func (obj *convergerUUID) ID() uint64 {
|
func (obj *convergerUID) ID() uint64 {
|
||||||
return obj.id
|
return obj.id
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name returns a user defined name for the specific convergerUUID.
|
// Name returns a user defined name for the specific convergerUID.
|
||||||
func (obj *convergerUUID) Name() string {
|
func (obj *convergerUID) Name() string {
|
||||||
return obj.name
|
return obj.name
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetName sets a user defined name for the specific convergerUUID.
|
// SetName sets a user defined name for the specific convergerUID.
|
||||||
func (obj *convergerUUID) SetName(name string) {
|
func (obj *convergerUID) SetName(name string) {
|
||||||
obj.name = name
|
obj.name = name
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsValid tells us if the id is valid or has already been destroyed
|
// IsValid tells us if the id is valid or has already been destroyed
|
||||||
func (obj *convergerUUID) IsValid() bool {
|
func (obj *convergerUID) IsValid() bool {
|
||||||
return obj.id != 0 // an id of 0 is invalid
|
return obj.id != 0 // an id of 0 is invalid
|
||||||
}
|
}
|
||||||
|
|
||||||
// InvalidateID marks the id as no longer valid
|
// InvalidateID marks the id as no longer valid
|
||||||
func (obj *convergerUUID) InvalidateID() {
|
func (obj *convergerUID) InvalidateID() {
|
||||||
obj.id = 0 // an id of 0 is invalid
|
obj.id = 0 // an id of 0 is invalid
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsConverged is a helper function to the regular IsConverged method
|
// IsConverged is a helper function to the regular IsConverged method
|
||||||
func (obj *convergerUUID) IsConverged() bool {
|
func (obj *convergerUID) IsConverged() bool {
|
||||||
return obj.converger.IsConverged(obj)
|
return obj.converger.IsConverged(obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetConverged is a helper function to the regular SetConverged notification
|
// SetConverged is a helper function to the regular SetConverged notification
|
||||||
func (obj *convergerUUID) SetConverged(isConverged bool) error {
|
func (obj *convergerUID) SetConverged(isConverged bool) error {
|
||||||
return obj.converger.SetConverged(obj, isConverged)
|
return obj.converger.SetConverged(obj, isConverged)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unregister is a helper function to unregister myself
|
// Unregister is a helper function to unregister myself
|
||||||
func (obj *convergerUUID) Unregister() {
|
func (obj *convergerUID) Unregister() {
|
||||||
obj.converger.Unregister(obj)
|
obj.converger.Unregister(obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConvergedTimer is a helper around the regular ConvergedTimer method
|
// ConvergedTimer is a helper around the regular ConvergedTimer method
|
||||||
func (obj *convergerUUID) ConvergedTimer() <-chan time.Time {
|
func (obj *convergerUID) ConvergedTimer() <-chan time.Time {
|
||||||
return obj.converger.ConvergedTimer(obj)
|
return obj.converger.ConvergedTimer(obj)
|
||||||
}
|
}
|
||||||
|
|
||||||
// StartTimer runs an invisible timer that automatically converges on timeout.
|
// StartTimer runs an invisible timer that automatically converges on timeout.
|
||||||
func (obj *convergerUUID) StartTimer() (func() error, error) {
|
func (obj *convergerUID) StartTimer() (func() error, error) {
|
||||||
obj.mutex.Lock()
|
obj.mutex.Lock()
|
||||||
if !obj.running {
|
if !obj.running {
|
||||||
obj.timer = make(chan struct{})
|
obj.timer = make(chan struct{})
|
||||||
@@ -356,7 +359,7 @@ func (obj *convergerUUID) StartTimer() (func() error, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ResetTimer resets the counter to zero if using a StartTimer internally.
|
// ResetTimer resets the counter to zero if using a StartTimer internally.
|
||||||
func (obj *convergerUUID) ResetTimer() error {
|
func (obj *convergerUID) ResetTimer() error {
|
||||||
obj.mutex.Lock()
|
obj.mutex.Lock()
|
||||||
defer obj.mutex.Unlock()
|
defer obj.mutex.Unlock()
|
||||||
if obj.running {
|
if obj.running {
|
||||||
@@ -367,7 +370,7 @@ func (obj *convergerUUID) ResetTimer() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StopTimer stops the running timer permanently until a StartTimer is run.
|
// StopTimer stops the running timer permanently until a StartTimer is run.
|
||||||
func (obj *convergerUUID) StopTimer() error {
|
func (obj *convergerUID) StopTimer() error {
|
||||||
obj.mutex.Lock()
|
obj.mutex.Lock()
|
||||||
defer obj.mutex.Unlock()
|
defer obj.mutex.Unlock()
|
||||||
if !obj.running {
|
if !obj.running {
|
||||||
@@ -15,12 +15,5 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package main provides the main entrypoint for using the `mgmt` software.
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
|
||||||
//"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
//func TestT1(t *testing.T) {
|
|
||||||
|
|
||||||
//}
|
|
||||||
@@ -24,16 +24,8 @@
|
|||||||
// TODO: Auto assign ports/ip's for peers (if possible)
|
// TODO: Auto assign ports/ip's for peers (if possible)
|
||||||
// TODO: Fix godoc
|
// TODO: Fix godoc
|
||||||
|
|
||||||
// Smoke testing:
|
// Package etcd implements the distributed key value store integration.
|
||||||
// ./mgmt run --file examples/etcd1a.yaml --hostname h1
|
// This also takes care of managing and clustering the embedded etcd server.
|
||||||
// ./mgmt run --file examples/etcd1b.yaml --hostname h2 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382
|
|
||||||
// ./mgmt run --file examples/etcd1c.yaml --hostname h3 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2383 --server-urls http://127.0.0.1:2384
|
|
||||||
// ./mgmt run --file examples/etcd1d.yaml --hostname h4 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2385 --server-urls http://127.0.0.1:2386
|
|
||||||
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 member list
|
|
||||||
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2381 member list
|
|
||||||
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 put /_mgmt/idealClusterSize 3
|
|
||||||
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2381 put /_mgmt/idealClusterSize 5
|
|
||||||
|
|
||||||
// The elastic etcd algorithm works in the following way:
|
// The elastic etcd algorithm works in the following way:
|
||||||
// * When you start up mgmt, you can pass it a list of seeds.
|
// * When you start up mgmt, you can pass it a list of seeds.
|
||||||
// * If no seeds are given, then assume you are the first server and startup.
|
// * If no seeds are given, then assume you are the first server and startup.
|
||||||
@@ -42,7 +34,18 @@
|
|||||||
// * If a client has been nominated, it should startup a server.
|
// * If a client has been nominated, it should startup a server.
|
||||||
// * All servers should list for their nomination to be removed and shutdown if so.
|
// * All servers should list for their nomination to be removed and shutdown if so.
|
||||||
// * The elected leader should decide who to nominate/unnominate to keep the right number of servers.
|
// * The elected leader should decide who to nominate/unnominate to keep the right number of servers.
|
||||||
package main
|
//
|
||||||
|
// Smoke testing:
|
||||||
|
// mkdir /tmp/mgmt{A..E}
|
||||||
|
// ./mgmt run --yaml examples/etcd1a.yaml --hostname h1 --tmp-prefix
|
||||||
|
// ./mgmt run --yaml examples/etcd1b.yaml --hostname h2 --tmp-prefix --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382
|
||||||
|
// ./mgmt run --yaml examples/etcd1c.yaml --hostname h3 --tmp-prefix --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2383 --server-urls http://127.0.0.1:2384
|
||||||
|
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 put /_mgmt/idealClusterSize 3
|
||||||
|
// ./mgmt run --yaml examples/etcd1d.yaml --hostname h4 --tmp-prefix --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2385 --server-urls http://127.0.0.1:2386
|
||||||
|
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 member list
|
||||||
|
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2381 put /_mgmt/idealClusterSize 5
|
||||||
|
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2381 member list
|
||||||
|
package etcd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
@@ -59,6 +62,12 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/converger"
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
"github.com/purpleidea/mgmt/resources"
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
|
||||||
etcd "github.com/coreos/etcd/clientv3" // "clientv3"
|
etcd "github.com/coreos/etcd/clientv3" // "clientv3"
|
||||||
"github.com/coreos/etcd/embed"
|
"github.com/coreos/etcd/embed"
|
||||||
"github.com/coreos/etcd/etcdserver"
|
"github.com/coreos/etcd/etcdserver"
|
||||||
@@ -78,7 +87,7 @@ const (
|
|||||||
maxClientConnectRetries = 5 // number of times to retry consecutive connect failures
|
maxClientConnectRetries = 5 // number of times to retry consecutive connect failures
|
||||||
selfRemoveTimeout = 3 // give unnominated members a chance to self exit
|
selfRemoveTimeout = 3 // give unnominated members a chance to self exit
|
||||||
exitDelay = 3 // number of sec of inactivity after exit to clean up
|
exitDelay = 3 // number of sec of inactivity after exit to clean up
|
||||||
defaultIdealClusterSize = 5 // default ideal cluster size target for initial seed
|
DefaultIdealClusterSize = 5 // default ideal cluster size target for initial seed
|
||||||
DefaultClientURL = "127.0.0.1:2379"
|
DefaultClientURL = "127.0.0.1:2379"
|
||||||
DefaultServerURL = "127.0.0.1:2380"
|
DefaultServerURL = "127.0.0.1:2380"
|
||||||
)
|
)
|
||||||
@@ -94,7 +103,7 @@ type AW struct {
|
|||||||
callback func(*RE) error
|
callback func(*RE) error
|
||||||
errCheck bool
|
errCheck bool
|
||||||
skipConv bool // ask event to skip converger updates
|
skipConv bool // ask event to skip converger updates
|
||||||
resp Resp
|
resp event.Resp
|
||||||
cancelFunc func() // data
|
cancelFunc func() // data
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -116,7 +125,7 @@ type KV struct {
|
|||||||
key string
|
key string
|
||||||
value string
|
value string
|
||||||
opts []etcd.OpOption
|
opts []etcd.OpOption
|
||||||
resp Resp
|
resp event.Resp
|
||||||
}
|
}
|
||||||
|
|
||||||
// GQ is a struct for the get queue
|
// GQ is a struct for the get queue
|
||||||
@@ -124,7 +133,7 @@ type GQ struct {
|
|||||||
path string
|
path string
|
||||||
skipConv bool
|
skipConv bool
|
||||||
opts []etcd.OpOption
|
opts []etcd.OpOption
|
||||||
resp Resp
|
resp event.Resp
|
||||||
data map[string]string
|
data map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -132,7 +141,7 @@ type GQ struct {
|
|||||||
type DL struct {
|
type DL struct {
|
||||||
path string
|
path string
|
||||||
opts []etcd.OpOption
|
opts []etcd.OpOption
|
||||||
resp Resp
|
resp event.Resp
|
||||||
data int64
|
data int64
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -141,7 +150,7 @@ type TN struct {
|
|||||||
ifcmps []etcd.Cmp
|
ifcmps []etcd.Cmp
|
||||||
thenops []etcd.Op
|
thenops []etcd.Op
|
||||||
elseops []etcd.Op
|
elseops []etcd.Op
|
||||||
resp Resp
|
resp event.Resp
|
||||||
data *etcd.TxnResponse
|
data *etcd.TxnResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -182,7 +191,7 @@ type EmbdEtcd struct { // EMBeddeD etcd
|
|||||||
txnq chan *TN // txn queue
|
txnq chan *TN // txn queue
|
||||||
|
|
||||||
prefix string // folder prefix to use for misc storage
|
prefix string // folder prefix to use for misc storage
|
||||||
converger Converger // converged tracking
|
converger converger.Converger // converged tracking
|
||||||
|
|
||||||
// etcd server related
|
// etcd server related
|
||||||
serverwg sync.WaitGroup // wait for server to shutdown
|
serverwg sync.WaitGroup // wait for server to shutdown
|
||||||
@@ -191,7 +200,7 @@ type EmbdEtcd struct { // EMBeddeD etcd
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewEmbdEtcd creates the top level embedded etcd struct client and server obj
|
// NewEmbdEtcd creates the top level embedded etcd struct client and server obj
|
||||||
func NewEmbdEtcd(hostname string, seeds, clientURLs, serverURLs etcdtypes.URLs, noServer bool, idealClusterSize uint16, prefix string, converger Converger) *EmbdEtcd {
|
func NewEmbdEtcd(hostname string, seeds, clientURLs, serverURLs etcdtypes.URLs, noServer bool, idealClusterSize uint16, prefix string, converger converger.Converger) *EmbdEtcd {
|
||||||
endpoints := make(etcdtypes.URLsMap)
|
endpoints := make(etcdtypes.URLsMap)
|
||||||
if hostname == seedSentinel { // safety
|
if hostname == seedSentinel { // safety
|
||||||
return nil
|
return nil
|
||||||
@@ -264,7 +273,7 @@ func (obj *EmbdEtcd) GetConfig() etcd.Config {
|
|||||||
// Connect connects the client to a server, and then builds the *API structs.
|
// Connect connects the client to a server, and then builds the *API structs.
|
||||||
// If reconnect is true, it will force a reconnect with new config endpoints.
|
// If reconnect is true, it will force a reconnect with new config endpoints.
|
||||||
func (obj *EmbdEtcd) Connect(reconnect bool) error {
|
func (obj *EmbdEtcd) Connect(reconnect bool) error {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Println("Etcd: Connect...")
|
log.Println("Etcd: Connect...")
|
||||||
}
|
}
|
||||||
obj.cLock.Lock()
|
obj.cLock.Lock()
|
||||||
@@ -520,29 +529,29 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
|
|||||||
var isTimeout = false
|
var isTimeout = false
|
||||||
var iter int // = 0
|
var iter int // = 0
|
||||||
if ctxerr, ok := ctx.Value(ctxErr).(error); ok {
|
if ctxerr, ok := ctx.Value(ctxErr).(error); ok {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: CtxError: err(%v), ctxerr(%v)", err, ctxerr)
|
log.Printf("Etcd: CtxError: err(%v), ctxerr(%v)", err, ctxerr)
|
||||||
}
|
}
|
||||||
if i, ok := ctx.Value(ctxIter).(int); ok {
|
if i, ok := ctx.Value(ctxIter).(int); ok {
|
||||||
iter = i + 1 // load and increment
|
iter = i + 1 // load and increment
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: CtxError: Iter: %v", iter)
|
log.Printf("Etcd: CtxError: Iter: %v", iter)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
isTimeout = err == context.DeadlineExceeded
|
isTimeout = err == context.DeadlineExceeded
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: CtxError: isTimeout: %v", isTimeout)
|
log.Printf("Etcd: CtxError: isTimeout: %v", isTimeout)
|
||||||
}
|
}
|
||||||
if !isTimeout {
|
if !isTimeout {
|
||||||
iter = 0 // reset timer
|
iter = 0 // reset timer
|
||||||
}
|
}
|
||||||
err = ctxerr // restore error
|
err = ctxerr // restore error
|
||||||
} else if DEBUG {
|
} else if global.DEBUG {
|
||||||
log.Printf("Etcd: CtxError: No value found")
|
log.Printf("Etcd: CtxError: No value found")
|
||||||
}
|
}
|
||||||
ctxHelper := func(tmin, texp, tmax int) context.Context {
|
ctxHelper := func(tmin, texp, tmax int) context.Context {
|
||||||
t := expBackoff(tmin, texp, iter, tmax)
|
t := expBackoff(tmin, texp, iter, tmax)
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: CtxError: Timeout: %v", t)
|
log.Printf("Etcd: CtxError: Timeout: %v", t)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -629,13 +638,13 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
|
|||||||
fallthrough
|
fallthrough
|
||||||
case isGrpc(grpc.ErrClientConnClosing):
|
case isGrpc(grpc.ErrClientConnClosing):
|
||||||
|
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: CtxError: Error(%T): %+v", err, err)
|
log.Printf("Etcd: CtxError: Error(%T): %+v", err, err)
|
||||||
log.Printf("Etcd: Endpoints are: %v", obj.client.Endpoints())
|
log.Printf("Etcd: Endpoints are: %v", obj.client.Endpoints())
|
||||||
log.Printf("Etcd: Client endpoints are: %v", obj.endpoints)
|
log.Printf("Etcd: Client endpoints are: %v", obj.endpoints)
|
||||||
}
|
}
|
||||||
|
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: CtxError: Locking...")
|
log.Printf("Etcd: CtxError: Locking...")
|
||||||
}
|
}
|
||||||
obj.rLock.Lock()
|
obj.rLock.Lock()
|
||||||
@@ -656,7 +665,7 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
|
|||||||
obj.ctxErr = fmt.Errorf("Etcd: Permanent connect error: %v", err)
|
obj.ctxErr = fmt.Errorf("Etcd: Permanent connect error: %v", err)
|
||||||
return ctx, obj.ctxErr
|
return ctx, obj.ctxErr
|
||||||
}
|
}
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: CtxError: Unlocking...")
|
log.Printf("Etcd: CtxError: Unlocking...")
|
||||||
}
|
}
|
||||||
obj.rLock.Unlock()
|
obj.rLock.Unlock()
|
||||||
@@ -683,24 +692,24 @@ func (obj *EmbdEtcd) CtxError(ctx context.Context, err error) (context.Context,
|
|||||||
|
|
||||||
// CbLoop is the loop where callback execution is serialized
|
// CbLoop is the loop where callback execution is serialized
|
||||||
func (obj *EmbdEtcd) CbLoop() {
|
func (obj *EmbdEtcd) CbLoop() {
|
||||||
cuuid := obj.converger.Register()
|
cuid := obj.converger.Register()
|
||||||
cuuid.SetName("Etcd: CbLoop")
|
cuid.SetName("Etcd: CbLoop")
|
||||||
defer cuuid.Unregister()
|
defer cuid.Unregister()
|
||||||
if e := obj.Connect(false); e != nil {
|
if e := obj.Connect(false); e != nil {
|
||||||
return // fatal
|
return // fatal
|
||||||
}
|
}
|
||||||
// we use this timer because when we ignore un-converge events and loop,
|
// we use this timer because when we ignore un-converge events and loop,
|
||||||
// we reset the ConvergedTimer case statement, ruining the timeout math!
|
// we reset the ConvergedTimer case statement, ruining the timeout math!
|
||||||
cuuid.StartTimer()
|
cuid.StartTimer()
|
||||||
for {
|
for {
|
||||||
ctx := context.Background() // TODO: inherit as input argument?
|
ctx := context.Background() // TODO: inherit as input argument?
|
||||||
select {
|
select {
|
||||||
// etcd watcher event
|
// etcd watcher event
|
||||||
case re := <-obj.wevents:
|
case re := <-obj.wevents:
|
||||||
if !re.skipConv { // if we want to count it...
|
if !re.skipConv { // if we want to count it...
|
||||||
cuuid.ResetTimer() // activity!
|
cuid.ResetTimer() // activity!
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: CbLoop: Event: StartLoop")
|
log.Printf("Trace: Etcd: CbLoop: Event: StartLoop")
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
@@ -708,11 +717,11 @@ func (obj *EmbdEtcd) CbLoop() {
|
|||||||
//re.resp.NACK() // nope!
|
//re.resp.NACK() // nope!
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: CbLoop: rawCallback()")
|
log.Printf("Trace: Etcd: CbLoop: rawCallback()")
|
||||||
}
|
}
|
||||||
err := rawCallback(ctx, re)
|
err := rawCallback(ctx, re)
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: CbLoop: rawCallback(): %v", err)
|
log.Printf("Trace: Etcd: CbLoop: rawCallback(): %v", err)
|
||||||
}
|
}
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -724,14 +733,14 @@ func (obj *EmbdEtcd) CbLoop() {
|
|||||||
break // TODO: it's bad, break or return?
|
break // TODO: it's bad, break or return?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: CbLoop: Event: FinishLoop")
|
log.Printf("Trace: Etcd: CbLoop: Event: FinishLoop")
|
||||||
}
|
}
|
||||||
|
|
||||||
// exit loop commit
|
// exit loop commit
|
||||||
case <-obj.exitTimeout:
|
case <-obj.exitTimeout:
|
||||||
log.Println("Etcd: Exiting callback loop!")
|
log.Println("Etcd: Exiting callback loop!")
|
||||||
cuuid.StopTimer() // clean up nicely
|
cuid.StopTimer() // clean up nicely
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -739,24 +748,24 @@ func (obj *EmbdEtcd) CbLoop() {
|
|||||||
|
|
||||||
// Loop is the main loop where everything is serialized
|
// Loop is the main loop where everything is serialized
|
||||||
func (obj *EmbdEtcd) Loop() {
|
func (obj *EmbdEtcd) Loop() {
|
||||||
cuuid := obj.converger.Register()
|
cuid := obj.converger.Register()
|
||||||
cuuid.SetName("Etcd: Loop")
|
cuid.SetName("Etcd: Loop")
|
||||||
defer cuuid.Unregister()
|
defer cuid.Unregister()
|
||||||
if e := obj.Connect(false); e != nil {
|
if e := obj.Connect(false); e != nil {
|
||||||
return // fatal
|
return // fatal
|
||||||
}
|
}
|
||||||
cuuid.StartTimer()
|
cuid.StartTimer()
|
||||||
for {
|
for {
|
||||||
ctx := context.Background() // TODO: inherit as input argument?
|
ctx := context.Background() // TODO: inherit as input argument?
|
||||||
// priority channel...
|
// priority channel...
|
||||||
select {
|
select {
|
||||||
case aw := <-obj.awq:
|
case aw := <-obj.awq:
|
||||||
cuuid.ResetTimer() // activity!
|
cuid.ResetTimer() // activity!
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: PriorityAW: StartLoop")
|
log.Printf("Trace: Etcd: Loop: PriorityAW: StartLoop")
|
||||||
}
|
}
|
||||||
obj.loopProcessAW(ctx, aw)
|
obj.loopProcessAW(ctx, aw)
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: PriorityAW: FinishLoop")
|
log.Printf("Trace: Etcd: Loop: PriorityAW: FinishLoop")
|
||||||
}
|
}
|
||||||
continue // loop to drain the priority channel first!
|
continue // loop to drain the priority channel first!
|
||||||
@@ -767,19 +776,19 @@ func (obj *EmbdEtcd) Loop() {
|
|||||||
select {
|
select {
|
||||||
// add watcher
|
// add watcher
|
||||||
case aw := <-obj.awq:
|
case aw := <-obj.awq:
|
||||||
cuuid.ResetTimer() // activity!
|
cuid.ResetTimer() // activity!
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: AW: StartLoop")
|
log.Printf("Trace: Etcd: Loop: AW: StartLoop")
|
||||||
}
|
}
|
||||||
obj.loopProcessAW(ctx, aw)
|
obj.loopProcessAW(ctx, aw)
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: AW: FinishLoop")
|
log.Printf("Trace: Etcd: Loop: AW: FinishLoop")
|
||||||
}
|
}
|
||||||
|
|
||||||
// set kv pair
|
// set kv pair
|
||||||
case kv := <-obj.setq:
|
case kv := <-obj.setq:
|
||||||
cuuid.ResetTimer() // activity!
|
cuid.ResetTimer() // activity!
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: Set: StartLoop")
|
log.Printf("Trace: Etcd: Loop: Set: StartLoop")
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
@@ -796,16 +805,16 @@ func (obj *EmbdEtcd) Loop() {
|
|||||||
break // TODO: it's bad, break or return?
|
break // TODO: it's bad, break or return?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: Set: FinishLoop")
|
log.Printf("Trace: Etcd: Loop: Set: FinishLoop")
|
||||||
}
|
}
|
||||||
|
|
||||||
// get value
|
// get value
|
||||||
case gq := <-obj.getq:
|
case gq := <-obj.getq:
|
||||||
if !gq.skipConv {
|
if !gq.skipConv {
|
||||||
cuuid.ResetTimer() // activity!
|
cuid.ResetTimer() // activity!
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: Get: StartLoop")
|
log.Printf("Trace: Etcd: Loop: Get: StartLoop")
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
@@ -823,14 +832,14 @@ func (obj *EmbdEtcd) Loop() {
|
|||||||
break // TODO: it's bad, break or return?
|
break // TODO: it's bad, break or return?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: Get: FinishLoop")
|
log.Printf("Trace: Etcd: Loop: Get: FinishLoop")
|
||||||
}
|
}
|
||||||
|
|
||||||
// delete value
|
// delete value
|
||||||
case dl := <-obj.delq:
|
case dl := <-obj.delq:
|
||||||
cuuid.ResetTimer() // activity!
|
cuid.ResetTimer() // activity!
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: Delete: StartLoop")
|
log.Printf("Trace: Etcd: Loop: Delete: StartLoop")
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
@@ -848,14 +857,14 @@ func (obj *EmbdEtcd) Loop() {
|
|||||||
break // TODO: it's bad, break or return?
|
break // TODO: it's bad, break or return?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: Delete: FinishLoop")
|
log.Printf("Trace: Etcd: Loop: Delete: FinishLoop")
|
||||||
}
|
}
|
||||||
|
|
||||||
// run txn
|
// run txn
|
||||||
case tn := <-obj.txnq:
|
case tn := <-obj.txnq:
|
||||||
cuuid.ResetTimer() // activity!
|
cuid.ResetTimer() // activity!
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: Txn: StartLoop")
|
log.Printf("Trace: Etcd: Loop: Txn: StartLoop")
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
@@ -873,7 +882,7 @@ func (obj *EmbdEtcd) Loop() {
|
|||||||
break // TODO: it's bad, break or return?
|
break // TODO: it's bad, break or return?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: Loop: Txn: FinishLoop")
|
log.Printf("Trace: Etcd: Loop: Txn: FinishLoop")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -884,12 +893,12 @@ func (obj *EmbdEtcd) Loop() {
|
|||||||
// seconds of inactivity in this select switch, which
|
// seconds of inactivity in this select switch, which
|
||||||
// lets everything get bled dry to avoid blocking calls
|
// lets everything get bled dry to avoid blocking calls
|
||||||
// which would otherwise block us from exiting cleanly!
|
// which would otherwise block us from exiting cleanly!
|
||||||
obj.exitTimeout = TimeAfterOrBlock(exitDelay)
|
obj.exitTimeout = util.TimeAfterOrBlock(exitDelay)
|
||||||
|
|
||||||
// exit loop commit
|
// exit loop commit
|
||||||
case <-obj.exitTimeout:
|
case <-obj.exitTimeout:
|
||||||
log.Println("Etcd: Exiting loop!")
|
log.Println("Etcd: Exiting loop!")
|
||||||
cuuid.StopTimer() // clean up nicely
|
cuid.StopTimer() // clean up nicely
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -917,17 +926,17 @@ func (obj *EmbdEtcd) loopProcessAW(ctx context.Context, aw *AW) {
|
|||||||
|
|
||||||
// Set queues up a set operation to occur using our mainloop
|
// Set queues up a set operation to occur using our mainloop
|
||||||
func (obj *EmbdEtcd) Set(key, value string, opts ...etcd.OpOption) error {
|
func (obj *EmbdEtcd) Set(key, value string, opts ...etcd.OpOption) error {
|
||||||
resp := NewResp()
|
resp := event.NewResp()
|
||||||
obj.setq <- &KV{key: key, value: value, opts: opts, resp: resp}
|
obj.setq <- &KV{key: key, value: value, opts: opts, resp: resp}
|
||||||
if !resp.Wait() { // wait for ack/nack
|
if err := resp.Wait(); err != nil { // wait for ack/nack
|
||||||
return fmt.Errorf("Etcd: Set: Probably received an exit...")
|
return fmt.Errorf("Etcd: Set: Probably received an exit: %v", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// rawSet actually implements the key set operation
|
// rawSet actually implements the key set operation
|
||||||
func (obj *EmbdEtcd) rawSet(ctx context.Context, kv *KV) error {
|
func (obj *EmbdEtcd) rawSet(ctx context.Context, kv *KV) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawSet()")
|
log.Printf("Trace: Etcd: rawSet()")
|
||||||
}
|
}
|
||||||
// key is the full key path
|
// key is the full key path
|
||||||
@@ -936,7 +945,7 @@ func (obj *EmbdEtcd) rawSet(ctx context.Context, kv *KV) error {
|
|||||||
response, err := obj.client.KV.Put(ctx, kv.key, kv.value, kv.opts...)
|
response, err := obj.client.KV.Put(ctx, kv.key, kv.value, kv.opts...)
|
||||||
obj.rLock.RUnlock()
|
obj.rLock.RUnlock()
|
||||||
log.Printf("Etcd: Set(%s): %v", kv.key, response) // w00t... bonus
|
log.Printf("Etcd: Set(%s): %v", kv.key, response) // w00t... bonus
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawSet(): %v", err)
|
log.Printf("Trace: Etcd: rawSet(): %v", err)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@@ -951,17 +960,17 @@ func (obj *EmbdEtcd) Get(path string, opts ...etcd.OpOption) (map[string]string,
|
|||||||
// accept more arguments that are useful for the less common operations.
|
// accept more arguments that are useful for the less common operations.
|
||||||
// TODO: perhaps a get should never cause an un-converge ?
|
// TODO: perhaps a get should never cause an un-converge ?
|
||||||
func (obj *EmbdEtcd) ComplexGet(path string, skipConv bool, opts ...etcd.OpOption) (map[string]string, error) {
|
func (obj *EmbdEtcd) ComplexGet(path string, skipConv bool, opts ...etcd.OpOption) (map[string]string, error) {
|
||||||
resp := NewResp()
|
resp := event.NewResp()
|
||||||
gq := &GQ{path: path, skipConv: skipConv, opts: opts, resp: resp, data: nil}
|
gq := &GQ{path: path, skipConv: skipConv, opts: opts, resp: resp, data: nil}
|
||||||
obj.getq <- gq // send
|
obj.getq <- gq // send
|
||||||
if !resp.Wait() { // wait for ack/nack
|
if err := resp.Wait(); err != nil { // wait for ack/nack
|
||||||
return nil, fmt.Errorf("Etcd: Get: Probably received an exit...")
|
return nil, fmt.Errorf("Etcd: Get: Probably received an exit: %v", err)
|
||||||
}
|
}
|
||||||
return gq.data, nil
|
return gq.data, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (obj *EmbdEtcd) rawGet(ctx context.Context, gq *GQ) (result map[string]string, err error) {
|
func (obj *EmbdEtcd) rawGet(ctx context.Context, gq *GQ) (result map[string]string, err error) {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawGet()")
|
log.Printf("Trace: Etcd: rawGet()")
|
||||||
}
|
}
|
||||||
obj.rLock.RLock()
|
obj.rLock.RLock()
|
||||||
@@ -977,7 +986,7 @@ func (obj *EmbdEtcd) rawGet(ctx context.Context, gq *GQ) (result map[string]stri
|
|||||||
result[bytes.NewBuffer(x.Key).String()] = bytes.NewBuffer(x.Value).String()
|
result[bytes.NewBuffer(x.Key).String()] = bytes.NewBuffer(x.Value).String()
|
||||||
}
|
}
|
||||||
|
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawGet(): %v", result)
|
log.Printf("Trace: Etcd: rawGet(): %v", result)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
@@ -985,17 +994,17 @@ func (obj *EmbdEtcd) rawGet(ctx context.Context, gq *GQ) (result map[string]stri
|
|||||||
|
|
||||||
// Delete performs a delete operation and waits for an ACK to continue
|
// Delete performs a delete operation and waits for an ACK to continue
|
||||||
func (obj *EmbdEtcd) Delete(path string, opts ...etcd.OpOption) (int64, error) {
|
func (obj *EmbdEtcd) Delete(path string, opts ...etcd.OpOption) (int64, error) {
|
||||||
resp := NewResp()
|
resp := event.NewResp()
|
||||||
dl := &DL{path: path, opts: opts, resp: resp, data: -1}
|
dl := &DL{path: path, opts: opts, resp: resp, data: -1}
|
||||||
obj.delq <- dl // send
|
obj.delq <- dl // send
|
||||||
if !resp.Wait() { // wait for ack/nack
|
if err := resp.Wait(); err != nil { // wait for ack/nack
|
||||||
return -1, fmt.Errorf("Etcd: Delete: Probably received an exit...")
|
return -1, fmt.Errorf("Etcd: Delete: Probably received an exit: %v", err)
|
||||||
}
|
}
|
||||||
return dl.data, nil
|
return dl.data, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (obj *EmbdEtcd) rawDelete(ctx context.Context, dl *DL) (count int64, err error) {
|
func (obj *EmbdEtcd) rawDelete(ctx context.Context, dl *DL) (count int64, err error) {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawDelete()")
|
log.Printf("Trace: Etcd: rawDelete()")
|
||||||
}
|
}
|
||||||
count = -1
|
count = -1
|
||||||
@@ -1005,7 +1014,7 @@ func (obj *EmbdEtcd) rawDelete(ctx context.Context, dl *DL) (count int64, err er
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
count = response.Deleted
|
count = response.Deleted
|
||||||
}
|
}
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawDelete(): %v", err)
|
log.Printf("Trace: Etcd: rawDelete(): %v", err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
@@ -1013,23 +1022,23 @@ func (obj *EmbdEtcd) rawDelete(ctx context.Context, dl *DL) (count int64, err er
|
|||||||
|
|
||||||
// Txn performs a transaction and waits for an ACK to continue
|
// Txn performs a transaction and waits for an ACK to continue
|
||||||
func (obj *EmbdEtcd) Txn(ifcmps []etcd.Cmp, thenops, elseops []etcd.Op) (*etcd.TxnResponse, error) {
|
func (obj *EmbdEtcd) Txn(ifcmps []etcd.Cmp, thenops, elseops []etcd.Op) (*etcd.TxnResponse, error) {
|
||||||
resp := NewResp()
|
resp := event.NewResp()
|
||||||
tn := &TN{ifcmps: ifcmps, thenops: thenops, elseops: elseops, resp: resp, data: nil}
|
tn := &TN{ifcmps: ifcmps, thenops: thenops, elseops: elseops, resp: resp, data: nil}
|
||||||
obj.txnq <- tn // send
|
obj.txnq <- tn // send
|
||||||
if !resp.Wait() { // wait for ack/nack
|
if err := resp.Wait(); err != nil { // wait for ack/nack
|
||||||
return nil, fmt.Errorf("Etcd: Txn: Probably received an exit...")
|
return nil, fmt.Errorf("Etcd: Txn: Probably received an exit: %v", err)
|
||||||
}
|
}
|
||||||
return tn.data, nil
|
return tn.data, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (obj *EmbdEtcd) rawTxn(ctx context.Context, tn *TN) (*etcd.TxnResponse, error) {
|
func (obj *EmbdEtcd) rawTxn(ctx context.Context, tn *TN) (*etcd.TxnResponse, error) {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawTxn()")
|
log.Printf("Trace: Etcd: rawTxn()")
|
||||||
}
|
}
|
||||||
obj.rLock.RLock()
|
obj.rLock.RLock()
|
||||||
response, err := obj.client.KV.Txn(ctx).If(tn.ifcmps...).Then(tn.thenops...).Else(tn.elseops...).Commit()
|
response, err := obj.client.KV.Txn(ctx).If(tn.ifcmps...).Then(tn.thenops...).Else(tn.elseops...).Commit()
|
||||||
obj.rLock.RUnlock()
|
obj.rLock.RUnlock()
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawTxn(): %v, %v", response, err)
|
log.Printf("Trace: Etcd: rawTxn(): %v, %v", response, err)
|
||||||
}
|
}
|
||||||
return response, err
|
return response, err
|
||||||
@@ -1038,11 +1047,11 @@ func (obj *EmbdEtcd) rawTxn(ctx context.Context, tn *TN) (*etcd.TxnResponse, err
|
|||||||
// AddWatcher queues up an add watcher request and returns a cancel function
|
// AddWatcher queues up an add watcher request and returns a cancel function
|
||||||
// Remember to add the etcd.WithPrefix() option if you want to watch recursively
|
// Remember to add the etcd.WithPrefix() option if you want to watch recursively
|
||||||
func (obj *EmbdEtcd) AddWatcher(path string, callback func(re *RE) error, errCheck bool, skipConv bool, opts ...etcd.OpOption) (func(), error) {
|
func (obj *EmbdEtcd) AddWatcher(path string, callback func(re *RE) error, errCheck bool, skipConv bool, opts ...etcd.OpOption) (func(), error) {
|
||||||
resp := NewResp()
|
resp := event.NewResp()
|
||||||
awq := &AW{path: path, opts: opts, callback: callback, errCheck: errCheck, skipConv: skipConv, cancelFunc: nil, resp: resp}
|
awq := &AW{path: path, opts: opts, callback: callback, errCheck: errCheck, skipConv: skipConv, cancelFunc: nil, resp: resp}
|
||||||
obj.awq <- awq // send
|
obj.awq <- awq // send
|
||||||
if !resp.Wait() { // wait for ack/nack
|
if err := resp.Wait(); err != nil { // wait for ack/nack
|
||||||
return nil, fmt.Errorf("Etcd: AddWatcher: Got NACK!")
|
return nil, fmt.Errorf("Etcd: AddWatcher: Got NACK: %v", err)
|
||||||
}
|
}
|
||||||
return awq.cancelFunc, nil
|
return awq.cancelFunc, nil
|
||||||
}
|
}
|
||||||
@@ -1063,7 +1072,7 @@ func (obj *EmbdEtcd) rawAddWatcher(ctx context.Context, aw *AW) (func(), error)
|
|||||||
err := response.Err()
|
err := response.Err()
|
||||||
isCanceled := response.Canceled || err == context.Canceled
|
isCanceled := response.Canceled || err == context.Canceled
|
||||||
if response.Header.Revision == 0 { // by inspection
|
if response.Header.Revision == 0 { // by inspection
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: Watch: Received empty message!") // switched client connection
|
log.Printf("Etcd: Watch: Received empty message!") // switched client connection
|
||||||
}
|
}
|
||||||
isCanceled = true
|
isCanceled = true
|
||||||
@@ -1077,14 +1086,14 @@ func (obj *EmbdEtcd) rawAddWatcher(ctx context.Context, aw *AW) (func(), error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err == nil { // watch from latest good revision
|
if err == nil { // watch from latest good revision
|
||||||
rev = response.Header.Revision // TODO +1 ?
|
rev = response.Header.Revision // TODO: +1 ?
|
||||||
useRev = true
|
useRev = true
|
||||||
if !locked {
|
if !locked {
|
||||||
retry = false
|
retry = false
|
||||||
}
|
}
|
||||||
locked = false
|
locked = false
|
||||||
} else {
|
} else {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: Watch: Error: %v", err) // probably fixable
|
log.Printf("Etcd: Watch: Error: %v", err) // probably fixable
|
||||||
}
|
}
|
||||||
// this new context is the fix for a tricky set
|
// this new context is the fix for a tricky set
|
||||||
@@ -1133,7 +1142,7 @@ func rawCallback(ctx context.Context, re *RE) error {
|
|||||||
// NOTE: the callback must *not* block!
|
// NOTE: the callback must *not* block!
|
||||||
// FIXME: do we need to pass ctx in via *RE, or in the callback signature ?
|
// FIXME: do we need to pass ctx in via *RE, or in the callback signature ?
|
||||||
err = callback(re) // run the callback
|
err = callback(re) // run the callback
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: rawCallback(): %v", err)
|
log.Printf("Trace: Etcd: rawCallback(): %v", err)
|
||||||
}
|
}
|
||||||
if !re.errCheck || err == nil {
|
if !re.errCheck || err == nil {
|
||||||
@@ -1151,7 +1160,7 @@ func rawCallback(ctx context.Context, re *RE) error {
|
|||||||
// FIXME: we might need to respond to member change/disconnect/shutdown events,
|
// FIXME: we might need to respond to member change/disconnect/shutdown events,
|
||||||
// see: https://github.com/coreos/etcd/issues/5277
|
// see: https://github.com/coreos/etcd/issues/5277
|
||||||
func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
|
func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: volunteerCallback()")
|
log.Printf("Trace: Etcd: volunteerCallback()")
|
||||||
defer log.Printf("Trace: Etcd: volunteerCallback(): Finished!")
|
defer log.Printf("Trace: Etcd: volunteerCallback(): Finished!")
|
||||||
}
|
}
|
||||||
@@ -1178,7 +1187,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Etcd: Members: Error: %+v", err)
|
return fmt.Errorf("Etcd: Members: Error: %+v", err)
|
||||||
}
|
}
|
||||||
members := StrMapValuesUint64(membersMap) // get values
|
members := util.StrMapValuesUint64(membersMap) // get values
|
||||||
log.Printf("Etcd: Members: List: %+v", members)
|
log.Printf("Etcd: Members: List: %+v", members)
|
||||||
|
|
||||||
// we only do *one* change operation at a time so that the cluster can
|
// we only do *one* change operation at a time so that the cluster can
|
||||||
@@ -1224,7 +1233,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
|
|||||||
log.Printf("Etcd: Volunteers: %v", volunteers)
|
log.Printf("Etcd: Volunteers: %v", volunteers)
|
||||||
|
|
||||||
// unnominate anyone that unvolunteers, so that they can shutdown cleanly
|
// unnominate anyone that unvolunteers, so that they can shutdown cleanly
|
||||||
quitters := StrFilterElementsInList(volunteers, members)
|
quitters := util.StrFilterElementsInList(volunteers, members)
|
||||||
log.Printf("Etcd: Quitters: %v", quitters)
|
log.Printf("Etcd: Quitters: %v", quitters)
|
||||||
|
|
||||||
// if we're the only member left, just shutdown...
|
// if we're the only member left, just shutdown...
|
||||||
@@ -1236,7 +1245,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
candidates := StrFilterElementsInList(members, volunteers)
|
candidates := util.StrFilterElementsInList(members, volunteers)
|
||||||
log.Printf("Etcd: Candidates: %v", candidates)
|
log.Printf("Etcd: Candidates: %v", candidates)
|
||||||
|
|
||||||
// TODO: switch to < 0 so that we can shut the whole cluster down with 0
|
// TODO: switch to < 0 so that we can shut the whole cluster down with 0
|
||||||
@@ -1291,7 +1300,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
|
|||||||
log.Printf("Etcd: Quitters: Shutting down %d members...", lq)
|
log.Printf("Etcd: Quitters: Shutting down %d members...", lq)
|
||||||
}
|
}
|
||||||
for _, quitter := range quitters {
|
for _, quitter := range quitters {
|
||||||
mID, ok := Uint64KeyFromStrInMap(quitter, membersMap)
|
mID, ok := util.Uint64KeyFromStrInMap(quitter, membersMap)
|
||||||
if !ok {
|
if !ok {
|
||||||
// programming error
|
// programming error
|
||||||
log.Fatalf("Etcd: Member Remove: Error: %v(%v) not in members list!", quitter, mID)
|
log.Fatalf("Etcd: Member Remove: Error: %v(%v) not in members list!", quitter, mID)
|
||||||
@@ -1339,7 +1348,7 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
|
|||||||
// nominateCallback runs to respond to the nomination list change events
|
// nominateCallback runs to respond to the nomination list change events
|
||||||
// functionally, it controls the starting and stopping of the server process
|
// functionally, it controls the starting and stopping of the server process
|
||||||
func (obj *EmbdEtcd) nominateCallback(re *RE) error {
|
func (obj *EmbdEtcd) nominateCallback(re *RE) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: nominateCallback()")
|
log.Printf("Trace: Etcd: nominateCallback()")
|
||||||
defer log.Printf("Trace: Etcd: nominateCallback(): Finished!")
|
defer log.Printf("Trace: Etcd: nominateCallback(): Finished!")
|
||||||
}
|
}
|
||||||
@@ -1388,10 +1397,10 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {
|
|||||||
_, exists := obj.nominated[obj.hostname]
|
_, exists := obj.nominated[obj.hostname]
|
||||||
// FIXME: can we get rid of the len(obj.nominated) == 0 ?
|
// FIXME: can we get rid of the len(obj.nominated) == 0 ?
|
||||||
newCluster := len(obj.nominated) == 0 || (len(obj.nominated) == 1 && exists)
|
newCluster := len(obj.nominated) == 0 || (len(obj.nominated) == 1 && exists)
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: nominateCallback(): newCluster: %v; exists: %v; obj.server == nil: %t", newCluster, exists, obj.server == nil)
|
log.Printf("Etcd: nominateCallback(): newCluster: %v; exists: %v; obj.server == nil: %t", newCluster, exists, obj.server == nil)
|
||||||
}
|
}
|
||||||
// XXX check if i have actually volunteered first of all...
|
// XXX: check if i have actually volunteered first of all...
|
||||||
if obj.server == nil && (newCluster || exists) {
|
if obj.server == nil && (newCluster || exists) {
|
||||||
|
|
||||||
log.Printf("Etcd: StartServer(newCluster: %t): %+v", newCluster, obj.nominated)
|
log.Printf("Etcd: StartServer(newCluster: %t): %+v", newCluster, obj.nominated)
|
||||||
@@ -1400,8 +1409,12 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {
|
|||||||
obj.nominated, // other peer members and urls or empty map
|
obj.nominated, // other peer members and urls or empty map
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
var retries uint
|
||||||
|
if re != nil {
|
||||||
|
retries = re.retries
|
||||||
|
}
|
||||||
// retry maxStartServerRetries times, then permanently fail
|
// retry maxStartServerRetries times, then permanently fail
|
||||||
return &CtxRetriesErr{maxStartServerRetries - re.retries, fmt.Sprintf("Etcd: StartServer: Error: %+v", err)}
|
return &CtxRetriesErr{maxStartServerRetries - retries, fmt.Sprintf("Etcd: StartServer: Error: %+v", err)}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(obj.endpoints) == 0 {
|
if len(obj.endpoints) == 0 {
|
||||||
@@ -1418,7 +1431,7 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {
|
|||||||
// XXX: just put this wherever for now so we don't block
|
// XXX: just put this wherever for now so we don't block
|
||||||
// nominate self so "member" list is correct for peers to see
|
// nominate self so "member" list is correct for peers to see
|
||||||
EtcdNominate(obj, obj.hostname, obj.serverURLs)
|
EtcdNominate(obj, obj.hostname, obj.serverURLs)
|
||||||
// XXX if this fails, where will we retry this part ?
|
// XXX: if this fails, where will we retry this part ?
|
||||||
}
|
}
|
||||||
|
|
||||||
// advertise client urls
|
// advertise client urls
|
||||||
@@ -1426,7 +1439,7 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {
|
|||||||
// XXX: don't advertise local addresses! 127.0.0.1:2381 doesn't really help remote hosts
|
// XXX: don't advertise local addresses! 127.0.0.1:2381 doesn't really help remote hosts
|
||||||
// XXX: but sometimes this is what we want... hmmm how do we decide? filter on callback?
|
// XXX: but sometimes this is what we want... hmmm how do we decide? filter on callback?
|
||||||
EtcdAdvertiseEndpoints(obj, curls)
|
EtcdAdvertiseEndpoints(obj, curls)
|
||||||
// XXX if this fails, where will we retry this part ?
|
// XXX: if this fails, where will we retry this part ?
|
||||||
|
|
||||||
// force this to remove sentinel before we reconnect...
|
// force this to remove sentinel before we reconnect...
|
||||||
obj.endpointCallback(nil)
|
obj.endpointCallback(nil)
|
||||||
@@ -1487,7 +1500,7 @@ func (obj *EmbdEtcd) nominateCallback(re *RE) error {
|
|||||||
|
|
||||||
// endpointCallback runs to respond to the endpoint list change events
|
// endpointCallback runs to respond to the endpoint list change events
|
||||||
func (obj *EmbdEtcd) endpointCallback(re *RE) error {
|
func (obj *EmbdEtcd) endpointCallback(re *RE) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: endpointCallback()")
|
log.Printf("Trace: Etcd: endpointCallback()")
|
||||||
defer log.Printf("Trace: Etcd: endpointCallback(): Finished!")
|
defer log.Printf("Trace: Etcd: endpointCallback(): Finished!")
|
||||||
}
|
}
|
||||||
@@ -1553,7 +1566,7 @@ func (obj *EmbdEtcd) endpointCallback(re *RE) error {
|
|||||||
|
|
||||||
// idealClusterSizeCallback runs to respond to the ideal cluster size changes
|
// idealClusterSizeCallback runs to respond to the ideal cluster size changes
|
||||||
func (obj *EmbdEtcd) idealClusterSizeCallback(re *RE) error {
|
func (obj *EmbdEtcd) idealClusterSizeCallback(re *RE) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: idealClusterSizeCallback()")
|
log.Printf("Trace: Etcd: idealClusterSizeCallback()")
|
||||||
defer log.Printf("Trace: Etcd: idealClusterSizeCallback(): Finished!")
|
defer log.Printf("Trace: Etcd: idealClusterSizeCallback(): Finished!")
|
||||||
}
|
}
|
||||||
@@ -1642,7 +1655,7 @@ func (obj *EmbdEtcd) StartServer(newCluster bool, peerURLsMap etcdtypes.URLsMap)
|
|||||||
} else {
|
} else {
|
||||||
cfg.ClusterState = embed.ClusterStateFlagExisting
|
cfg.ClusterState = embed.ClusterStateFlagExisting
|
||||||
}
|
}
|
||||||
//cfg.ForceNewCluster = newCluster // TODO ?
|
//cfg.ForceNewCluster = newCluster // TODO: ?
|
||||||
|
|
||||||
log.Printf("Etcd: StartServer: Starting server...")
|
log.Printf("Etcd: StartServer: Starting server...")
|
||||||
obj.server, err = embed.StartEtcd(cfg)
|
obj.server, err = embed.StartEtcd(cfg)
|
||||||
@@ -1692,7 +1705,7 @@ func (obj *EmbdEtcd) DestroyServer() error {
|
|||||||
|
|
||||||
// EtcdNominate nominates a particular client to be a server (peer)
|
// EtcdNominate nominates a particular client to be a server (peer)
|
||||||
func EtcdNominate(obj *EmbdEtcd, hostname string, urls etcdtypes.URLs) error {
|
func EtcdNominate(obj *EmbdEtcd, hostname string, urls etcdtypes.URLs) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdNominate(%v): %v", hostname, urls.String())
|
log.Printf("Trace: Etcd: EtcdNominate(%v): %v", hostname, urls.String())
|
||||||
defer log.Printf("Trace: Etcd: EtcdNominate(%v): Finished!", hostname)
|
defer log.Printf("Trace: Etcd: EtcdNominate(%v): Finished!", hostname)
|
||||||
}
|
}
|
||||||
@@ -1734,7 +1747,7 @@ func EtcdNominated(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
|||||||
return nil, fmt.Errorf("Etcd: Nominated: Data format error!: %v", err)
|
return nil, fmt.Errorf("Etcd: Nominated: Data format error!: %v", err)
|
||||||
}
|
}
|
||||||
nominated[name] = urls // add to map
|
nominated[name] = urls // add to map
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: Nominated(%v): %v", name, val)
|
log.Printf("Etcd: Nominated(%v): %v", name, val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1743,7 +1756,7 @@ func EtcdNominated(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
|||||||
|
|
||||||
// EtcdVolunteer offers yourself up to be a server if needed
|
// EtcdVolunteer offers yourself up to be a server if needed
|
||||||
func EtcdVolunteer(obj *EmbdEtcd, urls etcdtypes.URLs) error {
|
func EtcdVolunteer(obj *EmbdEtcd, urls etcdtypes.URLs) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdVolunteer(%v): %v", obj.hostname, urls.String())
|
log.Printf("Trace: Etcd: EtcdVolunteer(%v): %v", obj.hostname, urls.String())
|
||||||
defer log.Printf("Trace: Etcd: EtcdVolunteer(%v): Finished!", obj.hostname)
|
defer log.Printf("Trace: Etcd: EtcdVolunteer(%v): Finished!", obj.hostname)
|
||||||
}
|
}
|
||||||
@@ -1766,7 +1779,7 @@ func EtcdVolunteer(obj *EmbdEtcd, urls etcdtypes.URLs) error {
|
|||||||
|
|
||||||
// EtcdVolunteers returns a urls map of available etcd server volunteers
|
// EtcdVolunteers returns a urls map of available etcd server volunteers
|
||||||
func EtcdVolunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
func EtcdVolunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdVolunteers()")
|
log.Printf("Trace: Etcd: EtcdVolunteers()")
|
||||||
defer log.Printf("Trace: Etcd: EtcdVolunteers(): Finished!")
|
defer log.Printf("Trace: Etcd: EtcdVolunteers(): Finished!")
|
||||||
}
|
}
|
||||||
@@ -1789,7 +1802,7 @@ func EtcdVolunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
|||||||
return nil, fmt.Errorf("Etcd: Volunteers: Data format error!: %v", err)
|
return nil, fmt.Errorf("Etcd: Volunteers: Data format error!: %v", err)
|
||||||
}
|
}
|
||||||
volunteers[name] = urls // add to map
|
volunteers[name] = urls // add to map
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: Volunteer(%v): %v", name, val)
|
log.Printf("Etcd: Volunteer(%v): %v", name, val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1798,7 +1811,7 @@ func EtcdVolunteers(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
|||||||
|
|
||||||
// EtcdAdvertiseEndpoints advertises the list of available client endpoints
|
// EtcdAdvertiseEndpoints advertises the list of available client endpoints
|
||||||
func EtcdAdvertiseEndpoints(obj *EmbdEtcd, urls etcdtypes.URLs) error {
|
func EtcdAdvertiseEndpoints(obj *EmbdEtcd, urls etcdtypes.URLs) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdAdvertiseEndpoints(%v): %v", obj.hostname, urls.String())
|
log.Printf("Trace: Etcd: EtcdAdvertiseEndpoints(%v): %v", obj.hostname, urls.String())
|
||||||
defer log.Printf("Trace: Etcd: EtcdAdvertiseEndpoints(%v): Finished!", obj.hostname)
|
defer log.Printf("Trace: Etcd: EtcdAdvertiseEndpoints(%v): Finished!", obj.hostname)
|
||||||
}
|
}
|
||||||
@@ -1821,7 +1834,7 @@ func EtcdAdvertiseEndpoints(obj *EmbdEtcd, urls etcdtypes.URLs) error {
|
|||||||
|
|
||||||
// EtcdEndpoints returns a urls map of available etcd server endpoints
|
// EtcdEndpoints returns a urls map of available etcd server endpoints
|
||||||
func EtcdEndpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
func EtcdEndpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdEndpoints()")
|
log.Printf("Trace: Etcd: EtcdEndpoints()")
|
||||||
defer log.Printf("Trace: Etcd: EtcdEndpoints(): Finished!")
|
defer log.Printf("Trace: Etcd: EtcdEndpoints(): Finished!")
|
||||||
}
|
}
|
||||||
@@ -1844,7 +1857,7 @@ func EtcdEndpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
|||||||
return nil, fmt.Errorf("Etcd: Endpoints: Data format error!: %v", err)
|
return nil, fmt.Errorf("Etcd: Endpoints: Data format error!: %v", err)
|
||||||
}
|
}
|
||||||
endpoints[name] = urls // add to map
|
endpoints[name] = urls // add to map
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: Endpoint(%v): %v", name, val)
|
log.Printf("Etcd: Endpoint(%v): %v", name, val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1853,7 +1866,7 @@ func EtcdEndpoints(obj *EmbdEtcd) (etcdtypes.URLsMap, error) {
|
|||||||
|
|
||||||
// EtcdSetHostnameConverged sets whether a specific hostname is converged.
|
// EtcdSetHostnameConverged sets whether a specific hostname is converged.
|
||||||
func EtcdSetHostnameConverged(obj *EmbdEtcd, hostname string, isConverged bool) error {
|
func EtcdSetHostnameConverged(obj *EmbdEtcd, hostname string, isConverged bool) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdSetHostnameConverged(%s): %v", hostname, isConverged)
|
log.Printf("Trace: Etcd: EtcdSetHostnameConverged(%s): %v", hostname, isConverged)
|
||||||
defer log.Printf("Trace: Etcd: EtcdSetHostnameConverged(%v): Finished!", hostname)
|
defer log.Printf("Trace: Etcd: EtcdSetHostnameConverged(%v): Finished!", hostname)
|
||||||
}
|
}
|
||||||
@@ -1867,7 +1880,7 @@ func EtcdSetHostnameConverged(obj *EmbdEtcd, hostname string, isConverged bool)
|
|||||||
|
|
||||||
// EtcdHostnameConverged returns a map of every hostname's converged state.
|
// EtcdHostnameConverged returns a map of every hostname's converged state.
|
||||||
func EtcdHostnameConverged(obj *EmbdEtcd) (map[string]bool, error) {
|
func EtcdHostnameConverged(obj *EmbdEtcd) (map[string]bool, error) {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdHostnameConverged()")
|
log.Printf("Trace: Etcd: EtcdHostnameConverged()")
|
||||||
defer log.Printf("Trace: Etcd: EtcdHostnameConverged(): Finished!")
|
defer log.Printf("Trace: Etcd: EtcdHostnameConverged(): Finished!")
|
||||||
}
|
}
|
||||||
@@ -1912,7 +1925,7 @@ func EtcdAddHostnameConvergedWatcher(obj *EmbdEtcd, callbackFn func(map[string]b
|
|||||||
|
|
||||||
// EtcdSetClusterSize sets the ideal target cluster size of etcd peers
|
// EtcdSetClusterSize sets the ideal target cluster size of etcd peers
|
||||||
func EtcdSetClusterSize(obj *EmbdEtcd, value uint16) error {
|
func EtcdSetClusterSize(obj *EmbdEtcd, value uint16) error {
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdSetClusterSize(): %v", value)
|
log.Printf("Trace: Etcd: EtcdSetClusterSize(): %v", value)
|
||||||
defer log.Printf("Trace: Etcd: EtcdSetClusterSize(): Finished!")
|
defer log.Printf("Trace: Etcd: EtcdSetClusterSize(): Finished!")
|
||||||
}
|
}
|
||||||
@@ -1946,7 +1959,7 @@ func EtcdGetClusterSize(obj *EmbdEtcd) (uint16, error) {
|
|||||||
|
|
||||||
// EtcdMemberAdd adds a member to the cluster.
|
// EtcdMemberAdd adds a member to the cluster.
|
||||||
func EtcdMemberAdd(obj *EmbdEtcd, peerURLs etcdtypes.URLs) (*etcd.MemberAddResponse, error) {
|
func EtcdMemberAdd(obj *EmbdEtcd, peerURLs etcdtypes.URLs) (*etcd.MemberAddResponse, error) {
|
||||||
//obj.Connect(false) // TODO ?
|
//obj.Connect(false) // TODO: ?
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
var response *etcd.MemberAddResponse
|
var response *etcd.MemberAddResponse
|
||||||
var err error
|
var err error
|
||||||
@@ -1971,7 +1984,7 @@ func EtcdMemberAdd(obj *EmbdEtcd, peerURLs etcdtypes.URLs) (*etcd.MemberAddRespo
|
|||||||
// if there was an error. This is because it might have run without error, but
|
// if there was an error. This is because it might have run without error, but
|
||||||
// the member wasn't found, for example.
|
// the member wasn't found, for example.
|
||||||
func EtcdMemberRemove(obj *EmbdEtcd, mID uint64) (bool, error) {
|
func EtcdMemberRemove(obj *EmbdEtcd, mID uint64) (bool, error) {
|
||||||
//obj.Connect(false) // TODO ?
|
//obj.Connect(false) // TODO: ?
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
for {
|
for {
|
||||||
if obj.exiting { // the exit signal has been sent!
|
if obj.exiting { // the exit signal has been sent!
|
||||||
@@ -1997,7 +2010,7 @@ func EtcdMemberRemove(obj *EmbdEtcd, mID uint64) (bool, error) {
|
|||||||
// The member ID's are the keys, because an empty names means unstarted!
|
// The member ID's are the keys, because an empty names means unstarted!
|
||||||
// TODO: consider queueing this through the main loop with CtxError(ctx, err)
|
// TODO: consider queueing this through the main loop with CtxError(ctx, err)
|
||||||
func EtcdMembers(obj *EmbdEtcd) (map[uint64]string, error) {
|
func EtcdMembers(obj *EmbdEtcd) (map[uint64]string, error) {
|
||||||
//obj.Connect(false) // TODO ?
|
//obj.Connect(false) // TODO: ?
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
var response *etcd.MemberListResponse
|
var response *etcd.MemberListResponse
|
||||||
var err error
|
var err error
|
||||||
@@ -2006,7 +2019,7 @@ func EtcdMembers(obj *EmbdEtcd) (map[uint64]string, error) {
|
|||||||
return nil, fmt.Errorf("Exiting...")
|
return nil, fmt.Errorf("Exiting...")
|
||||||
}
|
}
|
||||||
obj.rLock.RLock()
|
obj.rLock.RLock()
|
||||||
if TRACE {
|
if global.TRACE {
|
||||||
log.Printf("Trace: Etcd: EtcdMembers(): Endpoints are: %v", obj.client.Endpoints())
|
log.Printf("Trace: Etcd: EtcdMembers(): Endpoints are: %v", obj.client.Endpoints())
|
||||||
}
|
}
|
||||||
response, err = obj.client.MemberList(ctx)
|
response, err = obj.client.MemberList(ctx)
|
||||||
@@ -2028,7 +2041,7 @@ func EtcdMembers(obj *EmbdEtcd) (map[uint64]string, error) {
|
|||||||
|
|
||||||
// EtcdLeader returns the current leader of the etcd server cluster
|
// EtcdLeader returns the current leader of the etcd server cluster
|
||||||
func EtcdLeader(obj *EmbdEtcd) (string, error) {
|
func EtcdLeader(obj *EmbdEtcd) (string, error) {
|
||||||
//obj.Connect(false) // TODO ?
|
//obj.Connect(false) // TODO: ?
|
||||||
var err error
|
var err error
|
||||||
membersMap := make(map[uint64]string)
|
membersMap := make(map[uint64]string)
|
||||||
if membersMap, err = EtcdMembers(obj); err != nil {
|
if membersMap, err = EtcdMembers(obj); err != nil {
|
||||||
@@ -2100,8 +2113,8 @@ func EtcdWatch(obj *EmbdEtcd) chan bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EtcdSetResources exports all of the resources which we pass in to etcd
|
// EtcdSetResources exports all of the resources which we pass in to etcd
|
||||||
func EtcdSetResources(obj *EmbdEtcd, hostname string, resources []Res) error {
|
func EtcdSetResources(obj *EmbdEtcd, hostname string, resourceList []resources.Res) error {
|
||||||
// key structure is /$NS/exported/$hostname/resources/$uuid = $data
|
// key structure is /$NS/exported/$hostname/resources/$uid = $data
|
||||||
|
|
||||||
var kindFilter []string // empty to get from everyone
|
var kindFilter []string // empty to get from everyone
|
||||||
hostnameFilter := []string{hostname}
|
hostnameFilter := []string{hostname}
|
||||||
@@ -2112,19 +2125,19 @@ func EtcdSetResources(obj *EmbdEtcd, hostname string, resources []Res) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(originals) == 0 && len(resources) == 0 { // special case of no add or del
|
if len(originals) == 0 && len(resourceList) == 0 { // special case of no add or del
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
ifs := []etcd.Cmp{} // list matching the desired state
|
ifs := []etcd.Cmp{} // list matching the desired state
|
||||||
ops := []etcd.Op{} // list of ops in this transaction
|
ops := []etcd.Op{} // list of ops in this transaction
|
||||||
for _, res := range resources {
|
for _, res := range resourceList {
|
||||||
if res.Kind() == "" {
|
if res.Kind() == "" {
|
||||||
log.Fatalf("Etcd: SetResources: Error: Empty kind: %v", res.GetName())
|
log.Fatalf("Etcd: SetResources: Error: Empty kind: %v", res.GetName())
|
||||||
}
|
}
|
||||||
uuid := fmt.Sprintf("%s/%s", res.Kind(), res.GetName())
|
uid := fmt.Sprintf("%s/%s", res.Kind(), res.GetName())
|
||||||
path := fmt.Sprintf("/%s/exported/%s/resources/%s", NS, hostname, uuid)
|
path := fmt.Sprintf("/%s/exported/%s/resources/%s", NS, hostname, uid)
|
||||||
if data, err := ResToB64(res); err == nil {
|
if data, err := resources.ResToB64(res); err == nil {
|
||||||
ifs = append(ifs, etcd.Compare(etcd.Value(path), "=", data)) // desired state
|
ifs = append(ifs, etcd.Compare(etcd.Value(path), "=", data)) // desired state
|
||||||
ops = append(ops, etcd.OpPut(path, data))
|
ops = append(ops, etcd.OpPut(path, data))
|
||||||
} else {
|
} else {
|
||||||
@@ -2132,8 +2145,8 @@ func EtcdSetResources(obj *EmbdEtcd, hostname string, resources []Res) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
match := func(res Res, resources []Res) bool { // helper lambda
|
match := func(res resources.Res, resourceList []resources.Res) bool { // helper lambda
|
||||||
for _, x := range resources {
|
for _, x := range resourceList {
|
||||||
if res.Kind() == x.Kind() && res.GetName() == x.GetName() {
|
if res.Kind() == x.Kind() && res.GetName() == x.GetName() {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -2147,10 +2160,10 @@ func EtcdSetResources(obj *EmbdEtcd, hostname string, resources []Res) error {
|
|||||||
if res.Kind() == "" {
|
if res.Kind() == "" {
|
||||||
log.Fatalf("Etcd: SetResources: Error: Empty kind: %v", res.GetName())
|
log.Fatalf("Etcd: SetResources: Error: Empty kind: %v", res.GetName())
|
||||||
}
|
}
|
||||||
uuid := fmt.Sprintf("%s/%s", res.Kind(), res.GetName())
|
uid := fmt.Sprintf("%s/%s", res.Kind(), res.GetName())
|
||||||
path := fmt.Sprintf("/%s/exported/%s/resources/%s", NS, hostname, uuid)
|
path := fmt.Sprintf("/%s/exported/%s/resources/%s", NS, hostname, uid)
|
||||||
|
|
||||||
if match(res, resources) { // if we match, no need to delete!
|
if match(res, resourceList) { // if we match, no need to delete!
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2174,11 +2187,11 @@ func EtcdSetResources(obj *EmbdEtcd, hostname string, resources []Res) error {
|
|||||||
// If the kindfilter or hostnameFilter is empty, then it assumes no filtering...
|
// If the kindfilter or hostnameFilter is empty, then it assumes no filtering...
|
||||||
// TODO: Expand this with a more powerful filter based on what we eventually
|
// TODO: Expand this with a more powerful filter based on what we eventually
|
||||||
// support in our collect DSL. Ideally a server side filter like WithFilter()
|
// support in our collect DSL. Ideally a server side filter like WithFilter()
|
||||||
// We could do this if the pattern was /$NS/exported/$kind/$hostname/$uuid = $data
|
// We could do this if the pattern was /$NS/exported/$kind/$hostname/$uid = $data
|
||||||
func EtcdGetResources(obj *EmbdEtcd, hostnameFilter, kindFilter []string) ([]Res, error) {
|
func EtcdGetResources(obj *EmbdEtcd, hostnameFilter, kindFilter []string) ([]resources.Res, error) {
|
||||||
// key structure is /$NS/exported/$hostname/resources/$uuid = $data
|
// key structure is /$NS/exported/$hostname/resources/$uid = $data
|
||||||
path := fmt.Sprintf("/%s/exported/", NS)
|
path := fmt.Sprintf("/%s/exported/", NS)
|
||||||
resources := []Res{}
|
resourceList := []resources.Res{}
|
||||||
keyMap, err := obj.Get(path, etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortAscend))
|
keyMap, err := obj.Get(path, etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortAscend))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Etcd: GetResources: Error: Could not get resources: %v", err)
|
return nil, fmt.Errorf("Etcd: GetResources: Error: Could not get resources: %v", err)
|
||||||
@@ -2201,24 +2214,24 @@ func EtcdGetResources(obj *EmbdEtcd, hostnameFilter, kindFilter []string) ([]Res
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: ideally this would be a server side filter instead!
|
// FIXME: ideally this would be a server side filter instead!
|
||||||
if len(hostnameFilter) > 0 && !StrInList(hostname, hostnameFilter) {
|
if len(hostnameFilter) > 0 && !util.StrInList(hostname, hostnameFilter) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: ideally this would be a server side filter instead!
|
// FIXME: ideally this would be a server side filter instead!
|
||||||
if len(kindFilter) > 0 && !StrInList(kind, kindFilter) {
|
if len(kindFilter) > 0 && !util.StrInList(kind, kindFilter) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if obj, err := B64ToRes(val); err == nil {
|
if obj, err := resources.B64ToRes(val); err == nil {
|
||||||
obj.setKind(kind) // cheap init
|
obj.SetKind(kind) // cheap init
|
||||||
log.Printf("Etcd: Get: (Hostname, Kind, Name): (%s, %s, %s)", hostname, kind, name)
|
log.Printf("Etcd: Get: (Hostname, Kind, Name): (%s, %s, %s)", hostname, kind, name)
|
||||||
resources = append(resources, obj)
|
resourceList = append(resourceList, obj)
|
||||||
} else {
|
} else {
|
||||||
return nil, fmt.Errorf("Etcd: GetResources: Error: Can't convert from B64: %v", err)
|
return nil, fmt.Errorf("Etcd: GetResources: Error: Can't convert from B64: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return resources, nil
|
return resourceList, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
//func UrlRemoveScheme(urls etcdtypes.URLs) []string {
|
//func UrlRemoveScheme(urls etcdtypes.URLs) []string {
|
||||||
@@ -2258,7 +2271,7 @@ func ApplyDeltaEvents(re *RE, urlsmap etcdtypes.URLsMap) (etcdtypes.URLsMap, err
|
|||||||
if _, exists := urlsmap[key]; !exists {
|
if _, exists := urlsmap[key]; !exists {
|
||||||
// this can happen if we retry an operation b/w
|
// this can happen if we retry an operation b/w
|
||||||
// a reconnect so ignore if we are reconnecting
|
// a reconnect so ignore if we are reconnecting
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Etcd: ApplyDeltaEvents: Inconsistent key: %v", key)
|
log.Printf("Etcd: ApplyDeltaEvents: Inconsistent key: %v", key)
|
||||||
}
|
}
|
||||||
return nil, errApplyDeltaEventsInconsistent
|
return nil, errApplyDeltaEventsInconsistent
|
||||||
@@ -15,26 +15,35 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
// Package event provides some primitives that are used for message passing.
|
||||||
|
package event
|
||||||
|
|
||||||
//go:generate stringer -type=eventName -output=eventname_stringer.go
|
import (
|
||||||
type eventName int
|
"fmt"
|
||||||
|
|
||||||
const (
|
|
||||||
eventNil eventName = iota
|
|
||||||
eventExit
|
|
||||||
eventStart
|
|
||||||
eventPause
|
|
||||||
eventPoke
|
|
||||||
eventBackPoke
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Resp is a channel to be used for boolean responses.
|
//go:generate stringer -type=EventName -output=eventname_stringer.go
|
||||||
type Resp chan bool
|
|
||||||
|
// EventName represents the type of event being passed.
|
||||||
|
type EventName int
|
||||||
|
|
||||||
|
// The different event names are used in different contexts.
|
||||||
|
const (
|
||||||
|
EventNil EventName = iota
|
||||||
|
EventExit
|
||||||
|
EventStart
|
||||||
|
EventPause
|
||||||
|
EventPoke
|
||||||
|
EventBackPoke
|
||||||
|
)
|
||||||
|
|
||||||
|
// Resp is a channel to be used for boolean responses. A nil represents an ACK,
|
||||||
|
// and a non-nil represents a NACK (false). This also lets us use custom errors.
|
||||||
|
type Resp chan error
|
||||||
|
|
||||||
// Event is the main struct that stores event information and responses.
|
// Event is the main struct that stores event information and responses.
|
||||||
type Event struct {
|
type Event struct {
|
||||||
Name eventName
|
Name EventName
|
||||||
Resp Resp // channel to send an ack response on, nil to skip
|
Resp Resp // channel to send an ack response on, nil to skip
|
||||||
//Wg *sync.WaitGroup // receiver barrier to Wait() for everyone else on
|
//Wg *sync.WaitGroup // receiver barrier to Wait() for everyone else on
|
||||||
Msg string // some words for fun
|
Msg string // some words for fun
|
||||||
@@ -55,28 +64,43 @@ func (event *Event) NACK() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ACKNACK sends a custom ACK or NACK message on the channel if one was requested.
|
||||||
|
func (event *Event) ACKNACK(err error) {
|
||||||
|
if event.Resp != nil { // if they've requested a NACK
|
||||||
|
event.Resp.ACKNACK(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// NewResp is just a helper to return the right type of response channel.
|
// NewResp is just a helper to return the right type of response channel.
|
||||||
func NewResp() Resp {
|
func NewResp() Resp {
|
||||||
resp := make(chan bool)
|
resp := make(chan error)
|
||||||
return resp
|
return resp
|
||||||
}
|
}
|
||||||
|
|
||||||
// ACK sends a true value to resp.
|
// ACK sends a true value to resp.
|
||||||
func (resp Resp) ACK() {
|
func (resp Resp) ACK() {
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
resp <- true
|
resp <- nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NACK sends a false value to resp.
|
// NACK sends a false value to resp.
|
||||||
func (resp Resp) NACK() {
|
func (resp Resp) NACK() {
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
resp <- false
|
resp <- fmt.Errorf("NACK")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ACKNACK sends a custom ACK or NACK. The ACK value is always nil, the NACK can
|
||||||
|
// be any non-nil error value.
|
||||||
|
func (resp Resp) ACKNACK(err error) {
|
||||||
|
if resp != nil {
|
||||||
|
resp <- err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait waits for any response from a Resp channel and returns it.
|
// Wait waits for any response from a Resp channel and returns it.
|
||||||
func (resp Resp) Wait() bool {
|
func (resp Resp) Wait() error {
|
||||||
return <-resp
|
return <-resp
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -84,7 +108,7 @@ func (resp Resp) Wait() bool {
|
|||||||
func (resp Resp) ACKWait() {
|
func (resp Resp) ACKWait() {
|
||||||
for {
|
for {
|
||||||
// wait until true value
|
// wait until true value
|
||||||
if resp.Wait() {
|
if resp.Wait() == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
13
examples/file2.yaml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
---
|
||||||
|
graph: mygraph
|
||||||
|
resources:
|
||||||
|
noop:
|
||||||
|
- name: noop1
|
||||||
|
file:
|
||||||
|
- name: file1
|
||||||
|
path: "/tmp/mgmt/hello/"
|
||||||
|
source: "/var/lib/mgmt/files/some_dir/"
|
||||||
|
recurse: true
|
||||||
|
force: true
|
||||||
|
state: exists
|
||||||
|
edges: []
|
||||||
14
examples/file3.yaml
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
---
|
||||||
|
graph: mygraph
|
||||||
|
comment: You can test Watch and CheckApply failures with chmod ugo-r and chmod ugo-w.
|
||||||
|
resources:
|
||||||
|
file:
|
||||||
|
- name: file1
|
||||||
|
path: "/tmp/mgmt/f1"
|
||||||
|
meta:
|
||||||
|
retry: 3
|
||||||
|
delay: 5000
|
||||||
|
content: |
|
||||||
|
i am f1
|
||||||
|
state: exists
|
||||||
|
edges: []
|
||||||
188
examples/lib/libmgmt1.go
Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
// libmgmt example
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/gapi"
|
||||||
|
mgmt "github.com/purpleidea/mgmt/mgmtmain"
|
||||||
|
"github.com/purpleidea/mgmt/pgraph"
|
||||||
|
"github.com/purpleidea/mgmt/resources"
|
||||||
|
"github.com/purpleidea/mgmt/yamlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MyGAPI implements the main GAPI interface.
|
||||||
|
type MyGAPI struct {
|
||||||
|
Name string // graph name
|
||||||
|
Interval uint // refresh interval, 0 to never refresh
|
||||||
|
|
||||||
|
data gapi.Data
|
||||||
|
initialized bool
|
||||||
|
closeChan chan struct{}
|
||||||
|
wg sync.WaitGroup // sync group for tunnel go routines
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMyGAPI creates a new MyGAPI struct and calls Init().
|
||||||
|
func NewMyGAPI(data gapi.Data, name string, interval uint) (*MyGAPI, error) {
|
||||||
|
obj := &MyGAPI{
|
||||||
|
Name: name,
|
||||||
|
Interval: interval,
|
||||||
|
}
|
||||||
|
return obj, obj.Init(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the MyGAPI struct.
|
||||||
|
func (obj *MyGAPI) Init(data gapi.Data) error {
|
||||||
|
if obj.initialized {
|
||||||
|
return fmt.Errorf("Already initialized!")
|
||||||
|
}
|
||||||
|
if obj.Name == "" {
|
||||||
|
return fmt.Errorf("The graph name must be specified!")
|
||||||
|
}
|
||||||
|
obj.data = data // store for later
|
||||||
|
obj.closeChan = make(chan struct{})
|
||||||
|
obj.initialized = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Graph returns a current Graph.
|
||||||
|
func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
|
||||||
|
if !obj.initialized {
|
||||||
|
return nil, fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
n1, err := resources.NewNoopRes("noop1")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Can't create resource: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// we can still build a graph via the yaml method
|
||||||
|
gc := &yamlgraph.GraphConfig{
|
||||||
|
Graph: obj.Name,
|
||||||
|
Resources: yamlgraph.Resources{ // must redefine anonymous struct :(
|
||||||
|
// in alphabetical order
|
||||||
|
Exec: []*resources.ExecRes{},
|
||||||
|
File: []*resources.FileRes{},
|
||||||
|
Msg: []*resources.MsgRes{},
|
||||||
|
Noop: []*resources.NoopRes{n1},
|
||||||
|
Pkg: []*resources.PkgRes{},
|
||||||
|
Svc: []*resources.SvcRes{},
|
||||||
|
Timer: []*resources.TimerRes{},
|
||||||
|
Virt: []*resources.VirtRes{},
|
||||||
|
},
|
||||||
|
//Collector: []collectorResConfig{},
|
||||||
|
//Edges: []Edge{},
|
||||||
|
Comment: "comment!",
|
||||||
|
}
|
||||||
|
|
||||||
|
g, err := gc.NewGraphFromConfig(obj.data.Hostname, obj.data.EmbdEtcd, obj.data.Noop)
|
||||||
|
return g, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SwitchStream returns nil errors every time there could be a new graph.
|
||||||
|
func (obj *MyGAPI) SwitchStream() chan error {
|
||||||
|
if obj.data.NoWatch || obj.Interval <= 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ch := make(chan error)
|
||||||
|
obj.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer obj.wg.Done()
|
||||||
|
defer close(ch) // this will run before the obj.wg.Done()
|
||||||
|
if !obj.initialized {
|
||||||
|
ch <- fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// arbitrarily change graph every interval seconds
|
||||||
|
ticker := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
log.Printf("libmgmt: Generating new graph...")
|
||||||
|
ch <- nil // trigger a run
|
||||||
|
case <-obj.closeChan:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts down the MyGAPI.
|
||||||
|
func (obj *MyGAPI) Close() error {
|
||||||
|
if !obj.initialized {
|
||||||
|
return fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||||
|
}
|
||||||
|
close(obj.closeChan)
|
||||||
|
obj.wg.Wait()
|
||||||
|
obj.initialized = false // closed = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run runs an embedded mgmt server.
|
||||||
|
func Run() error {
|
||||||
|
|
||||||
|
obj := &mgmt.Main{}
|
||||||
|
obj.Program = "libmgmt" // TODO: set on compilation
|
||||||
|
obj.Version = "0.0.1" // TODO: set on compilation
|
||||||
|
obj.TmpPrefix = true
|
||||||
|
obj.IdealClusterSize = -1
|
||||||
|
obj.ConvergedTimeout = -1
|
||||||
|
obj.Noop = true
|
||||||
|
|
||||||
|
obj.GAPI = &MyGAPI{ // graph API
|
||||||
|
Name: "libmgmt", // TODO: set on compilation
|
||||||
|
Interval: 15, // arbitrarily change graph every 15 seconds
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := obj.Init(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// install the exit signal handler
|
||||||
|
exit := make(chan struct{})
|
||||||
|
defer close(exit)
|
||||||
|
go func() {
|
||||||
|
signals := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||||
|
//signal.Notify(signals, os.Kill) // catch signals
|
||||||
|
signal.Notify(signals, syscall.SIGTERM)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case sig := <-signals: // any signal will do
|
||||||
|
if sig == os.Interrupt {
|
||||||
|
log.Println("Interrupted by ^C")
|
||||||
|
obj.Exit(nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Println("Interrupted by signal")
|
||||||
|
obj.Exit(fmt.Errorf("Killed by %v", sig))
|
||||||
|
return
|
||||||
|
case <-exit:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err := obj.Run(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.Printf("Hello!")
|
||||||
|
if err := Run(); err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("Goodbye!")
|
||||||
|
}
|
||||||
188
examples/lib/libmgmt2.go
Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
// libmgmt example
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/gapi"
|
||||||
|
mgmt "github.com/purpleidea/mgmt/mgmtmain"
|
||||||
|
"github.com/purpleidea/mgmt/pgraph"
|
||||||
|
"github.com/purpleidea/mgmt/resources"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MyGAPI implements the main GAPI interface.
|
||||||
|
type MyGAPI struct {
|
||||||
|
Name string // graph name
|
||||||
|
Count uint // number of resources to create
|
||||||
|
Interval uint // refresh interval, 0 to never refresh
|
||||||
|
|
||||||
|
data gapi.Data
|
||||||
|
initialized bool
|
||||||
|
closeChan chan struct{}
|
||||||
|
wg sync.WaitGroup // sync group for tunnel go routines
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMyGAPI creates a new MyGAPI struct and calls Init().
|
||||||
|
func NewMyGAPI(data gapi.Data, name string, interval uint, count uint) (*MyGAPI, error) {
|
||||||
|
obj := &MyGAPI{
|
||||||
|
Name: name,
|
||||||
|
Count: count,
|
||||||
|
Interval: interval,
|
||||||
|
}
|
||||||
|
return obj, obj.Init(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the MyGAPI struct.
|
||||||
|
func (obj *MyGAPI) Init(data gapi.Data) error {
|
||||||
|
if obj.initialized {
|
||||||
|
return fmt.Errorf("Already initialized!")
|
||||||
|
}
|
||||||
|
if obj.Name == "" {
|
||||||
|
return fmt.Errorf("The graph name must be specified!")
|
||||||
|
}
|
||||||
|
obj.data = data // store for later
|
||||||
|
obj.closeChan = make(chan struct{})
|
||||||
|
obj.initialized = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Graph returns a current Graph.
|
||||||
|
func (obj *MyGAPI) Graph() (*pgraph.Graph, error) {
|
||||||
|
if !obj.initialized {
|
||||||
|
return nil, fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
g := pgraph.NewGraph(obj.Name)
|
||||||
|
var vertex *pgraph.Vertex
|
||||||
|
for i := uint(0); i < obj.Count; i++ {
|
||||||
|
n, err := resources.NewNoopRes(fmt.Sprintf("noop%d", i))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Can't create resource: %v", err)
|
||||||
|
}
|
||||||
|
v := pgraph.NewVertex(n)
|
||||||
|
g.AddVertex(v)
|
||||||
|
if i > 0 {
|
||||||
|
g.AddEdge(vertex, v, pgraph.NewEdge(fmt.Sprintf("e%d", i)))
|
||||||
|
}
|
||||||
|
vertex = v // save
|
||||||
|
}
|
||||||
|
|
||||||
|
//g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.EmbdEtcd, obj.data.Noop)
|
||||||
|
return g, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SwitchStream returns nil errors every time there could be a new graph.
|
||||||
|
func (obj *MyGAPI) SwitchStream() chan error {
|
||||||
|
if obj.data.NoWatch || obj.Interval <= 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ch := make(chan error)
|
||||||
|
obj.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer obj.wg.Done()
|
||||||
|
defer close(ch) // this will run before the obj.wg.Done()
|
||||||
|
if !obj.initialized {
|
||||||
|
ch <- fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// arbitrarily change graph every interval seconds
|
||||||
|
ticker := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
log.Printf("libmgmt: Generating new graph...")
|
||||||
|
ch <- nil // trigger a run
|
||||||
|
case <-obj.closeChan:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts down the MyGAPI.
|
||||||
|
func (obj *MyGAPI) Close() error {
|
||||||
|
if !obj.initialized {
|
||||||
|
return fmt.Errorf("libmgmt: MyGAPI is not initialized")
|
||||||
|
}
|
||||||
|
close(obj.closeChan)
|
||||||
|
obj.wg.Wait()
|
||||||
|
obj.initialized = false // closed = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run runs an embedded mgmt server.
|
||||||
|
func Run(count uint) error {
|
||||||
|
|
||||||
|
obj := &mgmt.Main{}
|
||||||
|
obj.Program = "libmgmt" // TODO: set on compilation
|
||||||
|
obj.Version = "0.0.1" // TODO: set on compilation
|
||||||
|
obj.TmpPrefix = true
|
||||||
|
obj.IdealClusterSize = -1
|
||||||
|
obj.ConvergedTimeout = -1
|
||||||
|
obj.Noop = true
|
||||||
|
|
||||||
|
obj.GAPI = &MyGAPI{ // graph API
|
||||||
|
Name: "libmgmt", // TODO: set on compilation
|
||||||
|
Count: count, // number of vertices to add
|
||||||
|
Interval: 15, // arbitrarily change graph every 15 seconds
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := obj.Init(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// install the exit signal handler
|
||||||
|
exit := make(chan struct{})
|
||||||
|
defer close(exit)
|
||||||
|
go func() {
|
||||||
|
signals := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||||
|
//signal.Notify(signals, os.Kill) // catch signals
|
||||||
|
signal.Notify(signals, syscall.SIGTERM)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case sig := <-signals: // any signal will do
|
||||||
|
if sig == os.Interrupt {
|
||||||
|
log.Println("Interrupted by ^C")
|
||||||
|
obj.Exit(nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Println("Interrupted by signal")
|
||||||
|
obj.Exit(fmt.Errorf("Killed by %v", sig))
|
||||||
|
return
|
||||||
|
case <-exit:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err := obj.Run(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
log.Printf("Hello!")
|
||||||
|
var count uint = 1 // default
|
||||||
|
if len(os.Args) == 2 {
|
||||||
|
if i, err := strconv.Atoi(os.Args[1]); err == nil && i > 0 {
|
||||||
|
count = uint(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := Run(count); err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("Goodbye!")
|
||||||
|
}
|
||||||
19
examples/msg1.yaml
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
---
|
||||||
|
graph: mygraph
|
||||||
|
comment: timer example
|
||||||
|
resources:
|
||||||
|
timer:
|
||||||
|
- name: timer1
|
||||||
|
interval: 30
|
||||||
|
msg:
|
||||||
|
- name: msg1
|
||||||
|
body: mgmt logged this message
|
||||||
|
journal: true
|
||||||
|
edges:
|
||||||
|
- name: e1
|
||||||
|
from:
|
||||||
|
kind: timer
|
||||||
|
name: timer1
|
||||||
|
to:
|
||||||
|
kind: msg
|
||||||
|
name: msg1
|
||||||
20
examples/remote2a.yaml
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
graph: mygraph
|
||||||
|
comment: remote noop example
|
||||||
|
resources:
|
||||||
|
file:
|
||||||
|
- name: file1a
|
||||||
|
path: "/tmp/file1a"
|
||||||
|
content: |
|
||||||
|
i am file1a
|
||||||
|
state: exists
|
||||||
|
- name: "@@file2a"
|
||||||
|
path: "/tmp/file2a"
|
||||||
|
content: |
|
||||||
|
i am file2a, exported from host a
|
||||||
|
state: exists
|
||||||
|
collect:
|
||||||
|
- kind: file
|
||||||
|
pattern: "/tmp/"
|
||||||
|
edges: []
|
||||||
|
remote: ssh://root:vagrant@192.168.121.201:22
|
||||||
20
examples/remote2b.yaml
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
graph: mygraph
|
||||||
|
comment: remote noop example
|
||||||
|
resources:
|
||||||
|
file:
|
||||||
|
- name: file1b
|
||||||
|
path: "/tmp/file1b"
|
||||||
|
content: |
|
||||||
|
i am file1b
|
||||||
|
state: exists
|
||||||
|
- name: "@@file2b"
|
||||||
|
path: "/tmp/file2b"
|
||||||
|
content: |
|
||||||
|
i am file2b, exported from host b
|
||||||
|
state: exists
|
||||||
|
collect:
|
||||||
|
- kind: file
|
||||||
|
pattern: "/tmp/"
|
||||||
|
edges: []
|
||||||
|
remote: ssh://root:vagrant@192.168.121.202:22
|
||||||
11
examples/virt1.yaml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
---
|
||||||
|
graph: mygraph
|
||||||
|
resources:
|
||||||
|
virt:
|
||||||
|
- name: mgmt1
|
||||||
|
uri: 'qemu:///session'
|
||||||
|
cpus: 1
|
||||||
|
memory: 524288
|
||||||
|
state: shutoff
|
||||||
|
transient: true
|
||||||
|
edges: []
|
||||||
11
examples/virt2.yaml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
---
|
||||||
|
graph: mygraph
|
||||||
|
resources:
|
||||||
|
virt:
|
||||||
|
- name: mgmt2
|
||||||
|
uri: 'qemu:///session'
|
||||||
|
cpus: 1
|
||||||
|
memory: 524288
|
||||||
|
state: shutoff
|
||||||
|
transient: false
|
||||||
|
edges: []
|
||||||
11
examples/virt3.yaml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
---
|
||||||
|
graph: mygraph
|
||||||
|
resources:
|
||||||
|
virt:
|
||||||
|
- name: mgmt3
|
||||||
|
uri: 'qemu:///session'
|
||||||
|
cpus: 1
|
||||||
|
memory: 524288
|
||||||
|
state: running
|
||||||
|
transient: false
|
||||||
|
edges: []
|
||||||
529
file.go
@@ -1,529 +0,0 @@
|
|||||||
// Mgmt
|
|
||||||
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
|
||||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
|
||||||
//
|
|
||||||
// This program is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Affero General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// This program is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Affero General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Affero General Public License
|
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"gopkg.in/fsnotify.v1"
|
|
||||||
//"github.com/go-fsnotify/fsnotify" // git master of "gopkg.in/fsnotify.v1"
|
|
||||||
"encoding/gob"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
gob.Register(&FileRes{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileRes is a file and directory resource.
|
|
||||||
type FileRes struct {
|
|
||||||
BaseRes `yaml:",inline"`
|
|
||||||
Path string `yaml:"path"` // path variable (should default to name)
|
|
||||||
Dirname string `yaml:"dirname"`
|
|
||||||
Basename string `yaml:"basename"`
|
|
||||||
Content string `yaml:"content"`
|
|
||||||
State string `yaml:"state"` // state: exists/present?, absent, (undefined?)
|
|
||||||
sha256sum string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFileRes is a constructor for this resource. It also calls Init() for you.
|
|
||||||
func NewFileRes(name, path, dirname, basename, content, state string) *FileRes {
|
|
||||||
// FIXME if path = nil, path = name ...
|
|
||||||
obj := &FileRes{
|
|
||||||
BaseRes: BaseRes{
|
|
||||||
Name: name,
|
|
||||||
},
|
|
||||||
Path: path,
|
|
||||||
Dirname: dirname,
|
|
||||||
Basename: basename,
|
|
||||||
Content: content,
|
|
||||||
State: state,
|
|
||||||
sha256sum: "",
|
|
||||||
}
|
|
||||||
obj.Init()
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init runs some startup code for this resource.
|
|
||||||
func (obj *FileRes) Init() {
|
|
||||||
obj.BaseRes.kind = "File"
|
|
||||||
obj.BaseRes.Init() // call base init, b/c we're overriding
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPath returns the actual path to use for this resource. It computes this
|
|
||||||
// after analysis of the path, dirname and basename values.
|
|
||||||
func (obj *FileRes) GetPath() string {
|
|
||||||
d := Dirname(obj.Path)
|
|
||||||
b := Basename(obj.Path)
|
|
||||||
if !obj.Validate() || (obj.Dirname == "" && obj.Basename == "") {
|
|
||||||
return obj.Path
|
|
||||||
} else if obj.Dirname == "" {
|
|
||||||
return d + obj.Basename
|
|
||||||
} else if obj.Basename == "" {
|
|
||||||
return obj.Dirname + b
|
|
||||||
} else { // if obj.dirname != "" && obj.basename != "" {
|
|
||||||
return obj.Dirname + obj.Basename
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// validate if the params passed in are valid data
|
|
||||||
func (obj *FileRes) Validate() bool {
|
|
||||||
if obj.Dirname != "" {
|
|
||||||
// must end with /
|
|
||||||
if obj.Dirname[len(obj.Dirname)-1:] != "/" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if obj.Basename != "" {
|
|
||||||
// must not start with /
|
|
||||||
if obj.Basename[0:1] == "/" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
|
||||||
// This one is a file watcher for files and directories.
|
|
||||||
// Modify with caution, it is probably important to write some test cases first!
|
|
||||||
// obj.GetPath(): file or directory
|
|
||||||
func (obj *FileRes) Watch(processChan chan Event) {
|
|
||||||
if obj.IsWatching() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
obj.SetWatching(true)
|
|
||||||
defer obj.SetWatching(false)
|
|
||||||
cuuid := obj.converger.Register()
|
|
||||||
defer cuuid.Unregister()
|
|
||||||
|
|
||||||
//var recursive bool = false
|
|
||||||
//var isdir = (obj.GetPath()[len(obj.GetPath())-1:] == "/") // dirs have trailing slashes
|
|
||||||
//log.Printf("IsDirectory: %v", isdir)
|
|
||||||
var safename = path.Clean(obj.GetPath()) // no trailing slash
|
|
||||||
|
|
||||||
watcher, err := fsnotify.NewWatcher()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer watcher.Close()
|
|
||||||
|
|
||||||
patharray := PathSplit(safename) // tokenize the path
|
|
||||||
var index = len(patharray) // starting index
|
|
||||||
var current string // current "watcher" location
|
|
||||||
var deltaDepth int // depth delta between watcher and event
|
|
||||||
var send = false // send event?
|
|
||||||
var exit = false
|
|
||||||
var dirty = false
|
|
||||||
|
|
||||||
for {
|
|
||||||
current = strings.Join(patharray[0:index], "/")
|
|
||||||
if current == "" { // the empty string top is the root dir ("/")
|
|
||||||
current = "/"
|
|
||||||
}
|
|
||||||
if DEBUG {
|
|
||||||
log.Printf("File[%v]: Watching: %v", obj.GetName(), current) // attempting to watch...
|
|
||||||
}
|
|
||||||
// initialize in the loop so that we can reset on rm-ed handles
|
|
||||||
err = watcher.Add(current)
|
|
||||||
if err != nil {
|
|
||||||
if DEBUG {
|
|
||||||
log.Printf("File[%v]: watcher.Add(%v): Error: %v", obj.GetName(), current, err)
|
|
||||||
}
|
|
||||||
if err == syscall.ENOENT {
|
|
||||||
index-- // usually not found, move up one dir
|
|
||||||
} else if err == syscall.ENOSPC {
|
|
||||||
// XXX: occasionally: no space left on device,
|
|
||||||
// XXX: probably due to lack of inotify watches
|
|
||||||
log.Printf("%v[%v]: Out of inotify watches!", obj.Kind(), obj.GetName())
|
|
||||||
log.Fatal(err)
|
|
||||||
} else {
|
|
||||||
log.Printf("Unknown file[%v] error:", obj.Name)
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
index = int(math.Max(1, float64(index)))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
obj.SetState(resStateWatching) // reset
|
|
||||||
select {
|
|
||||||
case event := <-watcher.Events:
|
|
||||||
if DEBUG {
|
|
||||||
log.Printf("File[%v]: Watch(%v), Event(%v): %v", obj.GetName(), current, event.Name, event.Op)
|
|
||||||
}
|
|
||||||
cuuid.SetConverged(false) // XXX: technically i can detect if the event is erroneous or not first
|
|
||||||
// the deeper you go, the bigger the deltaDepth is...
|
|
||||||
// this is the difference between what we're watching,
|
|
||||||
// and the event... doesn't mean we can't watch deeper
|
|
||||||
if current == event.Name {
|
|
||||||
deltaDepth = 0 // i was watching what i was looking for
|
|
||||||
|
|
||||||
} else if HasPathPrefix(event.Name, current) {
|
|
||||||
deltaDepth = len(PathSplit(current)) - len(PathSplit(event.Name)) // -1 or less
|
|
||||||
|
|
||||||
} else if HasPathPrefix(current, event.Name) {
|
|
||||||
deltaDepth = len(PathSplit(event.Name)) - len(PathSplit(current)) // +1 or more
|
|
||||||
|
|
||||||
} else {
|
|
||||||
// TODO different watchers get each others events!
|
|
||||||
// https://github.com/go-fsnotify/fsnotify/issues/95
|
|
||||||
// this happened with two values such as:
|
|
||||||
// event.Name: /tmp/mgmt/f3 and current: /tmp/mgmt/f2
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
//log.Printf("The delta depth is: %v", deltaDepth)
|
|
||||||
|
|
||||||
// if we have what we wanted, awesome, send an event...
|
|
||||||
if event.Name == safename {
|
|
||||||
//log.Println("Event!")
|
|
||||||
// FIXME: should all these below cases trigger?
|
|
||||||
send = true
|
|
||||||
dirty = true
|
|
||||||
|
|
||||||
// file removed, move the watch upwards
|
|
||||||
if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
|
|
||||||
//log.Println("Removal!")
|
|
||||||
watcher.Remove(current)
|
|
||||||
index--
|
|
||||||
}
|
|
||||||
|
|
||||||
// we must be a parent watcher, so descend in
|
|
||||||
if deltaDepth < 0 {
|
|
||||||
watcher.Remove(current)
|
|
||||||
index++
|
|
||||||
}
|
|
||||||
|
|
||||||
// if safename starts with event.Name, we're above, and no event should be sent
|
|
||||||
} else if HasPathPrefix(safename, event.Name) {
|
|
||||||
//log.Println("Above!")
|
|
||||||
|
|
||||||
if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
|
|
||||||
log.Println("Removal!")
|
|
||||||
watcher.Remove(current)
|
|
||||||
index--
|
|
||||||
}
|
|
||||||
|
|
||||||
if deltaDepth < 0 {
|
|
||||||
log.Println("Parent!")
|
|
||||||
if PathPrefixDelta(safename, event.Name) == 1 { // we're the parent dir
|
|
||||||
send = true
|
|
||||||
dirty = true
|
|
||||||
}
|
|
||||||
watcher.Remove(current)
|
|
||||||
index++
|
|
||||||
}
|
|
||||||
|
|
||||||
// if event.Name startswith safename, send event, we're already deeper
|
|
||||||
} else if HasPathPrefix(event.Name, safename) {
|
|
||||||
//log.Println("Event2!")
|
|
||||||
send = true
|
|
||||||
dirty = true
|
|
||||||
}
|
|
||||||
|
|
||||||
case err := <-watcher.Errors:
|
|
||||||
cuuid.SetConverged(false) // XXX ?
|
|
||||||
log.Printf("error: %v", err)
|
|
||||||
log.Fatal(err)
|
|
||||||
//obj.events <- fmt.Sprintf("file: %v", "error") // XXX: how should we handle errors?
|
|
||||||
|
|
||||||
case event := <-obj.events:
|
|
||||||
cuuid.SetConverged(false)
|
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
|
||||||
return // exit
|
|
||||||
}
|
|
||||||
//dirty = false // these events don't invalidate state
|
|
||||||
|
|
||||||
case <-cuuid.ConvergedTimer():
|
|
||||||
cuuid.SetConverged(true) // converged!
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// do all our event sending all together to avoid duplicate msgs
|
|
||||||
if send {
|
|
||||||
send = false
|
|
||||||
// only invalid state on certain types of events
|
|
||||||
if dirty {
|
|
||||||
dirty = false
|
|
||||||
obj.isStateOK = false // something made state dirty
|
|
||||||
}
|
|
||||||
resp := NewResp()
|
|
||||||
processChan <- Event{eventNil, resp, "", true} // trigger process
|
|
||||||
resp.ACKWait() // wait for the ACK()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HashSHA256fromContent computes the hash of the file contents and returns it.
|
|
||||||
// It also caches the value if it can.
|
|
||||||
func (obj *FileRes) HashSHA256fromContent() string {
|
|
||||||
if obj.sha256sum != "" { // return if already computed
|
|
||||||
return obj.sha256sum
|
|
||||||
}
|
|
||||||
|
|
||||||
hash := sha256.New()
|
|
||||||
hash.Write([]byte(obj.Content))
|
|
||||||
obj.sha256sum = hex.EncodeToString(hash.Sum(nil))
|
|
||||||
return obj.sha256sum
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileHashSHA256Check computes the hash of the actual file and compares it to
|
|
||||||
// the computed hash of the resources file contents.
|
|
||||||
func (obj *FileRes) FileHashSHA256Check() (bool, error) {
|
|
||||||
if PathIsDir(obj.GetPath()) { // assert
|
|
||||||
log.Fatal("This should only be called on a File resource.")
|
|
||||||
}
|
|
||||||
// run a diff, and return true if it needs changing
|
|
||||||
hash := sha256.New()
|
|
||||||
f, err := os.Open(obj.GetPath())
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(*os.PathError); ok && (e.Err.(syscall.Errno) == syscall.ENOENT) {
|
|
||||||
return false, nil // no "error", file is just absent
|
|
||||||
}
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
if _, err := io.Copy(hash, f); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
sha256sum := hex.EncodeToString(hash.Sum(nil))
|
|
||||||
//log.Printf("sha256sum: %v", sha256sum)
|
|
||||||
if obj.HashSHA256fromContent() == sha256sum {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileApply writes the resource file contents out to the correct path. This
|
|
||||||
// implementation doesn't try to be particularly clever in any way.
|
|
||||||
func (obj *FileRes) FileApply() error {
|
|
||||||
if PathIsDir(obj.GetPath()) {
|
|
||||||
log.Fatal("This should only be called on a File resource.")
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.State == "absent" {
|
|
||||||
log.Printf("About to remove: %v", obj.GetPath())
|
|
||||||
err := os.Remove(obj.GetPath())
|
|
||||||
return err // either nil or not, for success or failure
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Create(obj.GetPath())
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
_, err = io.WriteString(f, obj.Content)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil // success
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckApply checks the resource state and applies the resource if the bool
|
|
||||||
// input is true. It returns error info and if the state check passed or not.
|
|
||||||
func (obj *FileRes) CheckApply(apply bool) (checkok bool, err error) {
|
|
||||||
log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
|
|
||||||
|
|
||||||
if obj.isStateOK { // cache the state
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = os.Stat(obj.GetPath()); os.IsNotExist(err) {
|
|
||||||
// no such file or directory
|
|
||||||
if obj.State == "absent" {
|
|
||||||
// missing file should be missing, phew :)
|
|
||||||
obj.isStateOK = true
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = nil // reset
|
|
||||||
|
|
||||||
// FIXME: add file mode check here...
|
|
||||||
|
|
||||||
if PathIsDir(obj.GetPath()) {
|
|
||||||
log.Fatal("Not implemented!") // XXX
|
|
||||||
} else {
|
|
||||||
ok, err := obj.FileHashSHA256Check()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if ok {
|
|
||||||
obj.isStateOK = true
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
// if no err, but !ok, then we continue on...
|
|
||||||
}
|
|
||||||
|
|
||||||
// state is not okay, no work done, exit, but without error
|
|
||||||
if !apply {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// apply portion
|
|
||||||
log.Printf("%v[%v]: Apply", obj.Kind(), obj.GetName())
|
|
||||||
if PathIsDir(obj.GetPath()) {
|
|
||||||
log.Fatal("Not implemented!") // XXX
|
|
||||||
} else {
|
|
||||||
err = obj.FileApply()
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
obj.isStateOK = true
|
|
||||||
return false, nil // success
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileUUID is the UUID struct for FileRes.
|
|
||||||
type FileUUID struct {
|
|
||||||
BaseUUID
|
|
||||||
path string
|
|
||||||
}
|
|
||||||
|
|
||||||
// if and only if they are equivalent, return true
|
|
||||||
// if they are not equivalent, return false
|
|
||||||
func (obj *FileUUID) IFF(uuid ResUUID) bool {
|
|
||||||
res, ok := uuid.(*FileUUID)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return obj.path == res.path
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileResAutoEdges holds the state of the auto edge generator.
|
|
||||||
type FileResAutoEdges struct {
|
|
||||||
data []ResUUID
|
|
||||||
pointer int
|
|
||||||
found bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next returns the next automatic edge.
|
|
||||||
func (obj *FileResAutoEdges) Next() []ResUUID {
|
|
||||||
if obj.found {
|
|
||||||
log.Fatal("Shouldn't be called anymore!")
|
|
||||||
}
|
|
||||||
if len(obj.data) == 0 { // check length for rare scenarios
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
value := obj.data[obj.pointer]
|
|
||||||
obj.pointer++
|
|
||||||
return []ResUUID{value} // we return one, even though api supports N
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test gets results of the earlier Next() call, & returns if we should continue!
|
|
||||||
func (obj *FileResAutoEdges) Test(input []bool) bool {
|
|
||||||
// if there aren't any more remaining
|
|
||||||
if len(obj.data) <= obj.pointer {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if obj.found { // already found, done!
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(input) != 1 { // in case we get given bad data
|
|
||||||
log.Fatal("Expecting a single value!")
|
|
||||||
}
|
|
||||||
if input[0] { // if a match is found, we're done!
|
|
||||||
obj.found = true // no more to find!
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true // keep going
|
|
||||||
}
|
|
||||||
|
|
||||||
// AutoEdges generates a simple linear sequence of each parent directory from
|
|
||||||
// the bottom up!
|
|
||||||
func (obj *FileRes) AutoEdges() AutoEdge {
|
|
||||||
var data []ResUUID // store linear result chain here...
|
|
||||||
values := PathSplitFullReversed(obj.GetPath()) // build it
|
|
||||||
_, values = values[0], values[1:] // get rid of first value which is me!
|
|
||||||
for _, x := range values {
|
|
||||||
var reversed = true // cheat by passing a pointer
|
|
||||||
data = append(data, &FileUUID{
|
|
||||||
BaseUUID: BaseUUID{
|
|
||||||
name: obj.GetName(),
|
|
||||||
kind: obj.Kind(),
|
|
||||||
reversed: &reversed,
|
|
||||||
},
|
|
||||||
path: x, // what matters
|
|
||||||
}) // build list
|
|
||||||
}
|
|
||||||
return &FileResAutoEdges{
|
|
||||||
data: data,
|
|
||||||
pointer: 0,
|
|
||||||
found: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUUIDs includes all params to make a unique identification of this object.
|
|
||||||
// Most resources only return one, although some resources can return multiple.
|
|
||||||
func (obj *FileRes) GetUUIDs() []ResUUID {
|
|
||||||
x := &FileUUID{
|
|
||||||
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
|
|
||||||
path: obj.GetPath(),
|
|
||||||
}
|
|
||||||
return []ResUUID{x}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GroupCmp returns whether two resources can be grouped together or not.
|
|
||||||
func (obj *FileRes) GroupCmp(r Res) bool {
|
|
||||||
_, ok := r.(*FileRes)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
// TODO: we might be able to group directory children into a single
|
|
||||||
// recursive watcher in the future, thus saving fanotify watches
|
|
||||||
return false // not possible atm
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compare two resources and return if they are equivalent.
|
|
||||||
func (obj *FileRes) Compare(res Res) bool {
|
|
||||||
switch res.(type) {
|
|
||||||
case *FileRes:
|
|
||||||
res := res.(*FileRes)
|
|
||||||
if !obj.BaseRes.Compare(res) { // call base Compare
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.Name != res.Name {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if obj.GetPath() != res.Path {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if obj.Content != res.Content {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if obj.State != res.State {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// CollectPattern applies the pattern for collection resources.
|
|
||||||
func (obj *FileRes) CollectPattern(pattern string) {
|
|
||||||
// XXX: currently the pattern for files can only override the Dirname variable :P
|
|
||||||
obj.Dirname = pattern // XXX: simplistic for now
|
|
||||||
}
|
|
||||||
41
gapi/gapi.go
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package gapi defines the interface that graph API generators must meet.
|
||||||
|
package gapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/purpleidea/mgmt/etcd"
|
||||||
|
"github.com/purpleidea/mgmt/pgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Data is the set of input values passed into the GAPI structs via Init.
|
||||||
|
type Data struct {
|
||||||
|
Hostname string // uuid for the host, required for GAPI
|
||||||
|
EmbdEtcd *etcd.EmbdEtcd
|
||||||
|
Noop bool
|
||||||
|
NoWatch bool
|
||||||
|
// NOTE: we can add more fields here if needed by GAPI endpoints
|
||||||
|
}
|
||||||
|
|
||||||
|
// GAPI is a Graph API that represents incoming graphs and change streams.
|
||||||
|
type GAPI interface {
|
||||||
|
Init(Data) error // initializes the GAPI and passes in useful data
|
||||||
|
Graph() (*pgraph.Graph, error) // returns the most recent pgraph
|
||||||
|
SwitchStream() chan error // returns a stream of switch events
|
||||||
|
Close() error // shutdown the GAPI
|
||||||
|
}
|
||||||
26
global/global.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package global holds some global variables that are used throughout the code.
|
||||||
|
package global
|
||||||
|
|
||||||
|
// These constants are used throughout the program.
|
||||||
|
const (
|
||||||
|
DEBUG = false // add additional log messages
|
||||||
|
TRACE = false // add execution flow log messages
|
||||||
|
VERBOSE = false // add extra log message output
|
||||||
|
)
|
||||||
551
main.go
@@ -19,560 +19,21 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
etcdtypes "github.com/coreos/etcd/pkg/types"
|
|
||||||
"github.com/coreos/pkg/capnslog"
|
|
||||||
"github.com/urfave/cli"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
|
||||||
"sync"
|
"github.com/purpleidea/mgmt/mgmtmain"
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// set at compile time
|
// set at compile time
|
||||||
var (
|
var (
|
||||||
program string
|
program string
|
||||||
version string
|
version string
|
||||||
prefix = fmt.Sprintf("/var/lib/%s/", program)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// variables controlling verbosity
|
|
||||||
const (
|
|
||||||
DEBUG = false // add additional log messages
|
|
||||||
TRACE = false // add execution flow log messages
|
|
||||||
VERBOSE = false // add extra log message output
|
|
||||||
)
|
|
||||||
|
|
||||||
// signal handler
|
|
||||||
func waitForSignal(exit chan bool) {
|
|
||||||
signals := make(chan os.Signal, 1)
|
|
||||||
signal.Notify(signals, os.Interrupt) // catch ^C
|
|
||||||
//signal.Notify(signals, os.Kill) // catch signals
|
|
||||||
signal.Notify(signals, syscall.SIGTERM)
|
|
||||||
|
|
||||||
select {
|
|
||||||
case e := <-signals: // any signal will do
|
|
||||||
if e == os.Interrupt {
|
|
||||||
log.Println("Interrupted by ^C")
|
|
||||||
} else {
|
|
||||||
log.Println("Interrupted by signal")
|
|
||||||
}
|
|
||||||
case <-exit: // or a manual signal
|
|
||||||
log.Println("Interrupted by exit signal")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// run is the main run target.
|
|
||||||
func run(c *cli.Context) error {
|
|
||||||
var start = time.Now().UnixNano()
|
|
||||||
log.Printf("This is: %v, version: %v", program, version)
|
|
||||||
log.Printf("Main: Start: %v", start)
|
|
||||||
|
|
||||||
hostname, _ := os.Hostname()
|
|
||||||
// allow passing in the hostname, instead of using --hostname
|
|
||||||
if c.IsSet("file") {
|
|
||||||
if config := ParseConfigFromFile(c.String("file")); config != nil {
|
|
||||||
if h := config.Hostname; h != "" {
|
|
||||||
hostname = h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if c.IsSet("hostname") { // override by cli
|
|
||||||
if h := c.String("hostname"); h != "" {
|
|
||||||
hostname = h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
noop := c.Bool("noop")
|
|
||||||
|
|
||||||
seeds, err := etcdtypes.NewURLs(
|
|
||||||
FlattenListWithSplit(c.StringSlice("seeds"), []string{",", ";", " "}),
|
|
||||||
)
|
|
||||||
if err != nil && len(c.StringSlice("seeds")) > 0 {
|
|
||||||
log.Printf("Main: Error: seeds didn't parse correctly!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
clientURLs, err := etcdtypes.NewURLs(
|
|
||||||
FlattenListWithSplit(c.StringSlice("client-urls"), []string{",", ";", " "}),
|
|
||||||
)
|
|
||||||
if err != nil && len(c.StringSlice("client-urls")) > 0 {
|
|
||||||
log.Printf("Main: Error: clientURLs didn't parse correctly!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
serverURLs, err := etcdtypes.NewURLs(
|
|
||||||
FlattenListWithSplit(c.StringSlice("server-urls"), []string{",", ";", " "}),
|
|
||||||
)
|
|
||||||
if err != nil && len(c.StringSlice("server-urls")) > 0 {
|
|
||||||
log.Printf("Main: Error: serverURLs didn't parse correctly!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
idealClusterSize := uint16(c.Int("ideal-cluster-size"))
|
|
||||||
if idealClusterSize < 1 {
|
|
||||||
log.Printf("Main: Error: idealClusterSize should be at least one!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.IsSet("file") && c.IsSet("puppet") {
|
|
||||||
log.Println("Main: Error: the --file and --puppet parameters cannot be used together!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Bool("no-server") && len(c.StringSlice("remote")) > 0 {
|
|
||||||
// TODO: in this case, we won't be able to tunnel stuff back to
|
|
||||||
// here, so if we're okay with every remote graph running in an
|
|
||||||
// isolated mode, then this is okay. Improve on this if there's
|
|
||||||
// someone who really wants to be able to do this.
|
|
||||||
log.Println("Main: Error: the --no-server and --remote parameters cannot be used together!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
cConns := uint16(c.Int("cconns"))
|
|
||||||
if cConns < 0 {
|
|
||||||
log.Printf("Main: Error: --cconns should be at least zero!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.IsSet("converged-timeout") && cConns > 0 && len(c.StringSlice("remote")) > c.Int("cconns") {
|
|
||||||
log.Printf("Main: Error: combining --converged-timeout with more remotes than available connections will never converge!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
depth := uint16(c.Int("depth"))
|
|
||||||
if depth < 0 { // user should not be using this argument manually
|
|
||||||
log.Printf("Main: Error: negative values for --depth are not permitted!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.IsSet("prefix") && c.Bool("tmp-prefix") {
|
|
||||||
log.Println("Main: Error: combining --prefix and the request for a tmp prefix is illogical!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
if s := c.String("prefix"); c.IsSet("prefix") && s != "" {
|
|
||||||
prefix = s
|
|
||||||
}
|
|
||||||
|
|
||||||
// make sure the working directory prefix exists
|
|
||||||
if c.Bool("tmp-prefix") || os.MkdirAll(prefix, 0770) != nil {
|
|
||||||
if c.Bool("tmp-prefix") || c.Bool("allow-tmp-prefix") {
|
|
||||||
if prefix, err = ioutil.TempDir("", program+"-"); err != nil {
|
|
||||||
log.Printf("Main: Error: Can't create temporary prefix!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
log.Println("Main: Warning: Working prefix directory is temporary!")
|
|
||||||
|
|
||||||
} else {
|
|
||||||
log.Printf("Main: Error: Can't create prefix!")
|
|
||||||
return cli.NewExitError("", 1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Printf("Main: Working prefix is: %s", prefix)
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
exit := make(chan bool) // exit signal
|
|
||||||
var G, fullGraph *Graph
|
|
||||||
|
|
||||||
// exit after `max-runtime` seconds for no reason at all...
|
|
||||||
if i := c.Int("max-runtime"); i > 0 {
|
|
||||||
go func() {
|
|
||||||
time.Sleep(time.Duration(i) * time.Second)
|
|
||||||
exit <- true
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// setup converger
|
|
||||||
converger := NewConverger(
|
|
||||||
c.Int("converged-timeout"),
|
|
||||||
nil, // stateFn gets added in by EmbdEtcd
|
|
||||||
)
|
|
||||||
go converger.Loop(true) // main loop for converger, true to start paused
|
|
||||||
|
|
||||||
// embedded etcd
|
|
||||||
if len(seeds) == 0 {
|
|
||||||
log.Printf("Main: Seeds: No seeds specified!")
|
|
||||||
} else {
|
|
||||||
log.Printf("Main: Seeds(%v): %v", len(seeds), seeds)
|
|
||||||
}
|
|
||||||
EmbdEtcd := NewEmbdEtcd(
|
|
||||||
hostname,
|
|
||||||
seeds,
|
|
||||||
clientURLs,
|
|
||||||
serverURLs,
|
|
||||||
c.Bool("no-server"),
|
|
||||||
idealClusterSize,
|
|
||||||
prefix,
|
|
||||||
converger,
|
|
||||||
)
|
|
||||||
if EmbdEtcd == nil {
|
|
||||||
// TODO: verify EmbdEtcd is not nil below...
|
|
||||||
log.Printf("Main: Etcd: Creation failed!")
|
|
||||||
exit <- true
|
|
||||||
} else if err := EmbdEtcd.Startup(); err != nil { // startup (returns when etcd main loop is running)
|
|
||||||
log.Printf("Main: Etcd: Startup failed: %v", err)
|
|
||||||
exit <- true
|
|
||||||
}
|
|
||||||
convergerStateFn := func(b bool) error {
|
|
||||||
// exit if we are using the converged-timeout and we are the
|
|
||||||
// root node. otherwise, if we are a child node in a remote
|
|
||||||
// execution hierarchy, we should only notify our converged
|
|
||||||
// state and wait for the parent to trigger the exit.
|
|
||||||
if depth == 0 && c.Int("converged-timeout") >= 0 {
|
|
||||||
if b {
|
|
||||||
log.Printf("Converged for %d seconds, exiting!", c.Int("converged-timeout"))
|
|
||||||
exit <- true // trigger an exit!
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// send our individual state into etcd for others to see
|
|
||||||
return EtcdSetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
|
|
||||||
}
|
|
||||||
if EmbdEtcd != nil {
|
|
||||||
converger.SetStateFn(convergerStateFn)
|
|
||||||
}
|
|
||||||
|
|
||||||
exitchan := make(chan Event) // exit event
|
|
||||||
go func() {
|
|
||||||
startchan := make(chan struct{}) // start signal
|
|
||||||
go func() { startchan <- struct{}{} }()
|
|
||||||
file := c.String("file")
|
|
||||||
var configchan chan bool
|
|
||||||
var puppetchan <-chan time.Time
|
|
||||||
if !c.Bool("no-watch") && c.IsSet("file") {
|
|
||||||
configchan = ConfigWatch(file)
|
|
||||||
} else if c.IsSet("puppet") {
|
|
||||||
interval := PuppetInterval(c.String("puppet-conf"))
|
|
||||||
puppetchan = time.Tick(time.Duration(interval) * time.Second)
|
|
||||||
}
|
|
||||||
log.Println("Etcd: Starting...")
|
|
||||||
etcdchan := EtcdWatch(EmbdEtcd)
|
|
||||||
first := true // first loop or not
|
|
||||||
for {
|
|
||||||
log.Println("Main: Waiting...")
|
|
||||||
select {
|
|
||||||
case <-startchan: // kick the loop once at start
|
|
||||||
// pass
|
|
||||||
|
|
||||||
case b := <-etcdchan:
|
|
||||||
if !b { // ignore the message
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// everything else passes through to cause a compile!
|
|
||||||
|
|
||||||
case <-puppetchan:
|
|
||||||
// nothing, just go on
|
|
||||||
|
|
||||||
case msg := <-configchan:
|
|
||||||
if c.Bool("no-watch") || !msg {
|
|
||||||
continue // not ready to read config
|
|
||||||
}
|
|
||||||
// XXX: case compile_event: ...
|
|
||||||
// ...
|
|
||||||
case msg := <-exitchan:
|
|
||||||
msg.ACK()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var config *GraphConfig
|
|
||||||
if c.IsSet("file") {
|
|
||||||
config = ParseConfigFromFile(file)
|
|
||||||
} else if c.IsSet("puppet") {
|
|
||||||
config = ParseConfigFromPuppet(c.String("puppet"), c.String("puppet-conf"))
|
|
||||||
}
|
|
||||||
if config == nil {
|
|
||||||
log.Printf("Config: Parse failure")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.Hostname != "" && config.Hostname != hostname {
|
|
||||||
log.Printf("Config: Hostname changed, ignoring config!")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
config.Hostname = hostname // set it in case it was ""
|
|
||||||
|
|
||||||
// run graph vertex LOCK...
|
|
||||||
if !first { // TODO: we can flatten this check out I think
|
|
||||||
converger.Pause() // FIXME: add sync wait?
|
|
||||||
G.Pause() // sync
|
|
||||||
}
|
|
||||||
|
|
||||||
// build graph from yaml file on events (eg: from etcd)
|
|
||||||
// we need the vertices to be paused to work on them
|
|
||||||
if newFullgraph, err := fullGraph.NewGraphFromConfig(config, EmbdEtcd, noop); err == nil { // keep references to all original elements
|
|
||||||
fullGraph = newFullgraph
|
|
||||||
} else {
|
|
||||||
log.Printf("Config: Error making new graph from config: %v", err)
|
|
||||||
// unpause!
|
|
||||||
if !first {
|
|
||||||
G.Start(&wg, first) // sync
|
|
||||||
converger.Start() // after G.Start()
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
G = fullGraph.Copy() // copy to active graph
|
|
||||||
// XXX: do etcd transaction out here...
|
|
||||||
G.AutoEdges() // add autoedges; modifies the graph
|
|
||||||
G.AutoGroup() // run autogroup; modifies the graph
|
|
||||||
// TODO: do we want to do a transitive reduction?
|
|
||||||
|
|
||||||
log.Printf("Graph: %v", G) // show graph
|
|
||||||
err := G.ExecGraphviz(c.String("graphviz-filter"), c.String("graphviz"))
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Graphviz: %v", err)
|
|
||||||
} else {
|
|
||||||
log.Printf("Graphviz: Successfully generated graph!")
|
|
||||||
}
|
|
||||||
G.AssociateData(converger)
|
|
||||||
// G.Start(...) needs to be synchronous or wait,
|
|
||||||
// because if half of the nodes are started and
|
|
||||||
// some are not ready yet and the EtcdWatch
|
|
||||||
// loops, we'll cause G.Pause(...) before we
|
|
||||||
// even got going, thus causing nil pointer errors
|
|
||||||
G.Start(&wg, first) // sync
|
|
||||||
converger.Start() // after G.Start()
|
|
||||||
first = false
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
configWatcher := NewConfigWatcher()
|
|
||||||
events := configWatcher.Events()
|
|
||||||
if !c.Bool("no-watch") {
|
|
||||||
configWatcher.Add(c.StringSlice("remote")...) // add all the files...
|
|
||||||
} else {
|
|
||||||
events = nil // signal that no-watch is true
|
|
||||||
}
|
|
||||||
|
|
||||||
// initialize the add watcher, which calls the f callback on map changes
|
|
||||||
convergerCb := func(f func(map[string]bool) error) (func(), error) {
|
|
||||||
return EtcdAddHostnameConvergedWatcher(EmbdEtcd, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// build remotes struct for remote ssh
|
|
||||||
remotes := NewRemotes(
|
|
||||||
EmbdEtcd.LocalhostClientURLs().StringSlice(),
|
|
||||||
[]string{DefaultClientURL},
|
|
||||||
noop,
|
|
||||||
c.StringSlice("remote"), // list of files
|
|
||||||
events, // watch for file changes
|
|
||||||
cConns,
|
|
||||||
c.Bool("allow-interactive"),
|
|
||||||
c.String("ssh-priv-id-rsa"),
|
|
||||||
!c.Bool("no-caching"),
|
|
||||||
depth,
|
|
||||||
prefix,
|
|
||||||
converger,
|
|
||||||
convergerCb,
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: is there any benefit to running the remotes above in the loop?
|
|
||||||
// wait for etcd to be running before we remote in, which we do above!
|
|
||||||
go remotes.Run()
|
|
||||||
|
|
||||||
if !c.IsSet("file") && !c.IsSet("puppet") {
|
|
||||||
converger.Start() // better start this for empty graphs
|
|
||||||
}
|
|
||||||
log.Println("Main: Running...")
|
|
||||||
|
|
||||||
waitForSignal(exit) // pass in exit channel to watch
|
|
||||||
|
|
||||||
log.Println("Destroy...")
|
|
||||||
|
|
||||||
configWatcher.Close() // stop sending file changes to remotes
|
|
||||||
remotes.Exit() // tell all the remote connections to shutdown; waits!
|
|
||||||
|
|
||||||
G.Exit() // tell all the children to exit
|
|
||||||
|
|
||||||
// tell inner main loop to exit
|
|
||||||
resp := NewResp()
|
|
||||||
go func() { exitchan <- Event{eventExit, resp, "", false} }()
|
|
||||||
|
|
||||||
// cleanup etcd main loop last so it can process everything first
|
|
||||||
if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd
|
|
||||||
log.Printf("Etcd exited poorly with: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp.ACKWait() // let inner main loop finish cleanly just in case
|
|
||||||
|
|
||||||
if DEBUG {
|
|
||||||
log.Printf("Graph: %v", G)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait() // wait for primary go routines to exit
|
|
||||||
|
|
||||||
// TODO: wait for each vertex to exit...
|
|
||||||
log.Println("Goodbye!")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
var flags int
|
if err := mgmtmain.CLI(program, version); err != nil {
|
||||||
if DEBUG || true { // TODO: remove || true
|
fmt.Println(err)
|
||||||
flags = log.LstdFlags | log.Lshortfile
|
os.Exit(1)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
flags = (flags - log.Ldate) // remove the date for now
|
|
||||||
log.SetFlags(flags)
|
|
||||||
|
|
||||||
// un-hijack from capnslog...
|
|
||||||
log.SetOutput(os.Stderr)
|
|
||||||
if VERBOSE {
|
|
||||||
capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stderr, "(etcd) ", flags))
|
|
||||||
} else {
|
|
||||||
capnslog.SetFormatter(capnslog.NewNilFormatter())
|
|
||||||
}
|
|
||||||
|
|
||||||
// test for sanity
|
|
||||||
if program == "" || version == "" {
|
|
||||||
log.Fatal("Program was not compiled correctly. Please see Makefile.")
|
|
||||||
}
|
|
||||||
app := cli.NewApp()
|
|
||||||
app.Name = program
|
|
||||||
app.Usage = "next generation config management"
|
|
||||||
app.Version = version
|
|
||||||
//app.Action = ... // without a default action, help runs
|
|
||||||
|
|
||||||
app.Commands = []cli.Command{
|
|
||||||
{
|
|
||||||
Name: "run",
|
|
||||||
Aliases: []string{"r"},
|
|
||||||
Usage: "run",
|
|
||||||
Action: run,
|
|
||||||
Flags: []cli.Flag{
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "file, f",
|
|
||||||
Value: "",
|
|
||||||
Usage: "graph definition to run",
|
|
||||||
EnvVar: "MGMT_FILE",
|
|
||||||
},
|
|
||||||
cli.BoolFlag{
|
|
||||||
Name: "no-watch",
|
|
||||||
Usage: "do not update graph on watched graph definition file changes",
|
|
||||||
},
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "code, c",
|
|
||||||
Value: "",
|
|
||||||
Usage: "code definition to run",
|
|
||||||
},
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "graphviz, g",
|
|
||||||
Value: "",
|
|
||||||
Usage: "output file for graphviz data",
|
|
||||||
},
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "graphviz-filter, gf",
|
|
||||||
Value: "dot", // directed graph default
|
|
||||||
Usage: "graphviz filter to use",
|
|
||||||
},
|
|
||||||
// useful for testing multiple instances on same machine
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "hostname",
|
|
||||||
Value: "",
|
|
||||||
Usage: "hostname to use",
|
|
||||||
},
|
|
||||||
// if empty, it will startup a new server
|
|
||||||
cli.StringSliceFlag{
|
|
||||||
Name: "seeds, s",
|
|
||||||
Value: &cli.StringSlice{}, // empty slice
|
|
||||||
Usage: "default etc client endpoint",
|
|
||||||
EnvVar: "MGMT_SEEDS",
|
|
||||||
},
|
|
||||||
// port 2379 and 4001 are common
|
|
||||||
cli.StringSliceFlag{
|
|
||||||
Name: "client-urls",
|
|
||||||
Value: &cli.StringSlice{},
|
|
||||||
Usage: "list of URLs to listen on for client traffic",
|
|
||||||
EnvVar: "MGMT_CLIENT_URLS",
|
|
||||||
},
|
|
||||||
// port 2380 and 7001 are common
|
|
||||||
cli.StringSliceFlag{
|
|
||||||
Name: "server-urls, peer-urls",
|
|
||||||
Value: &cli.StringSlice{},
|
|
||||||
Usage: "list of URLs to listen on for server (peer) traffic",
|
|
||||||
EnvVar: "MGMT_SERVER_URLS",
|
|
||||||
},
|
|
||||||
cli.BoolFlag{
|
|
||||||
Name: "no-server",
|
|
||||||
Usage: "do not let other servers peer with me",
|
|
||||||
},
|
|
||||||
cli.IntFlag{
|
|
||||||
Name: "ideal-cluster-size",
|
|
||||||
Value: defaultIdealClusterSize,
|
|
||||||
Usage: "ideal number of server peers in cluster, only read by initial server",
|
|
||||||
EnvVar: "MGMT_IDEAL_CLUSTER_SIZE",
|
|
||||||
},
|
|
||||||
cli.IntFlag{
|
|
||||||
Name: "converged-timeout, t",
|
|
||||||
Value: -1,
|
|
||||||
Usage: "exit after approximately this many seconds in a converged state",
|
|
||||||
EnvVar: "MGMT_CONVERGED_TIMEOUT",
|
|
||||||
},
|
|
||||||
cli.IntFlag{
|
|
||||||
Name: "max-runtime",
|
|
||||||
Value: 0,
|
|
||||||
Usage: "exit after a maximum of approximately this many seconds",
|
|
||||||
EnvVar: "MGMT_MAX_RUNTIME",
|
|
||||||
},
|
|
||||||
cli.BoolFlag{
|
|
||||||
Name: "noop",
|
|
||||||
Usage: "globally force all resources into no-op mode",
|
|
||||||
},
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "puppet, p",
|
|
||||||
Value: "",
|
|
||||||
Usage: "load graph from puppet, optionally takes a manifest or path to manifest file",
|
|
||||||
},
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "puppet-conf",
|
|
||||||
Value: "",
|
|
||||||
Usage: "supply the path to an alternate puppet.conf file to use",
|
|
||||||
},
|
|
||||||
cli.StringSliceFlag{
|
|
||||||
Name: "remote",
|
|
||||||
Value: &cli.StringSlice{},
|
|
||||||
Usage: "list of remote graph definitions to run",
|
|
||||||
},
|
|
||||||
cli.BoolFlag{
|
|
||||||
Name: "allow-interactive",
|
|
||||||
Usage: "allow interactive prompting, such as for remote passwords",
|
|
||||||
},
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "ssh-priv-id-rsa",
|
|
||||||
Value: "~/.ssh/id_rsa",
|
|
||||||
Usage: "default path to ssh key file, set empty to never touch",
|
|
||||||
EnvVar: "MGMT_SSH_PRIV_ID_RSA",
|
|
||||||
},
|
|
||||||
cli.IntFlag{
|
|
||||||
Name: "cconns",
|
|
||||||
Value: 0,
|
|
||||||
Usage: "number of maximum concurrent remote ssh connections to run, 0 for unlimited",
|
|
||||||
EnvVar: "MGMT_CCONNS",
|
|
||||||
},
|
|
||||||
cli.BoolFlag{
|
|
||||||
Name: "no-caching",
|
|
||||||
Usage: "don't allow remote caching of remote execution binary",
|
|
||||||
},
|
|
||||||
cli.IntFlag{
|
|
||||||
Name: "depth",
|
|
||||||
Hidden: true, // internal use only
|
|
||||||
Value: 0,
|
|
||||||
Usage: "specify depth in remote hierarchy",
|
|
||||||
},
|
|
||||||
cli.StringFlag{
|
|
||||||
Name: "prefix",
|
|
||||||
Usage: "specify a path to the working prefix directory",
|
|
||||||
EnvVar: "MGMT_PREFIX",
|
|
||||||
},
|
|
||||||
cli.BoolFlag{
|
|
||||||
Name: "tmp-prefix",
|
|
||||||
Usage: "request a pseudo-random, temporary prefix to be used",
|
|
||||||
},
|
|
||||||
cli.BoolFlag{
|
|
||||||
Name: "allow-tmp-prefix",
|
|
||||||
Usage: "allow creation of a new temporary prefix if main prefix is unavailable",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
app.EnableBashCompletion = true
|
|
||||||
app.Run(os.Args)
|
|
||||||
}
|
}
|
||||||
|
|||||||
296
mgmtmain/cli.go
Normal file
@@ -0,0 +1,296 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package mgmtmain
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/puppet"
|
||||||
|
"github.com/purpleidea/mgmt/yamlgraph"
|
||||||
|
|
||||||
|
"github.com/urfave/cli"
|
||||||
|
)
|
||||||
|
|
||||||
|
// run is the main run target.
|
||||||
|
func run(c *cli.Context) error {
|
||||||
|
|
||||||
|
obj := &Main{}
|
||||||
|
|
||||||
|
obj.Program = c.App.Name
|
||||||
|
obj.Version = c.App.Version
|
||||||
|
|
||||||
|
if h := c.String("hostname"); c.IsSet("hostname") && h != "" {
|
||||||
|
obj.Hostname = &h
|
||||||
|
}
|
||||||
|
|
||||||
|
if s := c.String("prefix"); c.IsSet("prefix") && s != "" {
|
||||||
|
obj.Prefix = &s
|
||||||
|
}
|
||||||
|
obj.TmpPrefix = c.Bool("tmp-prefix")
|
||||||
|
obj.AllowTmpPrefix = c.Bool("allow-tmp-prefix")
|
||||||
|
|
||||||
|
if _ = c.String("code"); c.IsSet("code") {
|
||||||
|
if obj.GAPI != nil {
|
||||||
|
return fmt.Errorf("Can't combine code GAPI with existing GAPI.")
|
||||||
|
}
|
||||||
|
// TODO: implement DSL GAPI
|
||||||
|
//obj.GAPI = &dsl.GAPI{
|
||||||
|
// Code: &s,
|
||||||
|
//}
|
||||||
|
return fmt.Errorf("The Code GAPI is not implemented yet!") // TODO: DSL
|
||||||
|
}
|
||||||
|
if y := c.String("yaml"); c.IsSet("yaml") {
|
||||||
|
if obj.GAPI != nil {
|
||||||
|
return fmt.Errorf("Can't combine YAML GAPI with existing GAPI.")
|
||||||
|
}
|
||||||
|
obj.GAPI = &yamlgraph.GAPI{
|
||||||
|
File: &y,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if p := c.String("puppet"); c.IsSet("puppet") {
|
||||||
|
if obj.GAPI != nil {
|
||||||
|
return fmt.Errorf("Can't combine puppet GAPI with existing GAPI.")
|
||||||
|
}
|
||||||
|
obj.GAPI = &puppet.GAPI{
|
||||||
|
PuppetParam: &p,
|
||||||
|
PuppetConf: c.String("puppet-conf"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
obj.Remotes = c.StringSlice("remote") // FIXME: GAPI-ify somehow?
|
||||||
|
|
||||||
|
obj.NoWatch = c.Bool("no-watch")
|
||||||
|
obj.Noop = c.Bool("noop")
|
||||||
|
obj.Graphviz = c.String("graphviz")
|
||||||
|
obj.GraphvizFilter = c.String("graphviz-filter")
|
||||||
|
obj.ConvergedTimeout = c.Int("converged-timeout")
|
||||||
|
obj.MaxRuntime = uint(c.Int("max-runtime"))
|
||||||
|
|
||||||
|
obj.Seeds = c.StringSlice("seeds")
|
||||||
|
obj.ClientURLs = c.StringSlice("client-urls")
|
||||||
|
obj.ServerURLs = c.StringSlice("server-urls")
|
||||||
|
obj.IdealClusterSize = c.Int("ideal-cluster-size")
|
||||||
|
obj.NoServer = c.Bool("no-server")
|
||||||
|
|
||||||
|
obj.CConns = uint16(c.Int("cconns"))
|
||||||
|
obj.AllowInteractive = c.Bool("allow-interactive")
|
||||||
|
obj.SSHPrivIDRsa = c.String("ssh-priv-id-rsa")
|
||||||
|
obj.NoCaching = c.Bool("no-caching")
|
||||||
|
obj.Depth = uint16(c.Int("depth"))
|
||||||
|
|
||||||
|
if err := obj.Init(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// install the exit signal handler
|
||||||
|
exit := make(chan struct{})
|
||||||
|
defer close(exit)
|
||||||
|
go func() {
|
||||||
|
signals := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(signals, os.Interrupt) // catch ^C
|
||||||
|
//signal.Notify(signals, os.Kill) // catch signals
|
||||||
|
signal.Notify(signals, syscall.SIGTERM)
|
||||||
|
|
||||||
|
select {
|
||||||
|
case sig := <-signals: // any signal will do
|
||||||
|
if sig == os.Interrupt {
|
||||||
|
log.Println("Interrupted by ^C")
|
||||||
|
obj.Exit(nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Println("Interrupted by signal")
|
||||||
|
obj.Exit(fmt.Errorf("Killed by %v", sig))
|
||||||
|
return
|
||||||
|
case <-exit:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err := obj.Run(); err != nil {
|
||||||
|
return err
|
||||||
|
//return cli.NewExitError(err.Error(), 1) // TODO: ?
|
||||||
|
//return cli.NewExitError("", 1) // TODO: ?
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CLI is the entry point for using mgmt normally from the CLI.
|
||||||
|
func CLI(program, version string) error {
|
||||||
|
|
||||||
|
// test for sanity
|
||||||
|
if program == "" || version == "" {
|
||||||
|
return fmt.Errorf("Program was not compiled correctly. Please see Makefile.")
|
||||||
|
}
|
||||||
|
app := cli.NewApp()
|
||||||
|
app.Name = program // App.name and App.version pass these values through
|
||||||
|
app.Version = version
|
||||||
|
app.Usage = "next generation config management"
|
||||||
|
//app.Action = ... // without a default action, help runs
|
||||||
|
|
||||||
|
app.Commands = []cli.Command{
|
||||||
|
{
|
||||||
|
Name: "run",
|
||||||
|
Aliases: []string{"r"},
|
||||||
|
Usage: "run",
|
||||||
|
Action: run,
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
// useful for testing multiple instances on same machine
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "hostname",
|
||||||
|
Value: "",
|
||||||
|
Usage: "hostname to use",
|
||||||
|
},
|
||||||
|
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "prefix",
|
||||||
|
Usage: "specify a path to the working prefix directory",
|
||||||
|
EnvVar: "MGMT_PREFIX",
|
||||||
|
},
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "tmp-prefix",
|
||||||
|
Usage: "request a pseudo-random, temporary prefix to be used",
|
||||||
|
},
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "allow-tmp-prefix",
|
||||||
|
Usage: "allow creation of a new temporary prefix if main prefix is unavailable",
|
||||||
|
},
|
||||||
|
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "code, c",
|
||||||
|
Value: "",
|
||||||
|
Usage: "code definition to run",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "yaml",
|
||||||
|
Value: "",
|
||||||
|
Usage: "yaml graph definition to run",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "puppet, p",
|
||||||
|
Value: "",
|
||||||
|
Usage: "load graph from puppet, optionally takes a manifest or path to manifest file",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "puppet-conf",
|
||||||
|
Value: "",
|
||||||
|
Usage: "the path to an alternate puppet.conf file",
|
||||||
|
},
|
||||||
|
cli.StringSliceFlag{
|
||||||
|
Name: "remote",
|
||||||
|
Value: &cli.StringSlice{},
|
||||||
|
Usage: "list of remote graph definitions to run",
|
||||||
|
},
|
||||||
|
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "no-watch",
|
||||||
|
Usage: "do not update graph on stream switch events",
|
||||||
|
},
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "noop",
|
||||||
|
Usage: "globally force all resources into no-op mode",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "graphviz, g",
|
||||||
|
Value: "",
|
||||||
|
Usage: "output file for graphviz data",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "graphviz-filter, gf",
|
||||||
|
Value: "dot", // directed graph default
|
||||||
|
Usage: "graphviz filter to use",
|
||||||
|
},
|
||||||
|
cli.IntFlag{
|
||||||
|
Name: "converged-timeout, t",
|
||||||
|
Value: -1,
|
||||||
|
Usage: "exit after approximately this many seconds in a converged state",
|
||||||
|
EnvVar: "MGMT_CONVERGED_TIMEOUT",
|
||||||
|
},
|
||||||
|
cli.IntFlag{
|
||||||
|
Name: "max-runtime",
|
||||||
|
Value: 0,
|
||||||
|
Usage: "exit after a maximum of approximately this many seconds",
|
||||||
|
EnvVar: "MGMT_MAX_RUNTIME",
|
||||||
|
},
|
||||||
|
|
||||||
|
// if empty, it will startup a new server
|
||||||
|
cli.StringSliceFlag{
|
||||||
|
Name: "seeds, s",
|
||||||
|
Value: &cli.StringSlice{}, // empty slice
|
||||||
|
Usage: "default etc client endpoint",
|
||||||
|
EnvVar: "MGMT_SEEDS",
|
||||||
|
},
|
||||||
|
// port 2379 and 4001 are common
|
||||||
|
cli.StringSliceFlag{
|
||||||
|
Name: "client-urls",
|
||||||
|
Value: &cli.StringSlice{},
|
||||||
|
Usage: "list of URLs to listen on for client traffic",
|
||||||
|
EnvVar: "MGMT_CLIENT_URLS",
|
||||||
|
},
|
||||||
|
// port 2380 and 7001 are common
|
||||||
|
cli.StringSliceFlag{
|
||||||
|
Name: "server-urls, peer-urls",
|
||||||
|
Value: &cli.StringSlice{},
|
||||||
|
Usage: "list of URLs to listen on for server (peer) traffic",
|
||||||
|
EnvVar: "MGMT_SERVER_URLS",
|
||||||
|
},
|
||||||
|
cli.IntFlag{
|
||||||
|
Name: "ideal-cluster-size",
|
||||||
|
Value: -1,
|
||||||
|
Usage: "ideal number of server peers in cluster; only read by initial server",
|
||||||
|
EnvVar: "MGMT_IDEAL_CLUSTER_SIZE",
|
||||||
|
},
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "no-server",
|
||||||
|
Usage: "do not let other servers peer with me",
|
||||||
|
},
|
||||||
|
|
||||||
|
cli.IntFlag{
|
||||||
|
Name: "cconns",
|
||||||
|
Value: 0,
|
||||||
|
Usage: "number of maximum concurrent remote ssh connections to run; 0 for unlimited",
|
||||||
|
EnvVar: "MGMT_CCONNS",
|
||||||
|
},
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "allow-interactive",
|
||||||
|
Usage: "allow interactive prompting, such as for remote passwords",
|
||||||
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "ssh-priv-id-rsa",
|
||||||
|
Value: "~/.ssh/id_rsa",
|
||||||
|
Usage: "default path to ssh key file, set empty to never touch",
|
||||||
|
EnvVar: "MGMT_SSH_PRIV_ID_RSA",
|
||||||
|
},
|
||||||
|
cli.BoolFlag{
|
||||||
|
Name: "no-caching",
|
||||||
|
Usage: "don't allow remote caching of remote execution binary",
|
||||||
|
},
|
||||||
|
cli.IntFlag{
|
||||||
|
Name: "depth",
|
||||||
|
Hidden: true, // internal use only
|
||||||
|
Value: 0,
|
||||||
|
Usage: "specify depth in remote hierarchy",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
app.EnableBashCompletion = true
|
||||||
|
return app.Run(os.Args)
|
||||||
|
}
|
||||||
480
mgmtmain/main.go
Normal file
@@ -0,0 +1,480 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package mgmtmain
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/converger"
|
||||||
|
"github.com/purpleidea/mgmt/etcd"
|
||||||
|
"github.com/purpleidea/mgmt/gapi"
|
||||||
|
"github.com/purpleidea/mgmt/pgraph"
|
||||||
|
"github.com/purpleidea/mgmt/recwatch"
|
||||||
|
"github.com/purpleidea/mgmt/remote"
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
|
||||||
|
etcdtypes "github.com/coreos/etcd/pkg/types"
|
||||||
|
"github.com/coreos/pkg/capnslog"
|
||||||
|
multierr "github.com/hashicorp/go-multierror"
|
||||||
|
errwrap "github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Main is the main struct for running the mgmt logic.
type Main struct {
	Program string // the name of this program, usually set at compile time
	Version string // the version of this program, usually set at compile time

	Hostname *string // hostname to use; nil if undefined

	Prefix         *string // prefix passed in; nil if undefined
	TmpPrefix      bool    // request a pseudo-random, temporary prefix to be used
	AllowTmpPrefix bool    // allow creation of a new temporary prefix if main prefix is unavailable

	GAPI    gapi.GAPI // graph API interface struct
	Remotes []string  // list of remote graph definitions to run

	NoWatch          bool   // do not update graph on watched graph definition file changes
	Noop             bool   // globally force all resources into no-op mode
	Graphviz         string // output file for graphviz data
	GraphvizFilter   string // graphviz filter to use
	ConvergedTimeout int    // exit after approximately this many seconds in a converged state; -1 to disable
	MaxRuntime       uint   // exit after a maximum of approximately this many seconds

	Seeds            []string // default etc client endpoint
	ClientURLs       []string // list of URLs to listen on for client traffic
	ServerURLs       []string // list of URLs to listen on for server (peer) traffic
	IdealClusterSize int      // ideal number of server peers in cluster; only read by initial server
	NoServer         bool     // do not let other servers peer with me

	CConns           uint16 // number of maximum concurrent remote ssh connections to run, 0 for unlimited
	AllowInteractive bool   // allow interactive prompting, such as for remote passwords
	SSHPrivIDRsa     string // default path to ssh key file, set empty to never touch
	NoCaching        bool   // don't allow remote caching of remote execution binary
	Depth            uint16 // depth in remote hierarchy; for internal use only

	DEBUG   bool // enable debug logging (adds file:line flags; prints graph on exit)
	VERBOSE bool // enable verbose etcd (capnslog) output

	// private, processed copies of the public inputs above; set by Init().
	seeds            etcdtypes.URLs // processed seeds value
	clientURLs       etcdtypes.URLs // processed client urls value
	serverURLs       etcdtypes.URLs // processed server urls value
	idealClusterSize uint16         // processed ideal cluster size value

	exit chan error // exit signal; Exit() sends, Run() receives
}
|
||||||
|
|
||||||
|
// Init initializes the main struct after it performs some validation.
|
||||||
|
func (obj *Main) Init() error {
|
||||||
|
|
||||||
|
if obj.Program == "" || obj.Version == "" {
|
||||||
|
return fmt.Errorf("You must set the Program and Version strings!")
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Prefix != nil && obj.TmpPrefix {
|
||||||
|
return fmt.Errorf("Choosing a prefix and the request for a tmp prefix is illogical!")
|
||||||
|
}
|
||||||
|
|
||||||
|
obj.idealClusterSize = uint16(obj.IdealClusterSize)
|
||||||
|
if obj.IdealClusterSize < 0 { // value is undefined, set to the default
|
||||||
|
obj.idealClusterSize = etcd.DefaultIdealClusterSize
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.idealClusterSize < 1 {
|
||||||
|
return fmt.Errorf("IdealClusterSize should be at least one!")
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.NoServer && len(obj.Remotes) > 0 {
|
||||||
|
// TODO: in this case, we won't be able to tunnel stuff back to
|
||||||
|
// here, so if we're okay with every remote graph running in an
|
||||||
|
// isolated mode, then this is okay. Improve on this if there's
|
||||||
|
// someone who really wants to be able to do this.
|
||||||
|
return fmt.Errorf("The Server is required when using Remotes!")
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.CConns < 0 {
|
||||||
|
return fmt.Errorf("The CConns value should be at least zero!")
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.ConvergedTimeout >= 0 && obj.CConns > 0 && len(obj.Remotes) > int(obj.CConns) {
|
||||||
|
return fmt.Errorf("You can't converge if you have more remotes than available connections!")
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Depth < 0 { // user should not be using this argument manually
|
||||||
|
return fmt.Errorf("Negative values for Depth are not permitted!")
|
||||||
|
}
|
||||||
|
|
||||||
|
// transform the url list inputs into etcd typed lists
|
||||||
|
var err error
|
||||||
|
obj.seeds, err = etcdtypes.NewURLs(
|
||||||
|
util.FlattenListWithSplit(obj.Seeds, []string{",", ";", " "}),
|
||||||
|
)
|
||||||
|
if err != nil && len(obj.Seeds) > 0 {
|
||||||
|
return fmt.Errorf("Seeds didn't parse correctly!")
|
||||||
|
}
|
||||||
|
obj.clientURLs, err = etcdtypes.NewURLs(
|
||||||
|
util.FlattenListWithSplit(obj.ClientURLs, []string{",", ";", " "}),
|
||||||
|
)
|
||||||
|
if err != nil && len(obj.ClientURLs) > 0 {
|
||||||
|
return fmt.Errorf("ClientURLs didn't parse correctly!")
|
||||||
|
}
|
||||||
|
obj.serverURLs, err = etcdtypes.NewURLs(
|
||||||
|
util.FlattenListWithSplit(obj.ServerURLs, []string{",", ";", " "}),
|
||||||
|
)
|
||||||
|
if err != nil && len(obj.ServerURLs) > 0 {
|
||||||
|
return fmt.Errorf("ServerURLs didn't parse correctly!")
|
||||||
|
}
|
||||||
|
|
||||||
|
obj.exit = make(chan error)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exit causes a safe shutdown. This is often attached to the ^C signal handler.
// The (possibly nil) error is delivered to Run(), which blocks reading from
// obj.exit; this send therefore blocks until Run() receives it.
func (obj *Main) Exit(err error) {
	obj.exit <- err // trigger an exit!
}
|
||||||
|
|
||||||
|
// Run is the main execution entrypoint to run mgmt. It sets up logging, etcd,
// the converger and the remote-execution machinery, then runs the graph event
// loop until Exit() is triggered, and finally tears everything down, returning
// the accumulated (multi-)error.
func (obj *Main) Run() error {

	var start = time.Now().UnixNano()

	// logging setup: file:line flags are currently always on (see TODO)
	var flags int
	if obj.DEBUG || true { // TODO: remove || true
		flags = log.LstdFlags | log.Lshortfile
	}
	flags = (flags - log.Ldate) // remove the date for now
	log.SetFlags(flags)

	// un-hijack from capnslog...
	log.SetOutput(os.Stderr)
	if obj.VERBOSE {
		capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stderr, "(etcd) ", flags))
	} else {
		capnslog.SetFormatter(capnslog.NewNilFormatter())
	}

	log.Printf("This is: %s, version: %s", obj.Program, obj.Version)
	log.Printf("Main: Start: %v", start)

	hostname, err := os.Hostname() // a sensible default
	// allow passing in the hostname, instead of using the system setting
	if h := obj.Hostname; h != nil && *h != "" { // override by cli
		hostname = *h
	} else if err != nil {
		return errwrap.Wrapf(err, "Can't get default hostname!")
	}
	if hostname == "" { // safety check
		return fmt.Errorf("Hostname cannot be empty!")
	}

	var prefix = fmt.Sprintf("/var/lib/%s/", obj.Program) // default prefix
	if p := obj.Prefix; p != nil {
		prefix = *p
	}
	// make sure the working directory prefix exists; fall back to a
	// temporary prefix when requested or (if allowed) when MkdirAll fails
	if obj.TmpPrefix || os.MkdirAll(prefix, 0770) != nil {
		if obj.TmpPrefix || obj.AllowTmpPrefix {
			var err error
			if prefix, err = ioutil.TempDir("", obj.Program+"-"+hostname+"-"); err != nil {
				return fmt.Errorf("Main: Error: Can't create temporary prefix!")
			}
			log.Println("Main: Warning: Working prefix directory is temporary!")

		} else {
			return fmt.Errorf("Main: Error: Can't create prefix!")
		}
	}
	log.Printf("Main: Working prefix is: %s", prefix)

	var wg sync.WaitGroup
	var G, oldGraph *pgraph.Graph

	// exit after `max-runtime` seconds for no reason at all...
	if i := obj.MaxRuntime; i > 0 {
		go func() {
			time.Sleep(time.Duration(i) * time.Second)
			obj.Exit(nil)
		}()
	}

	// setup converger
	converger := converger.NewConverger(
		obj.ConvergedTimeout,
		nil, // stateFn gets added in by EmbdEtcd
	)
	go converger.Loop(true) // main loop for converger, true to start paused

	// embedded etcd
	if len(obj.seeds) == 0 {
		log.Printf("Main: Seeds: No seeds specified!")
	} else {
		log.Printf("Main: Seeds(%d): %v", len(obj.seeds), obj.seeds)
	}
	EmbdEtcd := etcd.NewEmbdEtcd(
		hostname,
		obj.seeds,
		obj.clientURLs,
		obj.serverURLs,
		obj.NoServer,
		obj.idealClusterSize,
		prefix,
		converger,
	)
	if EmbdEtcd == nil {
		// TODO: verify EmbdEtcd is not nil below...
		obj.Exit(fmt.Errorf("Main: Etcd: Creation failed!"))
	} else if err := EmbdEtcd.Startup(); err != nil { // startup (returns when etcd main loop is running)
		obj.Exit(fmt.Errorf("Main: Etcd: Startup failed: %v", err))
	}
	convergerStateFn := func(b bool) error {
		// exit if we are using the converged timeout and we are the
		// root node. otherwise, if we are a child node in a remote
		// execution hierarchy, we should only notify our converged
		// state and wait for the parent to trigger the exit.
		if t := obj.ConvergedTimeout; obj.Depth == 0 && t >= 0 {
			if b {
				log.Printf("Converged for %d seconds, exiting!", t)
				obj.Exit(nil) // trigger an exit!
			}
			return nil
		}
		// send our individual state into etcd for others to see
		return etcd.EtcdSetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
	}
	if EmbdEtcd != nil {
		converger.SetStateFn(convergerStateFn)
	}

	var gapiChan chan error // stream events are nil errors
	if obj.GAPI != nil {
		data := gapi.Data{
			Hostname: hostname,
			EmbdEtcd: EmbdEtcd,
			Noop:     obj.Noop,
			NoWatch:  obj.NoWatch,
		}
		if err := obj.GAPI.Init(data); err != nil {
			obj.Exit(fmt.Errorf("Main: GAPI: Init failed: %v", err))
		} else if !obj.NoWatch {
			gapiChan = obj.GAPI.SwitchStream() // stream of graph switch events!
		}
	}

	// main graph event loop: rebuilds and (re)starts the graph whenever
	// etcd or the GAPI signals a change; exits when exitchan is closed
	exitchan := make(chan struct{}) // exit on close
	go func() {
		startchan := make(chan struct{}) // start signal
		go func() { startchan <- struct{}{} }()

		log.Println("Etcd: Starting...")
		etcdchan := etcd.EtcdWatch(EmbdEtcd)
		first := true // first loop or not
		for {
			log.Println("Main: Waiting...")
			select {
			case <-startchan: // kick the loop once at start
				// pass

			case b := <-etcdchan:
				if !b { // ignore the message
					continue
				}
				// everything else passes through to cause a compile!

			case err, ok := <-gapiChan:
				if !ok { // channel closed
					continue
				}
				if err != nil {
					obj.Exit(err) // trigger exit
					continue
					//return // TODO: return or wait for exitchan?
				}
				if obj.NoWatch { // extra safety for bad GAPI's
					log.Printf("Main: GAPI stream should be quiet with NoWatch!") // fix the GAPI!
					continue // no stream events should be sent
				}

			case <-exitchan:
				return
			}

			if obj.GAPI == nil {
				log.Printf("Config: GAPI is empty!")
				continue
			}

			// we need the vertices to be paused to work on them, so
			// run graph vertex LOCK...
			if !first { // TODO: we can flatten this check out I think
				converger.Pause() // FIXME: add sync wait?
				G.Pause()         // sync

				//G.UnGroup() // FIXME: implement me if needed!
			}

			// make the graph from yaml, lib, puppet->yaml, or dsl!
			newGraph, err := obj.GAPI.Graph() // generate graph!
			if err != nil {
				log.Printf("Config: Error creating new graph: %v", err)
				// unpause! (resume the previous graph unchanged)
				if !first {
					G.Start(&wg, first) // sync
					converger.Start()   // after G.Start()
				}
				continue
			}

			// apply the global noop parameter if requested
			if obj.Noop {
				for _, m := range newGraph.GraphMetas() {
					m.Noop = obj.Noop
				}
			}

			// FIXME: make sure we "UnGroup()" any semi-destructive
			// changes to the resources so our efficient GraphSync
			// will be able to re-use and cmp to the old graph.
			newFullGraph, err := newGraph.GraphSync(oldGraph)
			if err != nil {
				log.Printf("Config: Error running graph sync: %v", err)
				// unpause! (resume the previous graph unchanged)
				if !first {
					G.Start(&wg, first) // sync
					converger.Start()   // after G.Start()
				}
				continue
			}
			oldGraph = newFullGraph // save old graph
			G = oldGraph.Copy()     // copy to active graph

			G.AutoEdges() // add autoedges; modifies the graph
			G.AutoGroup() // run autogroup; modifies the graph
			// TODO: do we want to do a transitive reduction?

			log.Printf("Graph: %v", G) // show graph
			if obj.GraphvizFilter != "" {
				if err := G.ExecGraphviz(obj.GraphvizFilter, obj.Graphviz); err != nil {
					log.Printf("Graphviz: %v", err)
				} else {
					log.Printf("Graphviz: Successfully generated graph!")
				}
			}
			G.AssociateData(converger)
			// G.Start(...) needs to be synchronous or wait,
			// because if half of the nodes are started and
			// some are not ready yet and the EtcdWatch
			// loops, we'll cause G.Pause(...) before we
			// even got going, thus causing nil pointer errors
			G.Start(&wg, first) // sync
			converger.Start()   // after G.Start()
			first = false
		}
	}()

	// watch the remote graph definition files for changes (unless NoWatch)
	configWatcher := recwatch.NewConfigWatcher()
	events := configWatcher.Events()
	if !obj.NoWatch {
		configWatcher.Add(obj.Remotes...) // add all the files...
	} else {
		events = nil // signal that no-watch is true
	}
	go func() {
		select {
		case err := <-configWatcher.Error():
			obj.Exit(err) // trigger an exit!

		case <-exitchan:
			return
		}
	}()

	// initialize the add watcher, which calls the f callback on map changes
	convergerCb := func(f func(map[string]bool) error) (func(), error) {
		return etcd.EtcdAddHostnameConvergedWatcher(EmbdEtcd, f)
	}

	// build remotes struct for remote ssh
	remotes := remote.NewRemotes(
		EmbdEtcd.LocalhostClientURLs().StringSlice(),
		[]string{etcd.DefaultClientURL},
		obj.Noop,
		obj.Remotes, // list of files
		events,      // watch for file changes
		obj.CConns,
		obj.AllowInteractive,
		obj.SSHPrivIDRsa,
		!obj.NoCaching,
		obj.Depth,
		prefix,
		converger,
		convergerCb,
		obj.Program,
	)

	// TODO: is there any benefit to running the remotes above in the loop?
	// wait for etcd to be running before we remote in, which we do above!
	go remotes.Run()

	if obj.GAPI == nil {
		converger.Start() // better start this for empty graphs
	}
	log.Println("Main: Running...")

	reterr := <-obj.exit // wait for exit signal

	// teardown: the shutdown order below matters; errors are accumulated
	// into reterr rather than aborting, so cleanup always runs to the end
	log.Println("Destroy...")

	if obj.GAPI != nil {
		if err := obj.GAPI.Close(); err != nil {
			err = errwrap.Wrapf(err, "GAPI closed poorly!")
			reterr = multierr.Append(reterr, err) // list of errors
		}
	}

	configWatcher.Close()                  // stop sending file changes to remotes
	if err := remotes.Exit(); err != nil { // tell all the remote connections to shutdown; waits!
		err = errwrap.Wrapf(err, "Remote exited poorly!")
		reterr = multierr.Append(reterr, err) // list of errors
	}

	G.Exit() // tell all the children to exit

	// tell inner main loop to exit
	close(exitchan)

	// cleanup etcd main loop last so it can process everything first
	if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd
		err = errwrap.Wrapf(err, "Etcd exited poorly!")
		reterr = multierr.Append(reterr, err) // list of errors
	}

	if obj.DEBUG {
		log.Printf("Graph: %v", G)
	}

	wg.Wait() // wait for primary go routines to exit

	// TODO: wait for each vertex to exit...
	log.Println("Goodbye!")
	return reterr
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
#!/bin/bash
|
#!/usr/bin/env bash
|
||||||
# setup a simple go environment
|
# setup a simple go environment
|
||||||
XPWD=`pwd`
|
XPWD=`pwd`
|
||||||
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" # dir!
|
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" # dir!
|
||||||
@@ -11,13 +11,23 @@ fi
|
|||||||
|
|
||||||
sudo_command=$(which sudo)
|
sudo_command=$(which sudo)
|
||||||
|
|
||||||
if [ $travis -eq 0 ]; then
|
YUM=`which yum 2>/dev/null`
|
||||||
YUM=`which yum 2>/dev/null`
|
APT=`which apt-get 2>/dev/null`
|
||||||
APT=`which apt-get 2>/dev/null`
|
if [ -z "$YUM" -a -z "$APT" ]; then
|
||||||
if [ -z "$YUM" -a -z "$APT" ]; then
|
|
||||||
echo "The package managers can't be found."
|
echo "The package managers can't be found."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ ! -z "$YUM" ]; then
|
||||||
|
$sudo_command $YUM install -y libvirt-devel
|
||||||
|
|
||||||
|
fi
|
||||||
|
if [ ! -z "$APT" ]; then
|
||||||
|
$sudo_command $APT install -y libvirt-dev || true
|
||||||
|
$sudo_command $APT install -y libpcap0.8-dev || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $travis -eq 0 ]; then
|
||||||
if [ ! -z "$YUM" ]; then
|
if [ ! -z "$YUM" ]; then
|
||||||
# some go dependencies are stored in mercurial
|
# some go dependencies are stored in mercurial
|
||||||
$sudo_command $YUM install -y golang golang-googlecode-tools-stringer hg
|
$sudo_command $YUM install -y golang golang-googlecode-tools-stringer hg
|
||||||
@@ -38,7 +48,7 @@ if go version | grep 'go1\.[0123]\.'; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
go get ./... # get all the go dependencies
|
go get -d ./... # get all the go dependencies
|
||||||
[ -e "$GOBIN/mgmt" ] && rm -f "$GOBIN/mgmt" # the `go get` version has no -X
|
[ -e "$GOBIN/mgmt" ] && rm -f "$GOBIN/mgmt" # the `go get` version has no -X
|
||||||
# vet is built-in in go 1.6 - we check for go vet command
|
# vet is built-in in go 1.6 - we check for go vet command
|
||||||
go vet 1> /dev/null 2>&1
|
go vet 1> /dev/null 2>&1
|
||||||
|
|||||||
104
pgraph/autoedge.go
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package pgraph represents the internal "pointer graph" that we use.
|
||||||
|
package pgraph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
"github.com/purpleidea/mgmt/resources"
|
||||||
|
)
|
||||||
|
|
||||||
|
// add edges to the vertex in a graph based on if it matches a uid list
|
||||||
|
func (g *Graph) addEdgesByMatchingUIDS(v *Vertex, uids []resources.ResUID) []bool {
|
||||||
|
// search for edges and see what matches!
|
||||||
|
var result []bool
|
||||||
|
|
||||||
|
// loop through each uid, and see if it matches any vertex
|
||||||
|
for _, uid := range uids {
|
||||||
|
var found = false
|
||||||
|
// uid is a ResUID object
|
||||||
|
for _, vv := range g.GetVertices() { // search
|
||||||
|
if v == vv { // skip self
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if global.DEBUG {
|
||||||
|
log.Printf("Compile: AutoEdge: Match: %v[%v] with UID: %v[%v]", vv.Kind(), vv.GetName(), uid.Kind(), uid.GetName())
|
||||||
|
}
|
||||||
|
// we must match to an effective UID for the resource,
|
||||||
|
// that is to say, the name value of a res is a helpful
|
||||||
|
// handle, but it is not necessarily a unique identity!
|
||||||
|
// remember, resources can return multiple UID's each!
|
||||||
|
if resources.UIDExistsInUIDs(uid, vv.GetUIDs()) {
|
||||||
|
// add edge from: vv -> v
|
||||||
|
if uid.Reversed() {
|
||||||
|
txt := fmt.Sprintf("AutoEdge: %v[%v] -> %v[%v]", vv.Kind(), vv.GetName(), v.Kind(), v.GetName())
|
||||||
|
log.Printf("Compile: Adding %v", txt)
|
||||||
|
g.AddEdge(vv, v, NewEdge(txt))
|
||||||
|
} else { // edges go the "normal" way, eg: pkg resource
|
||||||
|
txt := fmt.Sprintf("AutoEdge: %v[%v] -> %v[%v]", v.Kind(), v.GetName(), vv.Kind(), vv.GetName())
|
||||||
|
log.Printf("Compile: Adding %v", txt)
|
||||||
|
g.AddEdge(v, vv, NewEdge(txt))
|
||||||
|
}
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result = append(result, found)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoEdges adds the automatic edges to the graph.
|
||||||
|
func (g *Graph) AutoEdges() {
|
||||||
|
log.Println("Compile: Adding AutoEdges...")
|
||||||
|
for _, v := range g.GetVertices() { // for each vertexes autoedges
|
||||||
|
if !v.Meta().AutoEdge { // is the metaparam true?
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
autoEdgeObj := v.AutoEdges()
|
||||||
|
if autoEdgeObj == nil {
|
||||||
|
log.Printf("%v[%v]: Config: No auto edges were found!", v.Kind(), v.GetName())
|
||||||
|
continue // next vertex
|
||||||
|
}
|
||||||
|
|
||||||
|
for { // while the autoEdgeObj has more uids to add...
|
||||||
|
uids := autoEdgeObj.Next() // get some!
|
||||||
|
if uids == nil {
|
||||||
|
log.Printf("%v[%v]: Config: The auto edge list is empty!", v.Kind(), v.GetName())
|
||||||
|
break // inner loop
|
||||||
|
}
|
||||||
|
if global.DEBUG {
|
||||||
|
log.Println("Compile: AutoEdge: UIDS:")
|
||||||
|
for i, u := range uids {
|
||||||
|
log.Printf("Compile: AutoEdge: UID%d: %v", i, u)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// match and add edges
|
||||||
|
result := g.addEdgesByMatchingUIDS(v, uids)
|
||||||
|
|
||||||
|
// report back, and find out if we should continue
|
||||||
|
if !autoEdgeObj.Test(result) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
350
pgraph/autogroup.go
Normal file
@@ -0,0 +1,350 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package pgraph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
|
||||||
|
errwrap "github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AutoGrouper is the required interface to implement for an autogroup
// algorithm. Implementations drive the pairwise merge process: propose a
// candidate pair, check if it may merge, perform the merge, and decide when
// to stop.
type AutoGrouper interface {
	// listed in the order these are typically called in...
	name() string                                  // friendly identifier
	init(*Graph) error                             // only call once
	vertexNext() (*Vertex, *Vertex, error)         // mostly algorithmic
	vertexCmp(*Vertex, *Vertex) error              // can we merge these ?
	vertexMerge(*Vertex, *Vertex) (*Vertex, error) // vertex merge fn to use
	edgeMerge(*Edge, *Edge) *Edge                  // edge merge fn to use
	vertexTest(bool) (bool, error)                 // call until false
}
|
||||||
|
|
||||||
|
// baseGrouper is the base type for implementing the AutoGrouper interface.
type baseGrouper struct {
	graph    *Graph    // store a pointer to the graph
	vertices []*Vertex // cached list of vertices
	i        int       // outer iteration index into vertices
	j        int       // inner iteration index into vertices
	done     bool      // true once every (i, j) pair has been offered
}
|
||||||
|
|
||||||
|
// name provides a friendly name for the logs to see.
// It is side-effect free and may be called at any time.
func (ag *baseGrouper) name() string {
	return "baseGrouper"
}
|
||||||
|
|
||||||
|
// init is called only once and before using other AutoGrouper interface methods
|
||||||
|
// the name method is the only exception: call it any time without side effects!
|
||||||
|
func (ag *baseGrouper) init(g *Graph) error {
|
||||||
|
if ag.graph != nil {
|
||||||
|
return fmt.Errorf("The init method has already been called!")
|
||||||
|
}
|
||||||
|
ag.graph = g // pointer
|
||||||
|
ag.vertices = ag.graph.GetVerticesSorted() // cache in deterministic order!
|
||||||
|
ag.i = 0
|
||||||
|
ag.j = 0
|
||||||
|
if len(ag.vertices) == 0 { // empty graph
|
||||||
|
ag.done = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// vertexNext is a simple iterator that loops through vertex (pair) combinations
// an intelligent algorithm would selectively offer only valid pairs of vertices
// these should satisfy logical grouping requirements for the autogroup designs!
// the desired algorithms can override, but keep this method as a base iterator!
// Either returned vertex may be nil (index out of range, or vertex deleted);
// err is always nil in this base implementation.
func (ag *baseGrouper) vertexNext() (v1, v2 *Vertex, err error) {
	// this does a for v... { for w... { return v, w }} but stepwise!
	l := len(ag.vertices)
	if ag.i < l {
		v1 = ag.vertices[ag.i]
	}
	if ag.j < l {
		v2 = ag.vertices[ag.j]
	}

	// in case the vertex was deleted
	if !ag.graph.HasVertex(v1) {
		v1 = nil
	}
	if !ag.graph.HasVertex(v2) {
		v2 = nil
	}

	// two nested loops...
	// j is the inner counter: advance it; when it wraps back to zero,
	// advance the outer counter i; when i reaches the end, mark done.
	if ag.j < l {
		ag.j++
	}
	if ag.j == l {
		ag.j = 0
		if ag.i < l {
			ag.i++
		}
		if ag.i == l {
			ag.done = true
		}
	}

	return
}
|
||||||
|
|
||||||
|
func (ag *baseGrouper) vertexCmp(v1, v2 *Vertex) error {
|
||||||
|
if v1 == nil || v2 == nil {
|
||||||
|
return fmt.Errorf("Vertex is nil!")
|
||||||
|
}
|
||||||
|
if v1 == v2 { // skip yourself
|
||||||
|
return fmt.Errorf("Vertices are the same!")
|
||||||
|
}
|
||||||
|
if v1.Kind() != v2.Kind() { // we must group similar kinds
|
||||||
|
// TODO: maybe future resources won't need this limitation?
|
||||||
|
return fmt.Errorf("The two resources aren't the same kind!")
|
||||||
|
}
|
||||||
|
// someone doesn't want to group!
|
||||||
|
if !v1.Meta().AutoGroup || !v2.Meta().AutoGroup {
|
||||||
|
return fmt.Errorf("One of the autogroup flags is false!")
|
||||||
|
}
|
||||||
|
if v1.Res.IsGrouped() { // already grouped!
|
||||||
|
return fmt.Errorf("Already grouped!")
|
||||||
|
}
|
||||||
|
if len(v2.Res.GetGroup()) > 0 { // already has children grouped!
|
||||||
|
return fmt.Errorf("Already has groups!")
|
||||||
|
}
|
||||||
|
if !v1.Res.GroupCmp(v2.Res) { // resource groupcmp failed!
|
||||||
|
return fmt.Errorf("The GroupCmp failed!")
|
||||||
|
}
|
||||||
|
return nil // success
|
||||||
|
}
|
||||||
|
|
||||||
|
// vertexMerge groups v2's resource into v1's resource. The returned vertex v
// is never assigned here (always nil): the vertices themselves don't need to
// be merged, only the resources they contain.
func (ag *baseGrouper) vertexMerge(v1, v2 *Vertex) (v *Vertex, err error) {
	// NOTE: it's important to use w.Res instead of w, b/c
	// the w by itself is the *Vertex obj, not the *Res obj
	// which is contained within it! They both satisfy the
	// Res interface, which is why both will compile! :(
	err = v1.Res.GroupRes(v2.Res) // GroupRes skips stupid groupings
	return // success or fail, and no need to merge the actual vertices!
}
|
||||||
|
|
||||||
|
func (ag *baseGrouper) edgeMerge(e1, e2 *Edge) *Edge {
|
||||||
|
return e1 // noop
|
||||||
|
}
|
||||||
|
|
||||||
|
// vertexTest processes the results of the grouping for the algorithm to know
|
||||||
|
// return an error if something went horribly wrong, and bool false to stop
|
||||||
|
func (ag *baseGrouper) vertexTest(b bool) (bool, error) {
|
||||||
|
// NOTE: this particular baseGrouper version doesn't track what happens
|
||||||
|
// because since we iterate over every pair, we don't care which merge!
|
||||||
|
if ag.done {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: this algorithm may not be correct in all cases. replace if needed!
|
||||||
|
type nonReachabilityGrouper struct {
|
||||||
|
baseGrouper // "inherit" what we want, and reimplement the rest
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ag *nonReachabilityGrouper) name() string {
|
||||||
|
return "nonReachabilityGrouper"
|
||||||
|
}
|
||||||
|
|
||||||
|
// this algorithm relies on the observation that if there's a path from a to b,
|
||||||
|
// then they *can't* be merged (b/c of the existing dependency) so therefore we
|
||||||
|
// merge anything that *doesn't* satisfy this condition or that of the reverse!
|
||||||
|
func (ag *nonReachabilityGrouper) vertexNext() (v1, v2 *Vertex, err error) {
|
||||||
|
for {
|
||||||
|
v1, v2, err = ag.baseGrouper.vertexNext() // get all iterable pairs
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Error running autoGroup(vertexNext): %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v1 != v2 { // ignore self cmp early (perf optimization)
|
||||||
|
// if NOT reachable, they're viable...
|
||||||
|
out1 := ag.graph.Reachability(v1, v2)
|
||||||
|
out2 := ag.graph.Reachability(v2, v1)
|
||||||
|
if len(out1) == 0 && len(out2) == 0 {
|
||||||
|
return // return v1 and v2, they're viable
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if we got here, it means we're skipping over this candidate!
|
||||||
|
if ok, err := ag.baseGrouper.vertexTest(false); err != nil {
|
||||||
|
log.Fatalf("Error running autoGroup(vertexTest): %v", err)
|
||||||
|
} else if !ok {
|
||||||
|
return nil, nil, nil // done!
|
||||||
|
}
|
||||||
|
|
||||||
|
// the vertexTest passed, so loop and try with a new pair...
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// VertexMerge merges v2 into v1 by reattaching the edges where appropriate,
|
||||||
|
// and then by deleting v2 from the graph. Since more than one edge between two
|
||||||
|
// vertices is not allowed, duplicate edges are merged as well. an edge merge
|
||||||
|
// function can be provided if you'd like to control how you merge the edges!
|
||||||
|
func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) (*Vertex, error), edgeMergeFn func(*Edge, *Edge) *Edge) error {
|
||||||
|
// methodology
|
||||||
|
// 1) edges between v1 and v2 are removed
|
||||||
|
//Loop:
|
||||||
|
for k1 := range g.Adjacency {
|
||||||
|
for k2 := range g.Adjacency[k1] {
|
||||||
|
// v1 -> v2 || v2 -> v1
|
||||||
|
if (k1 == v1 && k2 == v2) || (k1 == v2 && k2 == v1) {
|
||||||
|
delete(g.Adjacency[k1], k2) // delete map & edge
|
||||||
|
// NOTE: if we assume this is a DAG, then we can
|
||||||
|
// assume only v1 -> v2 OR v2 -> v1 exists, and
|
||||||
|
// we can break out of these loops immediately!
|
||||||
|
//break Loop
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2) edges that point towards v2 from X now point to v1 from X (no dupes)
|
||||||
|
for _, x := range g.IncomingGraphEdges(v2) { // all to vertex v (??? -> v)
|
||||||
|
e := g.Adjacency[x][v2] // previous edge
|
||||||
|
r := g.Reachability(x, v1)
|
||||||
|
// merge e with ex := g.Adjacency[x][v1] if it exists!
|
||||||
|
if ex, exists := g.Adjacency[x][v1]; exists && edgeMergeFn != nil && len(r) == 0 {
|
||||||
|
e = edgeMergeFn(e, ex)
|
||||||
|
}
|
||||||
|
if len(r) == 0 { // if not reachable, add it
|
||||||
|
g.AddEdge(x, v1, e) // overwrite edge
|
||||||
|
} else if edgeMergeFn != nil { // reachable, merge e through...
|
||||||
|
prev := x // initial condition
|
||||||
|
for i, next := range r {
|
||||||
|
if i == 0 {
|
||||||
|
// next == prev, therefore skip
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// this edge is from: prev, to: next
|
||||||
|
ex, _ := g.Adjacency[prev][next] // get
|
||||||
|
ex = edgeMergeFn(ex, e)
|
||||||
|
g.Adjacency[prev][next] = ex // set
|
||||||
|
prev = next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(g.Adjacency[x], v2) // delete old edge
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3) edges that point from v2 to X now point from v1 to X (no dupes)
|
||||||
|
for _, x := range g.OutgoingGraphEdges(v2) { // all from vertex v (v -> ???)
|
||||||
|
e := g.Adjacency[v2][x] // previous edge
|
||||||
|
r := g.Reachability(v1, x)
|
||||||
|
// merge e with ex := g.Adjacency[v1][x] if it exists!
|
||||||
|
if ex, exists := g.Adjacency[v1][x]; exists && edgeMergeFn != nil && len(r) == 0 {
|
||||||
|
e = edgeMergeFn(e, ex)
|
||||||
|
}
|
||||||
|
if len(r) == 0 {
|
||||||
|
g.AddEdge(v1, x, e) // overwrite edge
|
||||||
|
} else if edgeMergeFn != nil { // reachable, merge e through...
|
||||||
|
prev := v1 // initial condition
|
||||||
|
for i, next := range r {
|
||||||
|
if i == 0 {
|
||||||
|
// next == prev, therefore skip
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// this edge is from: prev, to: next
|
||||||
|
ex, _ := g.Adjacency[prev][next]
|
||||||
|
ex = edgeMergeFn(ex, e)
|
||||||
|
g.Adjacency[prev][next] = ex
|
||||||
|
prev = next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(g.Adjacency[v2], x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4) merge and then remove the (now merged/grouped) vertex
|
||||||
|
if vertexMergeFn != nil { // run vertex merge function
|
||||||
|
if v, err := vertexMergeFn(v1, v2); err != nil {
|
||||||
|
return err
|
||||||
|
} else if v != nil { // replace v1 with the "merged" version...
|
||||||
|
v1 = v // XXX: will this replace v1 the way we want?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
g.DeleteVertex(v2) // remove grouped vertex
|
||||||
|
|
||||||
|
// 5) creation of a cyclic graph should throw an error
|
||||||
|
if _, err := g.TopologicalSort(); err != nil { // am i a dag or not?
|
||||||
|
return errwrap.Wrapf(err, "TopologicalSort failed") // not a dag
|
||||||
|
}
|
||||||
|
return nil // success
|
||||||
|
}
|
||||||
|
|
||||||
|
// autoGroup is the mechanical auto group "runner" that runs the interface spec
|
||||||
|
func (g *Graph) autoGroup(ag AutoGrouper) chan string {
|
||||||
|
strch := make(chan string) // output log messages here
|
||||||
|
go func(strch chan string) {
|
||||||
|
strch <- fmt.Sprintf("Compile: Grouping: Algorithm: %v...", ag.name())
|
||||||
|
if err := ag.init(g); err != nil {
|
||||||
|
log.Fatalf("Error running autoGroup(init): %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
var v, w *Vertex
|
||||||
|
v, w, err := ag.vertexNext() // get pair to compare
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Error running autoGroup(vertexNext): %v", err)
|
||||||
|
}
|
||||||
|
merged := false
|
||||||
|
// save names since they change during the runs
|
||||||
|
vStr := fmt.Sprintf("%s", v) // valid even if it is nil
|
||||||
|
wStr := fmt.Sprintf("%s", w)
|
||||||
|
|
||||||
|
if err := ag.vertexCmp(v, w); err != nil { // cmp ?
|
||||||
|
if global.DEBUG {
|
||||||
|
strch <- fmt.Sprintf("Compile: Grouping: !GroupCmp for: %s into %s", wStr, vStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove grouped vertex and merge edges (res is safe)
|
||||||
|
} else if err := g.VertexMerge(v, w, ag.vertexMerge, ag.edgeMerge); err != nil { // merge...
|
||||||
|
strch <- fmt.Sprintf("Compile: Grouping: !VertexMerge for: %s into %s", wStr, vStr)
|
||||||
|
|
||||||
|
} else { // success!
|
||||||
|
strch <- fmt.Sprintf("Compile: Grouping: Success for: %s into %s", wStr, vStr)
|
||||||
|
merged = true // woo
|
||||||
|
}
|
||||||
|
|
||||||
|
// did these get used?
|
||||||
|
if ok, err := ag.vertexTest(merged); err != nil {
|
||||||
|
log.Fatalf("Error running autoGroup(vertexTest): %v", err)
|
||||||
|
} else if !ok {
|
||||||
|
break // done!
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
close(strch)
|
||||||
|
return
|
||||||
|
}(strch) // call function
|
||||||
|
return strch
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoGroup runs the auto grouping on the graph and prints out log messages
|
||||||
|
func (g *Graph) AutoGroup() {
|
||||||
|
// receive log messages from channel...
|
||||||
|
// this allows test cases to avoid printing them when they're unwanted!
|
||||||
|
// TODO: this algorithm may not be correct in all cases. replace if needed!
|
||||||
|
for str := range g.autoGroup(&nonReachabilityGrouper{}) {
|
||||||
|
log.Println(str)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -15,14 +15,14 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
// Pgraph (Pointer Graph)
|
// Package pgraph represents the internal "pointer graph" that we use.
|
||||||
package main
|
package pgraph
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"sort"
|
"sort"
|
||||||
@@ -30,6 +30,13 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/converger"
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
"github.com/purpleidea/mgmt/resources"
|
||||||
|
|
||||||
|
errwrap "github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate stringer -type=graphState -output=graphstate_stringer.go
|
//go:generate stringer -type=graphState -output=graphstate_stringer.go
|
||||||
@@ -58,7 +65,7 @@ type Graph struct {
|
|||||||
|
|
||||||
// Vertex is the primary vertex struct in this library.
|
// Vertex is the primary vertex struct in this library.
|
||||||
type Vertex struct {
|
type Vertex struct {
|
||||||
Res // anonymous field
|
resources.Res // anonymous field
|
||||||
timestamp int64 // last updated timestamp ?
|
timestamp int64 // last updated timestamp ?
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,7 +84,7 @@ func NewGraph(name string) *Graph {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewVertex returns a new graph vertex struct with a contained resource.
|
// NewVertex returns a new graph vertex struct with a contained resource.
|
||||||
func NewVertex(r Res) *Vertex {
|
func NewVertex(r resources.Res) *Vertex {
|
||||||
return &Vertex{
|
return &Vertex{
|
||||||
Res: r,
|
Res: r,
|
||||||
}
|
}
|
||||||
@@ -157,9 +164,21 @@ func (g *Graph) AddEdge(v1, v2 *Vertex, e *Edge) {
|
|||||||
g.Adjacency[v1][v2] = e
|
g.Adjacency[v1][v2] = e
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeleteEdge deletes a particular edge from the graph.
|
||||||
|
// FIXME: add test cases
|
||||||
|
func (g *Graph) DeleteEdge(e *Edge) {
|
||||||
|
for v1 := range g.Adjacency {
|
||||||
|
for v2, edge := range g.Adjacency[v1] {
|
||||||
|
if e == edge {
|
||||||
|
delete(g.Adjacency[v1], v2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// GetVertexMatch searches for an equivalent resource in the graph and returns
|
// GetVertexMatch searches for an equivalent resource in the graph and returns
|
||||||
// the vertex it is found in, or nil if not found.
|
// the vertex it is found in, or nil if not found.
|
||||||
func (g *Graph) GetVertexMatch(obj Res) *Vertex {
|
func (g *Graph) GetVertexMatch(obj resources.Res) *Vertex {
|
||||||
for k := range g.Adjacency {
|
for k := range g.Adjacency {
|
||||||
if k.Res.Compare(obj) {
|
if k.Res.Compare(obj) {
|
||||||
return k
|
return k
|
||||||
@@ -279,11 +298,11 @@ func (g *Graph) ExecGraphviz(program, filename string) error {
|
|||||||
switch program {
|
switch program {
|
||||||
case "dot", "neato", "twopi", "circo", "fdp":
|
case "dot", "neato", "twopi", "circo", "fdp":
|
||||||
default:
|
default:
|
||||||
return errors.New("Invalid graphviz program selected!")
|
return fmt.Errorf("Invalid graphviz program selected!")
|
||||||
}
|
}
|
||||||
|
|
||||||
if filename == "" {
|
if filename == "" {
|
||||||
return errors.New("No filename given!")
|
return fmt.Errorf("No filename given!")
|
||||||
}
|
}
|
||||||
|
|
||||||
// run as a normal user if possible when run with sudo
|
// run as a normal user if possible when run with sudo
|
||||||
@@ -292,18 +311,18 @@ func (g *Graph) ExecGraphviz(program, filename string) error {
|
|||||||
|
|
||||||
err := ioutil.WriteFile(filename, []byte(g.Graphviz()), 0644)
|
err := ioutil.WriteFile(filename, []byte(g.Graphviz()), 0644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.New("Error writing to filename!")
|
return fmt.Errorf("Error writing to filename!")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err1 == nil && err2 == nil {
|
if err1 == nil && err2 == nil {
|
||||||
if err := os.Chown(filename, uid, gid); err != nil {
|
if err := os.Chown(filename, uid, gid); err != nil {
|
||||||
return errors.New("Error changing file owner!")
|
return fmt.Errorf("Error changing file owner!")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
path, err := exec.LookPath(program)
|
path, err := exec.LookPath(program)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.New("Graphviz is missing!")
|
return fmt.Errorf("Graphviz is missing!")
|
||||||
}
|
}
|
||||||
|
|
||||||
out := fmt.Sprintf("%v.png", filename)
|
out := fmt.Sprintf("%v.png", filename)
|
||||||
@@ -318,7 +337,7 @@ func (g *Graph) ExecGraphviz(program, filename string) error {
|
|||||||
}
|
}
|
||||||
_, err = cmd.Output()
|
_, err = cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.New("Error writing to image!")
|
return fmt.Errorf("Error writing to image!")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -462,7 +481,7 @@ func (g *Graph) OutDegree() map[*Vertex]int {
|
|||||||
// TopologicalSort returns the sort of graph vertices in that order.
|
// TopologicalSort returns the sort of graph vertices in that order.
|
||||||
// based on descriptions and code from wikipedia and rosetta code
|
// based on descriptions and code from wikipedia and rosetta code
|
||||||
// TODO: add memoization, and cache invalidation to speed this up :)
|
// TODO: add memoization, and cache invalidation to speed this up :)
|
||||||
func (g *Graph) TopologicalSort() (result []*Vertex, ok bool) { // kahn's algorithm
|
func (g *Graph) TopologicalSort() ([]*Vertex, error) { // kahn's algorithm
|
||||||
var L []*Vertex // empty list that will contain the sorted elements
|
var L []*Vertex // empty list that will contain the sorted elements
|
||||||
var S []*Vertex // set of all nodes with no incoming edges
|
var S []*Vertex // set of all nodes with no incoming edges
|
||||||
remaining := make(map[*Vertex]int) // amount of edges remaining
|
remaining := make(map[*Vertex]int) // amount of edges remaining
|
||||||
@@ -499,13 +518,13 @@ func (g *Graph) TopologicalSort() (result []*Vertex, ok bool) { // kahn's algori
|
|||||||
if in > 0 {
|
if in > 0 {
|
||||||
for n := range g.Adjacency[c] {
|
for n := range g.Adjacency[c] {
|
||||||
if remaining[n] > 0 {
|
if remaining[n] > 0 {
|
||||||
return nil, false // not a dag!
|
return nil, fmt.Errorf("Not a dag!")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return L, true
|
return L, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reachability finds the shortest path in a DAG from a to b, and returns the
|
// Reachability finds the shortest path in a DAG from a to b, and returns the
|
||||||
@@ -548,99 +567,6 @@ func (g *Graph) Reachability(a, b *Vertex) []*Vertex {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
// VertexMerge merges v2 into v1 by reattaching the edges where appropriate,
|
|
||||||
// and then by deleting v2 from the graph. Since more than one edge between two
|
|
||||||
// vertices is not allowed, duplicate edges are merged as well. an edge merge
|
|
||||||
// function can be provided if you'd like to control how you merge the edges!
|
|
||||||
func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) (*Vertex, error), edgeMergeFn func(*Edge, *Edge) *Edge) error {
|
|
||||||
// methodology
|
|
||||||
// 1) edges between v1 and v2 are removed
|
|
||||||
//Loop:
|
|
||||||
for k1 := range g.Adjacency {
|
|
||||||
for k2 := range g.Adjacency[k1] {
|
|
||||||
// v1 -> v2 || v2 -> v1
|
|
||||||
if (k1 == v1 && k2 == v2) || (k1 == v2 && k2 == v1) {
|
|
||||||
delete(g.Adjacency[k1], k2) // delete map & edge
|
|
||||||
// NOTE: if we assume this is a DAG, then we can
|
|
||||||
// assume only v1 -> v2 OR v2 -> v1 exists, and
|
|
||||||
// we can break out of these loops immediately!
|
|
||||||
//break Loop
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2) edges that point towards v2 from X now point to v1 from X (no dupes)
|
|
||||||
for _, x := range g.IncomingGraphEdges(v2) { // all to vertex v (??? -> v)
|
|
||||||
e := g.Adjacency[x][v2] // previous edge
|
|
||||||
r := g.Reachability(x, v1)
|
|
||||||
// merge e with ex := g.Adjacency[x][v1] if it exists!
|
|
||||||
if ex, exists := g.Adjacency[x][v1]; exists && edgeMergeFn != nil && len(r) == 0 {
|
|
||||||
e = edgeMergeFn(e, ex)
|
|
||||||
}
|
|
||||||
if len(r) == 0 { // if not reachable, add it
|
|
||||||
g.AddEdge(x, v1, e) // overwrite edge
|
|
||||||
} else if edgeMergeFn != nil { // reachable, merge e through...
|
|
||||||
prev := x // initial condition
|
|
||||||
for i, next := range r {
|
|
||||||
if i == 0 {
|
|
||||||
// next == prev, therefore skip
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// this edge is from: prev, to: next
|
|
||||||
ex, _ := g.Adjacency[prev][next] // get
|
|
||||||
ex = edgeMergeFn(ex, e)
|
|
||||||
g.Adjacency[prev][next] = ex // set
|
|
||||||
prev = next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(g.Adjacency[x], v2) // delete old edge
|
|
||||||
}
|
|
||||||
|
|
||||||
// 3) edges that point from v2 to X now point from v1 to X (no dupes)
|
|
||||||
for _, x := range g.OutgoingGraphEdges(v2) { // all from vertex v (v -> ???)
|
|
||||||
e := g.Adjacency[v2][x] // previous edge
|
|
||||||
r := g.Reachability(v1, x)
|
|
||||||
// merge e with ex := g.Adjacency[v1][x] if it exists!
|
|
||||||
if ex, exists := g.Adjacency[v1][x]; exists && edgeMergeFn != nil && len(r) == 0 {
|
|
||||||
e = edgeMergeFn(e, ex)
|
|
||||||
}
|
|
||||||
if len(r) == 0 {
|
|
||||||
g.AddEdge(v1, x, e) // overwrite edge
|
|
||||||
} else if edgeMergeFn != nil { // reachable, merge e through...
|
|
||||||
prev := v1 // initial condition
|
|
||||||
for i, next := range r {
|
|
||||||
if i == 0 {
|
|
||||||
// next == prev, therefore skip
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// this edge is from: prev, to: next
|
|
||||||
ex, _ := g.Adjacency[prev][next]
|
|
||||||
ex = edgeMergeFn(ex, e)
|
|
||||||
g.Adjacency[prev][next] = ex
|
|
||||||
prev = next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(g.Adjacency[v2], x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4) merge and then remove the (now merged/grouped) vertex
|
|
||||||
if vertexMergeFn != nil { // run vertex merge function
|
|
||||||
if v, err := vertexMergeFn(v1, v2); err != nil {
|
|
||||||
return err
|
|
||||||
} else if v != nil { // replace v1 with the "merged" version...
|
|
||||||
v1 = v // XXX: will this replace v1 the way we want?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
g.DeleteVertex(v2) // remove grouped vertex
|
|
||||||
|
|
||||||
// 5) creation of a cyclic graph should throw an error
|
|
||||||
if _, dag := g.TopologicalSort(); !dag { // am i a dag or not?
|
|
||||||
return fmt.Errorf("Graph is not a dag!")
|
|
||||||
}
|
|
||||||
return nil // success
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTimestamp returns the timestamp of a vertex
|
// GetTimestamp returns the timestamp of a vertex
|
||||||
func (v *Vertex) GetTimestamp() int64 {
|
func (v *Vertex) GetTimestamp() int64 {
|
||||||
return v.timestamp
|
return v.timestamp
|
||||||
@@ -661,7 +587,7 @@ func (g *Graph) OKTimestamp(v *Vertex) bool {
|
|||||||
// if they're equal (eg: on init of 0) then we also can't run
|
// if they're equal (eg: on init of 0) then we also can't run
|
||||||
// b/c we should let our pre-req's go first...
|
// b/c we should let our pre-req's go first...
|
||||||
x, y := v.GetTimestamp(), n.GetTimestamp()
|
x, y := v.GetTimestamp(), n.GetTimestamp()
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: OKTimestamp: (%v) >= %v[%v](%v): !%v", v.Kind(), v.GetName(), x, n.Kind(), n.GetName(), y, x >= y)
|
log.Printf("%v[%v]: OKTimestamp: (%v) >= %v[%v](%v): !%v", v.Kind(), v.GetName(), x, n.Kind(), n.GetName(), y, x >= y)
|
||||||
}
|
}
|
||||||
if x >= y {
|
if x >= y {
|
||||||
@@ -678,14 +604,14 @@ func (g *Graph) Poke(v *Vertex, activity bool) {
|
|||||||
for _, n := range g.OutgoingGraphEdges(v) {
|
for _, n := range g.OutgoingGraphEdges(v) {
|
||||||
// XXX: if we're in state event and haven't been cancelled by
|
// XXX: if we're in state event and haven't been cancelled by
|
||||||
// apply, then we can cancel a poke to a child, right? XXX
|
// apply, then we can cancel a poke to a child, right? XXX
|
||||||
// XXX: if n.Res.getState() != resStateEvent { // is this correct?
|
// XXX: if n.Res.getState() != resources.ResStateEvent { // is this correct?
|
||||||
if true { // XXX
|
if true { // XXX
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: Poke: %v[%v]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
log.Printf("%v[%v]: Poke: %v[%v]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
||||||
}
|
}
|
||||||
n.SendEvent(eventPoke, false, activity) // XXX: can this be switched to sync?
|
n.SendEvent(event.EventPoke, false, activity) // XXX: can this be switched to sync?
|
||||||
} else {
|
} else {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: Poke: %v[%v]: Skipped!", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
log.Printf("%v[%v]: Poke: %v[%v]: Skipped!", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -698,18 +624,18 @@ func (g *Graph) BackPoke(v *Vertex) {
|
|||||||
for _, n := range g.IncomingGraphEdges(v) {
|
for _, n := range g.IncomingGraphEdges(v) {
|
||||||
x, y, s := v.GetTimestamp(), n.GetTimestamp(), n.Res.GetState()
|
x, y, s := v.GetTimestamp(), n.GetTimestamp(), n.Res.GetState()
|
||||||
// if the parent timestamp needs poking AND it's not in state
|
// if the parent timestamp needs poking AND it's not in state
|
||||||
// resStateEvent, then poke it. If the parent is in resStateEvent it
|
// ResStateEvent, then poke it. If the parent is in ResStateEvent it
|
||||||
// means that an event is pending, so we'll be expecting a poke
|
// means that an event is pending, so we'll be expecting a poke
|
||||||
// back soon, so we can safely discard the extra parent poke...
|
// back soon, so we can safely discard the extra parent poke...
|
||||||
// TODO: implement a stateLT (less than) to tell if something
|
// TODO: implement a stateLT (less than) to tell if something
|
||||||
// happens earlier in the state cycle and that doesn't wrap nil
|
// happens earlier in the state cycle and that doesn't wrap nil
|
||||||
if x >= y && (s != resStateEvent && s != resStateCheckApply) {
|
if x >= y && (s != resources.ResStateEvent && s != resources.ResStateCheckApply) {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: BackPoke: %v[%v]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
log.Printf("%v[%v]: BackPoke: %v[%v]", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
||||||
}
|
}
|
||||||
n.SendEvent(eventBackPoke, false, false) // XXX: can this be switched to sync?
|
n.SendEvent(event.EventBackPoke, false, false) // XXX: can this be switched to sync?
|
||||||
} else {
|
} else {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: BackPoke: %v[%v]: Skipped!", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
log.Printf("%v[%v]: BackPoke: %v[%v]: Skipped!", v.Kind(), v.GetName(), n.Kind(), n.GetName())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -717,30 +643,29 @@ func (g *Graph) BackPoke(v *Vertex) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Process is the primary function to execute for a particular vertex in the graph.
|
// Process is the primary function to execute for a particular vertex in the graph.
|
||||||
// XXX: rename this function
|
func (g *Graph) Process(v *Vertex) error {
|
||||||
func (g *Graph) Process(v *Vertex) {
|
|
||||||
obj := v.Res
|
obj := v.Res
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: Process()", obj.Kind(), obj.GetName())
|
log.Printf("%v[%v]: Process()", obj.Kind(), obj.GetName())
|
||||||
}
|
}
|
||||||
obj.SetState(resStateEvent)
|
obj.SetState(resources.ResStateEvent)
|
||||||
var ok = true
|
var ok = true
|
||||||
var apply = false // did we run an apply?
|
var apply = false // did we run an apply?
|
||||||
// is it okay to run dependency wise right now?
|
// is it okay to run dependency wise right now?
|
||||||
// if not, that's okay because when the dependency runs, it will poke
|
// if not, that's okay because when the dependency runs, it will poke
|
||||||
// us back and we will run if needed then!
|
// us back and we will run if needed then!
|
||||||
if g.OKTimestamp(v) {
|
if g.OKTimestamp(v) {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: OKTimestamp(%v)", obj.Kind(), obj.GetName(), v.GetTimestamp())
|
log.Printf("%v[%v]: OKTimestamp(%v)", obj.Kind(), obj.GetName(), v.GetTimestamp())
|
||||||
}
|
}
|
||||||
|
|
||||||
obj.SetState(resStateCheckApply)
|
obj.SetState(resources.ResStateCheckApply)
|
||||||
// if this fails, don't UpdateTimestamp()
|
// if this fails, don't UpdateTimestamp()
|
||||||
checkok, err := obj.CheckApply(!obj.Meta().Noop)
|
checkok, err := obj.CheckApply(!obj.Meta().Noop)
|
||||||
if checkok && err != nil { // should never return this way
|
if checkok && err != nil { // should never return this way
|
||||||
log.Fatalf("%v[%v]: CheckApply(): %t, %+v", obj.Kind(), obj.GetName(), checkok, err)
|
log.Fatalf("%v[%v]: CheckApply(): %t, %+v", obj.Kind(), obj.GetName(), checkok, err)
|
||||||
}
|
}
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: CheckApply(): %t, %v", obj.Kind(), obj.GetName(), checkok, err)
|
log.Printf("%v[%v]: CheckApply(): %t, %v", obj.Kind(), obj.GetName(), checkok, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -761,14 +686,189 @@ func (g *Graph) Process(v *Vertex) {
|
|||||||
// update this timestamp *before* we poke or the poked
|
// update this timestamp *before* we poke or the poked
|
||||||
// nodes might fail due to having a too old timestamp!
|
// nodes might fail due to having a too old timestamp!
|
||||||
v.UpdateTimestamp() // this was touched...
|
v.UpdateTimestamp() // this was touched...
|
||||||
obj.SetState(resStatePoking) // can't cancel parent poke
|
obj.SetState(resources.ResStatePoking) // can't cancel parent poke
|
||||||
g.Poke(v, apply)
|
g.Poke(v, apply)
|
||||||
}
|
}
|
||||||
// poke at our pre-req's instead since they need to refresh/run...
|
// poke at our pre-req's instead since they need to refresh/run...
|
||||||
} else {
|
return err
|
||||||
// only poke at the pre-req's that need to run
|
|
||||||
go g.BackPoke(v)
|
|
||||||
}
|
}
|
||||||
|
// else... only poke at the pre-req's that need to run
|
||||||
|
go g.BackPoke(v)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SentinelErr is a sentinal as an error type that wraps an arbitrary error.
|
||||||
|
type SentinelErr struct {
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error is the required method to fulfill the error type.
|
||||||
|
func (obj *SentinelErr) Error() string {
|
||||||
|
return obj.err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Worker is the common run frontend of the vertex. It handles all of the retry
|
||||||
|
// and retry delay common code, and ultimately returns the final status of this
|
||||||
|
// vertex execution.
|
||||||
|
func (g *Graph) Worker(v *Vertex) error {
|
||||||
|
// listen for chan events from Watch() and run
|
||||||
|
// the Process() function when they're received
|
||||||
|
// this avoids us having to pass the data into
|
||||||
|
// the Watch() function about which graph it is
|
||||||
|
// running on, which isolates things nicely...
|
||||||
|
obj := v.Res
|
||||||
|
chanProcess := make(chan event.Event)
|
||||||
|
go func() {
|
||||||
|
running := false
|
||||||
|
var timer = time.NewTimer(time.Duration(math.MaxInt64)) // longest duration
|
||||||
|
if !timer.Stop() {
|
||||||
|
<-timer.C // unnecessary, shouldn't happen
|
||||||
|
}
|
||||||
|
var delay = time.Duration(v.Meta().Delay) * time.Millisecond
|
||||||
|
var retry = v.Meta().Retry // number of tries left, -1 for infinite
|
||||||
|
var saved event.Event
|
||||||
|
Loop:
|
||||||
|
for {
|
||||||
|
// this has to be synchronous, because otherwise the Res
|
||||||
|
// event loop will keep running and change state,
|
||||||
|
// causing the converged timeout to fire!
|
||||||
|
select {
|
||||||
|
case event, ok := <-chanProcess: // must use like this
|
||||||
|
if running && ok {
|
||||||
|
// we got an event that wasn't a close,
|
||||||
|
// while we were waiting for the timer!
|
||||||
|
// if this happens, it might be a bug:(
|
||||||
|
log.Fatalf("%v[%v]: Worker: Unexpected event: %+v", v.Kind(), v.GetName(), event)
|
||||||
|
}
|
||||||
|
if !ok { // chanProcess closed, let's exit
|
||||||
|
break Loop // no event, so no ack!
|
||||||
|
}
|
||||||
|
|
||||||
|
// the above mentioned synchronous part, is the
|
||||||
|
// running of this function, paired with an ack.
|
||||||
|
if e := g.Process(v); e != nil {
|
||||||
|
saved = event
|
||||||
|
log.Printf("%v[%v]: CheckApply errored: %v", v.Kind(), v.GetName(), e)
|
||||||
|
if retry == 0 {
|
||||||
|
// wrap the error in the sentinel
|
||||||
|
event.ACKNACK(&SentinelErr{e}) // fail the Watch()
|
||||||
|
break Loop
|
||||||
|
}
|
||||||
|
if retry > 0 { // don't decrement the -1
|
||||||
|
retry--
|
||||||
|
}
|
||||||
|
log.Printf("%v[%v]: CheckApply: Retrying after %.4f seconds (%d left)", v.Kind(), v.GetName(), delay.Seconds(), retry)
|
||||||
|
// start the timer...
|
||||||
|
timer.Reset(delay)
|
||||||
|
running = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
retry = v.Meta().Retry // reset on success
|
||||||
|
event.ACK() // sync
|
||||||
|
|
||||||
|
case <-timer.C:
|
||||||
|
if !timer.Stop() {
|
||||||
|
//<-timer.C // blocks, docs are wrong!
|
||||||
|
}
|
||||||
|
running = false
|
||||||
|
log.Printf("%s[%s]: CheckApply delay expired!", v.Kind(), v.GetName())
|
||||||
|
// re-send this failed event, to trigger a CheckApply()
|
||||||
|
go func() { chanProcess <- saved }()
|
||||||
|
// TODO: should we send a fake event instead?
|
||||||
|
//saved = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
var err error // propagate the error up (this is a permanent BAD error!)
|
||||||
|
// the watch delay runs inside of the Watch resource loop, so that it
|
||||||
|
// can still process signals and exit if needed. It shouldn't run any
|
||||||
|
// resource specific code since this is supposed to be a retry delay.
|
||||||
|
// NOTE: we're using the same retry and delay metaparams that CheckApply
|
||||||
|
// uses. This is for practicality. We can separate them later if needed!
|
||||||
|
var watchDelay time.Duration
|
||||||
|
var watchRetry = v.Meta().Retry // number of tries left, -1 for infinite
|
||||||
|
// watch blocks until it ends, & errors to retry
|
||||||
|
for {
|
||||||
|
// TODO: do we have to stop the converged-timeout when in this block (perhaps we're in the delay block!)
|
||||||
|
// TODO: should we setup/manage some of the converged timeout stuff in here anyways?
|
||||||
|
|
||||||
|
// if a retry-delay was requested, wait, but don't block our events!
|
||||||
|
if watchDelay > 0 {
|
||||||
|
//var pendingSendEvent bool
|
||||||
|
timer := time.NewTimer(watchDelay)
|
||||||
|
Loop:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-timer.C: // the wait is over
|
||||||
|
break Loop // critical
|
||||||
|
|
||||||
|
// TODO: resources could have a separate exit channel to avoid this complexity!?
|
||||||
|
case event := <-obj.Events():
|
||||||
|
// NOTE: this code should match the similar Res code!
|
||||||
|
//cuid.SetConverged(false) // TODO: ?
|
||||||
|
if exit, send := obj.ReadEvent(&event); exit {
|
||||||
|
return nil // exit
|
||||||
|
} else if send {
|
||||||
|
// if we dive down this rabbit hole, our
|
||||||
|
// timer.C won't get seen until we get out!
|
||||||
|
// in this situation, the Watch() is blocked
|
||||||
|
// from performing until CheckApply returns
|
||||||
|
// successfully, or errors out. This isn't
|
||||||
|
// so bad, but we should document it. Is it
|
||||||
|
// possible that some resource *needs* Watch
|
||||||
|
// to run to be able to execute a CheckApply?
|
||||||
|
// That situation shouldn't be common, and
|
||||||
|
// should probably not be allowed. Can we
|
||||||
|
// avoid it though?
|
||||||
|
//if exit, err := doSend(); exit || err != nil {
|
||||||
|
// return err // we exit or bubble up a NACK...
|
||||||
|
//}
|
||||||
|
// Instead of doing the above, we can
|
||||||
|
// add events to a pending list, and
|
||||||
|
// when we finish the delay, we can run
|
||||||
|
// them.
|
||||||
|
//pendingSendEvent = true // all events are identical for now...
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
timer.Stop() // it's nice to cleanup
|
||||||
|
log.Printf("%s[%s]: Watch delay expired!", v.Kind(), v.GetName())
|
||||||
|
// NOTE: we can avoid the send if running Watch guarantees
|
||||||
|
// one CheckApply event on startup!
|
||||||
|
//if pendingSendEvent { // TODO: should this become a list in the future?
|
||||||
|
// if exit, err := obj.DoSend(chanProcess, ""); exit || err != nil {
|
||||||
|
// return err // we exit or bubble up a NACK...
|
||||||
|
// }
|
||||||
|
//}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: reset the watch retry count after some amount of success
|
||||||
|
e := v.Res.Watch(chanProcess)
|
||||||
|
if e == nil { // exit signal
|
||||||
|
err = nil // clean exit
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if sentinelErr, ok := e.(*SentinelErr); ok { // unwrap the sentinel
|
||||||
|
err = sentinelErr.err
|
||||||
|
break // sentinel means, perma-exit
|
||||||
|
}
|
||||||
|
log.Printf("%v[%v]: Watch errored: %v", v.Kind(), v.GetName(), e)
|
||||||
|
if watchRetry == 0 {
|
||||||
|
err = fmt.Errorf("Permanent watch error: %v", e)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if watchRetry > 0 { // don't decrement the -1
|
||||||
|
watchRetry--
|
||||||
|
}
|
||||||
|
watchDelay = time.Duration(v.Meta().Delay) * time.Millisecond
|
||||||
|
log.Printf("%v[%v]: Watch: Retrying after %.4f seconds (%d left)", v.Kind(), v.GetName(), watchDelay.Seconds(), watchRetry)
|
||||||
|
// We need to trigger a CheckApply after Watch restarts, so that
|
||||||
|
// we catch any lost events that happened while down. We do this
|
||||||
|
// by getting the Watch resource to send one event once it's up!
|
||||||
|
//v.SendEvent(eventPoke, false, false)
|
||||||
|
}
|
||||||
|
close(chanProcess)
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start is a main kick to start the graph. It goes through in reverse topological
|
// Start is a main kick to start the graph. It goes through in reverse topological
|
||||||
@@ -787,25 +887,13 @@ func (g *Graph) Start(wg *sync.WaitGroup, first bool) { // start or continue
|
|||||||
// see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/
|
// see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/
|
||||||
go func(vv *Vertex) {
|
go func(vv *Vertex) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
// listen for chan events from Watch() and run
|
// TODO: if a sufficient number of workers error,
|
||||||
// the Process() function when they're received
|
// should something be done? Will these restart
|
||||||
// this avoids us having to pass the data into
|
// after perma-failure if we have a graph change?
|
||||||
// the Watch() function about which graph it is
|
if err := g.Worker(vv); err != nil { // contains the Watch and CheckApply loops
|
||||||
// running on, which isolates things nicely...
|
log.Printf("%s[%s]: Exited with failure: %v", vv.Kind(), vv.GetName(), err)
|
||||||
chanProcess := make(chan Event)
|
return
|
||||||
go func() {
|
|
||||||
for event := range chanProcess {
|
|
||||||
// this has to be synchronous,
|
|
||||||
// because otherwise the Res
|
|
||||||
// event loop will keep running
|
|
||||||
// and change state, causing the
|
|
||||||
// converged timeout to fire!
|
|
||||||
g.Process(vv)
|
|
||||||
event.ACK() // sync
|
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
vv.Res.Watch(chanProcess) // i block until i end
|
|
||||||
close(chanProcess)
|
|
||||||
log.Printf("%v[%v]: Exited", vv.Kind(), vv.GetName())
|
log.Printf("%v[%v]: Exited", vv.Kind(), vv.GetName())
|
||||||
}(v)
|
}(v)
|
||||||
}
|
}
|
||||||
@@ -824,8 +912,8 @@ func (g *Graph) Start(wg *sync.WaitGroup, first bool) { // start or continue
|
|||||||
// and not just selectively the subset with no indegree.
|
// and not just selectively the subset with no indegree.
|
||||||
if (!first) || indegree[v] == 0 {
|
if (!first) || indegree[v] == 0 {
|
||||||
// ensure state is started before continuing on to next vertex
|
// ensure state is started before continuing on to next vertex
|
||||||
for !v.SendEvent(eventStart, true, false) {
|
for !v.SendEvent(event.EventStart, true, false) {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
// if SendEvent fails, we aren't up yet
|
// if SendEvent fails, we aren't up yet
|
||||||
log.Printf("%v[%v]: Retrying SendEvent(Start)", v.Kind(), v.GetName())
|
log.Printf("%v[%v]: Retrying SendEvent(Start)", v.Kind(), v.GetName())
|
||||||
// sleep here briefly or otherwise cause
|
// sleep here briefly or otherwise cause
|
||||||
@@ -843,7 +931,7 @@ func (g *Graph) Pause() {
|
|||||||
defer log.Printf("State: %v -> %v", g.setState(graphStatePaused), g.getState())
|
defer log.Printf("State: %v -> %v", g.setState(graphStatePaused), g.getState())
|
||||||
t, _ := g.TopologicalSort()
|
t, _ := g.TopologicalSort()
|
||||||
for _, v := range t { // squeeze out the events...
|
for _, v := range t { // squeeze out the events...
|
||||||
v.SendEvent(eventPause, true, false)
|
v.SendEvent(event.EventPause, true, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -855,16 +943,105 @@ func (g *Graph) Exit() {
|
|||||||
t, _ := g.TopologicalSort()
|
t, _ := g.TopologicalSort()
|
||||||
for _, v := range t { // squeeze out the events...
|
for _, v := range t { // squeeze out the events...
|
||||||
// turn off the taps...
|
// turn off the taps...
|
||||||
|
// XXX: consider instead doing this by closing the Res.events channel instead?
|
||||||
// XXX: do this by sending an exit signal, and then returning
|
// XXX: do this by sending an exit signal, and then returning
|
||||||
// when we hit the 'default' in the select statement!
|
// when we hit the 'default' in the select statement!
|
||||||
// XXX: we can do this to quiesce, but it's not necessary now
|
// XXX: we can do this to quiesce, but it's not necessary now
|
||||||
|
|
||||||
v.SendEvent(eventExit, true, false)
|
v.SendEvent(event.EventExit, true, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GraphSync updates the oldGraph so that it matches the newGraph receiver. It
|
||||||
|
// leaves identical elements alone so that they don't need to be refreshed.
|
||||||
|
// FIXME: add test cases
|
||||||
|
func (g *Graph) GraphSync(oldGraph *Graph) (*Graph, error) {
|
||||||
|
|
||||||
|
if oldGraph == nil {
|
||||||
|
oldGraph = NewGraph(g.GetName()) // copy over the name
|
||||||
|
}
|
||||||
|
oldGraph.SetName(g.GetName()) // overwrite the name
|
||||||
|
|
||||||
|
var lookup = make(map[*Vertex]*Vertex)
|
||||||
|
var vertexKeep []*Vertex // list of vertices which are the same in new graph
|
||||||
|
var edgeKeep []*Edge // list of vertices which are the same in new graph
|
||||||
|
|
||||||
|
for v := range g.Adjacency { // loop through the vertices (resources)
|
||||||
|
res := v.Res // resource
|
||||||
|
|
||||||
|
vertex := oldGraph.GetVertexMatch(res)
|
||||||
|
if vertex == nil { // no match found
|
||||||
|
if err := res.Init(); err != nil {
|
||||||
|
return nil, errwrap.Wrapf(err, "could not Init() resource")
|
||||||
|
}
|
||||||
|
vertex = NewVertex(res)
|
||||||
|
oldGraph.AddVertex(vertex) // call standalone in case not part of an edge
|
||||||
|
}
|
||||||
|
lookup[v] = vertex // used for constructing edges
|
||||||
|
vertexKeep = append(vertexKeep, vertex) // append
|
||||||
|
}
|
||||||
|
|
||||||
|
// get rid of any vertices we shouldn't keep (that aren't in new graph)
|
||||||
|
for v := range oldGraph.Adjacency {
|
||||||
|
if !VertexContains(v, vertexKeep) {
|
||||||
|
// wait for exit before starting new graph!
|
||||||
|
v.SendEvent(event.EventExit, true, false)
|
||||||
|
oldGraph.DeleteVertex(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// compare edges
|
||||||
|
for v1 := range g.Adjacency { // loop through the vertices (resources)
|
||||||
|
for v2, e := range g.Adjacency[v1] {
|
||||||
|
// we have an edge!
|
||||||
|
|
||||||
|
// lookup vertices (these should exist now)
|
||||||
|
//res1 := v1.Res // resource
|
||||||
|
//res2 := v2.Res
|
||||||
|
//vertex1 := oldGraph.GetVertexMatch(res1)
|
||||||
|
//vertex2 := oldGraph.GetVertexMatch(res2)
|
||||||
|
vertex1, exists1 := lookup[v1]
|
||||||
|
vertex2, exists2 := lookup[v2]
|
||||||
|
if !exists1 || !exists2 { // no match found, bug?
|
||||||
|
//if vertex1 == nil || vertex2 == nil { // no match found
|
||||||
|
return nil, fmt.Errorf("New vertices weren't found!") // programming error
|
||||||
|
}
|
||||||
|
|
||||||
|
edge, exists := oldGraph.Adjacency[vertex1][vertex2]
|
||||||
|
if !exists || edge.Name != e.Name { // TODO: edgeCmp
|
||||||
|
edge = e // use or overwrite edge
|
||||||
|
}
|
||||||
|
oldGraph.Adjacency[vertex1][vertex2] = edge // store it (AddEdge)
|
||||||
|
edgeKeep = append(edgeKeep, edge) // mark as saved
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// delete unused edges
|
||||||
|
for v1 := range oldGraph.Adjacency {
|
||||||
|
for _, e := range oldGraph.Adjacency[v1] {
|
||||||
|
// we have an edge!
|
||||||
|
if !EdgeContains(e, edgeKeep) {
|
||||||
|
oldGraph.DeleteEdge(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return oldGraph, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GraphMetas returns a list of pointers to each of the resource MetaParams.
|
||||||
|
func (g *Graph) GraphMetas() []*resources.MetaParams {
|
||||||
|
metas := []*resources.MetaParams{}
|
||||||
|
for v := range g.Adjacency { // loop through the vertices (resources))
|
||||||
|
res := v.Res // resource
|
||||||
|
meta := res.Meta()
|
||||||
|
metas = append(metas, meta)
|
||||||
|
}
|
||||||
|
return metas
|
||||||
|
}
|
||||||
|
|
||||||
// AssociateData associates some data with the object in the graph in question
|
// AssociateData associates some data with the object in the graph in question
|
||||||
func (g *Graph) AssociateData(converger Converger) {
|
func (g *Graph) AssociateData(converger converger.Converger) {
|
||||||
for v := range g.GetVerticesChan() {
|
for v := range g.GetVerticesChan() {
|
||||||
v.Res.AssociateData(converger)
|
v.Res.AssociateData(converger)
|
||||||
}
|
}
|
||||||
@@ -880,6 +1057,16 @@ func VertexContains(needle *Vertex, haystack []*Vertex) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EdgeContains is an "in array" function to test for an edge in a slice of edges.
|
||||||
|
func EdgeContains(needle *Edge, haystack []*Edge) bool {
|
||||||
|
for _, v := range haystack {
|
||||||
|
if needle == v {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// Reverse reverses a list of vertices.
|
// Reverse reverses a list of vertices.
|
||||||
func Reverse(vs []*Vertex) []*Vertex {
|
func Reverse(vs []*Vertex) []*Vertex {
|
||||||
//var out []*Vertex // XXX: golint suggests, but it fails testing
|
//var out []*Vertex // XXX: golint suggests, but it fails testing
|
||||||
@@ -15,9 +15,7 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
// NOTE: this is pgraph, a pointer graph
|
package pgraph
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -25,8 +23,18 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NV is a helper function to make testing easier. It creates a new noop vertex.
|
||||||
|
func NV(s string) *Vertex {
|
||||||
|
obj, err := NewNoopRes(s)
|
||||||
|
if err != nil {
|
||||||
|
panic(err) // unlikely test failure!
|
||||||
|
}
|
||||||
|
return NewVertex(obj)
|
||||||
|
}
|
||||||
|
|
||||||
func TestPgraphT1(t *testing.T) {
|
func TestPgraphT1(t *testing.T) {
|
||||||
|
|
||||||
G := NewGraph("g1")
|
G := NewGraph("g1")
|
||||||
@@ -39,8 +47,8 @@ func TestPgraphT1(t *testing.T) {
|
|||||||
t.Errorf("Should have 0 edges instead of: %d.", i)
|
t.Errorf("Should have 0 edges instead of: %d.", i)
|
||||||
}
|
}
|
||||||
|
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
G.AddEdge(v1, v2, e1)
|
G.AddEdge(v1, v2, e1)
|
||||||
|
|
||||||
@@ -56,12 +64,12 @@ func TestPgraphT1(t *testing.T) {
|
|||||||
func TestPgraphT2(t *testing.T) {
|
func TestPgraphT2(t *testing.T) {
|
||||||
|
|
||||||
G := NewGraph("g2")
|
G := NewGraph("g2")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -83,12 +91,12 @@ func TestPgraphT2(t *testing.T) {
|
|||||||
func TestPgraphT3(t *testing.T) {
|
func TestPgraphT3(t *testing.T) {
|
||||||
|
|
||||||
G := NewGraph("g3")
|
G := NewGraph("g3")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -124,9 +132,9 @@ func TestPgraphT3(t *testing.T) {
|
|||||||
func TestPgraphT4(t *testing.T) {
|
func TestPgraphT4(t *testing.T) {
|
||||||
|
|
||||||
G := NewGraph("g4")
|
G := NewGraph("g4")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -146,12 +154,12 @@ func TestPgraphT4(t *testing.T) {
|
|||||||
|
|
||||||
func TestPgraphT5(t *testing.T) {
|
func TestPgraphT5(t *testing.T) {
|
||||||
G := NewGraph("g5")
|
G := NewGraph("g5")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -175,12 +183,12 @@ func TestPgraphT5(t *testing.T) {
|
|||||||
|
|
||||||
func TestPgraphT6(t *testing.T) {
|
func TestPgraphT6(t *testing.T) {
|
||||||
G := NewGraph("g6")
|
G := NewGraph("g6")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -213,9 +221,9 @@ func TestPgraphT6(t *testing.T) {
|
|||||||
func TestPgraphT7(t *testing.T) {
|
func TestPgraphT7(t *testing.T) {
|
||||||
|
|
||||||
G := NewGraph("g7")
|
G := NewGraph("g7")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -254,28 +262,28 @@ func TestPgraphT7(t *testing.T) {
|
|||||||
|
|
||||||
func TestPgraphT8(t *testing.T) {
|
func TestPgraphT8(t *testing.T) {
|
||||||
|
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
if VertexContains(v1, []*Vertex{v1, v2, v3}) != true {
|
if VertexContains(v1, []*Vertex{v1, v2, v3}) != true {
|
||||||
t.Errorf("Should be true instead of false.")
|
t.Errorf("Should be true instead of false.")
|
||||||
}
|
}
|
||||||
|
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
if VertexContains(v4, []*Vertex{v5, v6}) != false {
|
if VertexContains(v4, []*Vertex{v5, v6}) != false {
|
||||||
t.Errorf("Should be false instead of true.")
|
t.Errorf("Should be false instead of true.")
|
||||||
}
|
}
|
||||||
|
|
||||||
v7 := NewVertex(NewNoopRes("v7"))
|
v7 := NV("v7")
|
||||||
v8 := NewVertex(NewNoopRes("v8"))
|
v8 := NV("v8")
|
||||||
v9 := NewVertex(NewNoopRes("v9"))
|
v9 := NV("v9")
|
||||||
if VertexContains(v8, []*Vertex{v7, v8, v9}) != true {
|
if VertexContains(v8, []*Vertex{v7, v8, v9}) != true {
|
||||||
t.Errorf("Should be true instead of false.")
|
t.Errorf("Should be true instead of false.")
|
||||||
}
|
}
|
||||||
|
|
||||||
v1b := NewVertex(NewNoopRes("v1")) // same value, different objects
|
v1b := NV("v1") // same value, different objects
|
||||||
if VertexContains(v1b, []*Vertex{v1, v2, v3}) != false {
|
if VertexContains(v1b, []*Vertex{v1, v2, v3}) != false {
|
||||||
t.Errorf("Should be false instead of true.")
|
t.Errorf("Should be false instead of true.")
|
||||||
}
|
}
|
||||||
@@ -284,12 +292,12 @@ func TestPgraphT8(t *testing.T) {
|
|||||||
func TestPgraphT9(t *testing.T) {
|
func TestPgraphT9(t *testing.T) {
|
||||||
|
|
||||||
G := NewGraph("g9")
|
G := NewGraph("g9")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -344,11 +352,11 @@ func TestPgraphT9(t *testing.T) {
|
|||||||
t.Errorf("Outdegree of v6 should be 0 instead of: %d.", i)
|
t.Errorf("Outdegree of v6 should be 0 instead of: %d.", i)
|
||||||
}
|
}
|
||||||
|
|
||||||
s, ok := G.TopologicalSort()
|
s, err := G.TopologicalSort()
|
||||||
// either possibility is a valid toposort
|
// either possibility is a valid toposort
|
||||||
match := reflect.DeepEqual(s, []*Vertex{v1, v2, v3, v4, v5, v6}) || reflect.DeepEqual(s, []*Vertex{v1, v3, v2, v4, v5, v6})
|
match := reflect.DeepEqual(s, []*Vertex{v1, v2, v3, v4, v5, v6}) || reflect.DeepEqual(s, []*Vertex{v1, v3, v2, v4, v5, v6})
|
||||||
if !ok || !match {
|
if err != nil || !match {
|
||||||
t.Errorf("Topological sort failed, status: %v.", ok)
|
t.Errorf("Topological sort failed, error: %v.", err)
|
||||||
str := "Found:"
|
str := "Found:"
|
||||||
for _, v := range s {
|
for _, v := range s {
|
||||||
str += " " + v.Res.GetName()
|
str += " " + v.Res.GetName()
|
||||||
@@ -360,12 +368,12 @@ func TestPgraphT9(t *testing.T) {
|
|||||||
func TestPgraphT10(t *testing.T) {
|
func TestPgraphT10(t *testing.T) {
|
||||||
|
|
||||||
G := NewGraph("g10")
|
G := NewGraph("g10")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -379,8 +387,8 @@ func TestPgraphT10(t *testing.T) {
|
|||||||
G.AddEdge(v5, v6, e5)
|
G.AddEdge(v5, v6, e5)
|
||||||
G.AddEdge(v4, v2, e6) // cycle
|
G.AddEdge(v4, v2, e6) // cycle
|
||||||
|
|
||||||
if _, ok := G.TopologicalSort(); ok {
|
if _, err := G.TopologicalSort(); err == nil {
|
||||||
t.Errorf("Topological sort passed, but graph is cyclic.")
|
t.Errorf("Topological sort passed, but graph is cyclic!")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -400,8 +408,8 @@ func TestPgraphReachability0(t *testing.T) {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
G := NewGraph("g")
|
G := NewGraph("g")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
|
|
||||||
result := G.Reachability(v1, v6)
|
result := G.Reachability(v1, v6)
|
||||||
expected := []*Vertex{}
|
expected := []*Vertex{}
|
||||||
@@ -417,12 +425,12 @@ func TestPgraphReachability0(t *testing.T) {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
G := NewGraph("g")
|
G := NewGraph("g")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -451,12 +459,12 @@ func TestPgraphReachability0(t *testing.T) {
|
|||||||
// simple linear path
|
// simple linear path
|
||||||
func TestPgraphReachability1(t *testing.T) {
|
func TestPgraphReachability1(t *testing.T) {
|
||||||
G := NewGraph("g")
|
G := NewGraph("g")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -485,12 +493,12 @@ func TestPgraphReachability1(t *testing.T) {
|
|||||||
// pick one of two correct paths
|
// pick one of two correct paths
|
||||||
func TestPgraphReachability2(t *testing.T) {
|
func TestPgraphReachability2(t *testing.T) {
|
||||||
G := NewGraph("g")
|
G := NewGraph("g")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -522,12 +530,12 @@ func TestPgraphReachability2(t *testing.T) {
|
|||||||
// pick shortest path
|
// pick shortest path
|
||||||
func TestPgraphReachability3(t *testing.T) {
|
func TestPgraphReachability3(t *testing.T) {
|
||||||
G := NewGraph("g")
|
G := NewGraph("g")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -557,12 +565,12 @@ func TestPgraphReachability3(t *testing.T) {
|
|||||||
// direct path
|
// direct path
|
||||||
func TestPgraphReachability4(t *testing.T) {
|
func TestPgraphReachability4(t *testing.T) {
|
||||||
G := NewGraph("g")
|
G := NewGraph("g")
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
e1 := NewEdge("e1")
|
e1 := NewEdge("e1")
|
||||||
e2 := NewEdge("e2")
|
e2 := NewEdge("e2")
|
||||||
e3 := NewEdge("e3")
|
e3 := NewEdge("e3")
|
||||||
@@ -590,12 +598,12 @@ func TestPgraphReachability4(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestPgraphT11(t *testing.T) {
|
func TestPgraphT11(t *testing.T) {
|
||||||
v1 := NewVertex(NewNoopRes("v1"))
|
v1 := NV("v1")
|
||||||
v2 := NewVertex(NewNoopRes("v2"))
|
v2 := NV("v2")
|
||||||
v3 := NewVertex(NewNoopRes("v3"))
|
v3 := NV("v3")
|
||||||
v4 := NewVertex(NewNoopRes("v4"))
|
v4 := NV("v4")
|
||||||
v5 := NewVertex(NewNoopRes("v5"))
|
v5 := NV("v5")
|
||||||
v6 := NewVertex(NewNoopRes("v6"))
|
v6 := NV("v6")
|
||||||
|
|
||||||
if rev := Reverse([]*Vertex{}); !reflect.DeepEqual(rev, []*Vertex{}) {
|
if rev := Reverse([]*Vertex{}); !reflect.DeepEqual(rev, []*Vertex{}) {
|
||||||
t.Errorf("Reverse of vertex slice failed.")
|
t.Errorf("Reverse of vertex slice failed.")
|
||||||
@@ -1282,3 +1290,13 @@ func TestPgraphGroupingConnected1(t *testing.T) {
|
|||||||
}
|
}
|
||||||
runGraphCmp(t, g1, g2)
|
runGraphCmp(t, g1, g2)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDurationAssumptions(t *testing.T) {
|
||||||
|
var d time.Duration
|
||||||
|
if (d == 0) != true {
|
||||||
|
t.Errorf("Empty time.Duration is no longer equal to zero!")
|
||||||
|
}
|
||||||
|
if (d > 0) != false {
|
||||||
|
t.Errorf("Empty time.Duration is now greater than zero!")
|
||||||
|
}
|
||||||
|
}
|
||||||
121
puppet/gapi.go
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package puppet
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/gapi"
|
||||||
|
"github.com/purpleidea/mgmt/pgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GAPI implements the main puppet GAPI interface.
|
||||||
|
type GAPI struct {
|
||||||
|
PuppetParam *string // puppet mode to run; nil if undefined
|
||||||
|
PuppetConf string // the path to an alternate puppet.conf file
|
||||||
|
|
||||||
|
data gapi.Data
|
||||||
|
initialized bool
|
||||||
|
closeChan chan struct{}
|
||||||
|
wg sync.WaitGroup // sync group for tunnel go routines
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGAPI creates a new puppet GAPI struct and calls Init().
|
||||||
|
func NewGAPI(data gapi.Data, puppetParam *string, puppetConf string) (*GAPI, error) {
|
||||||
|
obj := &GAPI{
|
||||||
|
PuppetParam: puppetParam,
|
||||||
|
PuppetConf: puppetConf,
|
||||||
|
}
|
||||||
|
return obj, obj.Init(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the puppet GAPI struct.
|
||||||
|
func (obj *GAPI) Init(data gapi.Data) error {
|
||||||
|
if obj.initialized {
|
||||||
|
return fmt.Errorf("Already initialized!")
|
||||||
|
}
|
||||||
|
if obj.PuppetParam == nil {
|
||||||
|
return fmt.Errorf("The PuppetParam param must be specified!")
|
||||||
|
}
|
||||||
|
obj.data = data // store for later
|
||||||
|
obj.closeChan = make(chan struct{})
|
||||||
|
obj.initialized = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Graph returns a current Graph.
|
||||||
|
func (obj *GAPI) Graph() (*pgraph.Graph, error) {
|
||||||
|
if !obj.initialized {
|
||||||
|
return nil, fmt.Errorf("Puppet: GAPI is not initialized!")
|
||||||
|
}
|
||||||
|
config := ParseConfigFromPuppet(*obj.PuppetParam, obj.PuppetConf)
|
||||||
|
if config == nil {
|
||||||
|
return nil, fmt.Errorf("Puppet: ParseConfigFromPuppet returned nil!")
|
||||||
|
}
|
||||||
|
g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.EmbdEtcd, obj.data.Noop)
|
||||||
|
return g, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SwitchStream returns nil errors every time there could be a new graph.
|
||||||
|
func (obj *GAPI) SwitchStream() chan error {
|
||||||
|
if obj.data.NoWatch {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
puppetChan := func() <-chan time.Time { // helper function
|
||||||
|
return time.Tick(time.Duration(PuppetInterval(obj.PuppetConf)) * time.Second)
|
||||||
|
}
|
||||||
|
ch := make(chan error)
|
||||||
|
obj.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer obj.wg.Done()
|
||||||
|
defer close(ch) // this will run before the obj.wg.Done()
|
||||||
|
if !obj.initialized {
|
||||||
|
ch <- fmt.Errorf("Puppet: GAPI is not initialized!")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pChan := puppetChan()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case _, ok := <-pChan:
|
||||||
|
if !ok { // the channel closed!
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("Puppet: Generating new graph...")
|
||||||
|
pChan = puppetChan() // TODO: okay to update interval in case it changed?
|
||||||
|
ch <- nil // trigger a run
|
||||||
|
case <-obj.closeChan:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts down the Puppet GAPI.
|
||||||
|
func (obj *GAPI) Close() error {
|
||||||
|
if !obj.initialized {
|
||||||
|
return fmt.Errorf("Puppet: GAPI is not initialized!")
|
||||||
|
}
|
||||||
|
close(obj.closeChan)
|
||||||
|
obj.wg.Wait()
|
||||||
|
obj.initialized = false // closed = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -15,7 +15,8 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
// Package puppet provides the integration entrypoint for the puppet language.
|
||||||
|
package puppet
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
@@ -24,6 +25,9 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
"github.com/purpleidea/mgmt/yamlgraph"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -32,7 +36,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func runPuppetCommand(cmd *exec.Cmd) ([]byte, error) {
|
func runPuppetCommand(cmd *exec.Cmd) ([]byte, error) {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Puppet: running command: %v", cmd)
|
log.Printf("Puppet: running command: %v", cmd)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -67,7 +71,7 @@ func runPuppetCommand(cmd *exec.Cmd) ([]byte, error) {
|
|||||||
// will choke on an oversized slice. http://stackoverflow.com/a/33726617/3356612
|
// will choke on an oversized slice. http://stackoverflow.com/a/33726617/3356612
|
||||||
result = append(result, data[0:count]...)
|
result = append(result, data[0:count]...)
|
||||||
}
|
}
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Puppet: read %v bytes of data from puppet", len(result))
|
log.Printf("Puppet: read %v bytes of data from puppet", len(result))
|
||||||
}
|
}
|
||||||
for scanner := bufio.NewScanner(stderr); scanner.Scan(); {
|
for scanner := bufio.NewScanner(stderr); scanner.Scan(); {
|
||||||
@@ -83,7 +87,7 @@ func runPuppetCommand(cmd *exec.Cmd) ([]byte, error) {
|
|||||||
|
|
||||||
// ParseConfigFromPuppet takes a special puppet param string and config and
|
// ParseConfigFromPuppet takes a special puppet param string and config and
|
||||||
// returns the graph configuration structure.
|
// returns the graph configuration structure.
|
||||||
func ParseConfigFromPuppet(puppetParam, puppetConf string) *GraphConfig {
|
func ParseConfigFromPuppet(puppetParam, puppetConf string) *yamlgraph.GraphConfig {
|
||||||
var puppetConfArg string
|
var puppetConfArg string
|
||||||
if puppetConf != "" {
|
if puppetConf != "" {
|
||||||
puppetConfArg = "--config=" + puppetConf
|
puppetConfArg = "--config=" + puppetConf
|
||||||
@@ -100,7 +104,7 @@ func ParseConfigFromPuppet(puppetParam, puppetConf string) *GraphConfig {
|
|||||||
|
|
||||||
log.Println("Puppet: launching translator")
|
log.Println("Puppet: launching translator")
|
||||||
|
|
||||||
var config GraphConfig
|
var config yamlgraph.GraphConfig
|
||||||
if data, err := runPuppetCommand(cmd); err != nil {
|
if data, err := runPuppetCommand(cmd); err != nil {
|
||||||
return nil
|
return nil
|
||||||
} else if err := config.Parse(data); err != nil {
|
} else if err := config.Parse(data); err != nil {
|
||||||
@@ -113,7 +117,7 @@ func ParseConfigFromPuppet(puppetParam, puppetConf string) *GraphConfig {
|
|||||||
|
|
||||||
// PuppetInterval returns the graph refresh interval from the puppet configuration.
|
// PuppetInterval returns the graph refresh interval from the puppet configuration.
|
||||||
func PuppetInterval(puppetConf string) int {
|
func PuppetInterval(puppetConf string) int {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Puppet: determining graph refresh interval")
|
log.Printf("Puppet: determining graph refresh interval")
|
||||||
}
|
}
|
||||||
var cmd *exec.Cmd
|
var cmd *exec.Cmd
|
||||||
134
recwatch/configwatch.go
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package recwatch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigWatcher returns events on a channel anytime one of its files events.
type ConfigWatcher struct {
	ch        chan string   // emits the path of the file that changed
	wg        sync.WaitGroup // tracks one goroutine per watched file
	closechan chan struct{} // closed by Close() to stop all watch goroutines
	errorchan chan error    // carries permanent per-file watch errors
}
|
||||||
|
|
||||||
|
// NewConfigWatcher creates a new ConfigWatcher struct.
|
||||||
|
func NewConfigWatcher() *ConfigWatcher {
|
||||||
|
return &ConfigWatcher{
|
||||||
|
ch: make(chan string),
|
||||||
|
closechan: make(chan struct{}),
|
||||||
|
errorchan: make(chan error),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add new file paths to watch for events on.
|
||||||
|
func (obj *ConfigWatcher) Add(file ...string) {
|
||||||
|
if len(file) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(file) > 1 {
|
||||||
|
for _, f := range file { // add all the files...
|
||||||
|
obj.Add(f) // recurse
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// otherwise, add the one file passed in...
|
||||||
|
obj.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer obj.wg.Done()
|
||||||
|
ch := ConfigWatch(file[0])
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case e := <-ch:
|
||||||
|
if e != nil {
|
||||||
|
obj.errorchan <- e
|
||||||
|
return
|
||||||
|
}
|
||||||
|
obj.ch <- file[0]
|
||||||
|
continue
|
||||||
|
case <-obj.closechan:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns a channel of errors that notifies us of permanent issues.
// A value here means the corresponding file watch has stopped for good.
func (obj *ConfigWatcher) Error() <-chan error {
	return obj.errorchan
}
|
||||||
|
|
||||||
|
// Events returns a channel to listen on for file events. It closes when it is
// emptied after the Close() method is called. You can test for closure with the
// f, more := <-obj.Events() pattern.
func (obj *ConfigWatcher) Events() chan string {
	return obj.ch
}
|
||||||
|
|
||||||
|
// Close shuts down the ConfigWatcher object. It closes the Events channel after
// all the currently pending events have been emptied. The ordering below is
// deliberate: signal the goroutines first, wait for them to stop sending, and
// only then close the channels they were sending on.
func (obj *ConfigWatcher) Close() {
	if obj.ch == nil {
		return // already closed
	}
	close(obj.closechan)
	obj.wg.Wait() // wait until everyone is done sending on obj.ch
	//obj.ch <- "" // send finished message
	close(obj.ch)
	obj.ch = nil // mark as closed so a second Close() is a no-op
	close(obj.errorchan)
}
|
||||||
|
|
||||||
|
// ConfigWatch writes on the channel every time an event is seen for the path.
|
||||||
|
func ConfigWatch(file string) chan error {
|
||||||
|
ch := make(chan error)
|
||||||
|
go func() {
|
||||||
|
recWatcher, err := NewRecWatcher(file, false)
|
||||||
|
if err != nil {
|
||||||
|
ch <- err
|
||||||
|
close(ch)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer recWatcher.Close()
|
||||||
|
for {
|
||||||
|
if global.DEBUG {
|
||||||
|
log.Printf("Watching: %v", file)
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case event, ok := <-recWatcher.Events():
|
||||||
|
if !ok { // channel is closed
|
||||||
|
close(ch)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := event.Error; err != nil {
|
||||||
|
ch <- err
|
||||||
|
close(ch)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ch <- nil // send event!
|
||||||
|
}
|
||||||
|
}
|
||||||
|
//close(ch)
|
||||||
|
}()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
319
recwatch/recwatch.go
Normal file
@@ -0,0 +1,319 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package recwatch provides recursive file watching events via fsnotify.
|
||||||
|
package recwatch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/global" // XXX: package mgmtmain instead?
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
|
||||||
|
"gopkg.in/fsnotify.v1"
|
||||||
|
//"github.com/go-fsnotify/fsnotify" // git master of "gopkg.in/fsnotify.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Event represents a watcher event. These can include errors.
type Event struct {
	Error error           // non-nil when the watcher hit a problem
	Body  *fsnotify.Event // the underlying fsnotify event; nil on error
}
||||||
|
|
||||||
|
// RecWatcher is the struct for the recursive watcher. Run Init() on it.
type RecWatcher struct {
	Path    string // computed path
	Recurse bool   // should we watch recursively?
	isDir   bool   // computed isDir
	safename string // safe path (cleaned, no trailing slash)
	watcher  *fsnotify.Watcher
	watches  map[string]struct{} // set of sub-dirs currently watched
	events   chan Event          // one channel for events and err...
	once     sync.Once           // guards close() so it runs exactly once
	wg       sync.WaitGroup      // tracks the Watch goroutine
	exit     chan struct{}       // closed to tell Watch to return
	closeErr error               // error captured during close()
}
||||||
|
|
||||||
|
// NewRecWatcher creates an initializes a new recursive watcher.
|
||||||
|
func NewRecWatcher(path string, recurse bool) (*RecWatcher, error) {
|
||||||
|
obj := &RecWatcher{
|
||||||
|
Path: path,
|
||||||
|
Recurse: recurse,
|
||||||
|
}
|
||||||
|
return obj, obj.Init()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init starts the recursive file watcher.
|
||||||
|
func (obj *RecWatcher) Init() error {
|
||||||
|
obj.watcher = nil
|
||||||
|
obj.watches = make(map[string]struct{})
|
||||||
|
obj.events = make(chan Event)
|
||||||
|
obj.exit = make(chan struct{})
|
||||||
|
obj.isDir = strings.HasSuffix(obj.Path, "/") // dirs have trailing slashes
|
||||||
|
obj.safename = path.Clean(obj.Path) // no trailing slash
|
||||||
|
|
||||||
|
var err error
|
||||||
|
obj.watcher, err = fsnotify.NewWatcher()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.isDir {
|
||||||
|
if err := obj.addSubFolders(obj.safename); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err := obj.Watch(); err != nil {
|
||||||
|
obj.events <- Event{Error: err}
|
||||||
|
}
|
||||||
|
obj.Close()
|
||||||
|
}()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//func (obj *RecWatcher) Add(path string) error { // XXX: implement me or not?
|
||||||
|
//
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//func (obj *RecWatcher) Remove(path string) error { // XXX: implement me or not?
|
||||||
|
//
|
||||||
|
//}
|
||||||
|
|
||||||
|
// Close shuts down the watcher. It is safe to call more than once; the real
// teardown in close() runs exactly once, and its error is returned here.
func (obj *RecWatcher) Close() error {
	obj.once.Do(obj.close) // don't cause the channel to close twice
	return obj.closeErr
}
|
||||||
|
|
||||||
|
// This close function is the function that actually does the close work. Don't
// call it more than once! The ordering matters: signal exit, wait for Watch to
// stop sending, then tear down the fsnotify watcher and close the channel.
func (obj *RecWatcher) close() {
	var err error
	close(obj.exit) // send exit signal
	obj.wg.Wait()   // wait for Watch() to return before closing channels
	if obj.watcher != nil {
		err = obj.watcher.Close()
		obj.watcher = nil
		// TODO: should we send the close error?
		//if err != nil {
		//	obj.events <- Event{Error: err}
		//}
	}
	close(obj.events)
	obj.closeErr = err // set the error
}
|
||||||
|
|
||||||
|
// Events returns a channel of events. These include events for errors.
// It closes when the watcher shuts down.
func (obj *RecWatcher) Events() chan Event { return obj.events }
|
||||||
|
|
||||||
|
// Watch is the primary listener for this resource and it outputs events.
// It walks up and down the path hierarchy with an index into the tokenized
// path: when a watched component disappears it moves the watch up one level,
// and when a parent sees its child re-appear it descends back down. Events
// relevant to safename (or, when recursing, anything below it) are forwarded
// on obj.events. It returns nil when the exit channel is closed, or an error
// on an unrecoverable watcher failure.
func (obj *RecWatcher) Watch() error {
	if obj.watcher == nil {
		return fmt.Errorf("Watcher is not initialized!")
	}
	obj.wg.Add(1)
	defer obj.wg.Done()

	patharray := util.PathSplit(obj.safename) // tokenize the path
	var index = len(patharray)                // starting index
	var current string                        // current "watcher" location
	var deltaDepth int                        // depth delta between watcher and event
	var send = false                          // send event?

	for {
		current = strings.Join(patharray[0:index], "/")
		if current == "" { // the empty string top is the root dir ("/")
			current = "/"
		}
		if global.DEBUG {
			log.Printf("Watching: %s", current) // attempting to watch...
		}
		// initialize in the loop so that we can reset on rm-ed handles
		if err := obj.watcher.Add(current); err != nil {
			if global.DEBUG {
				log.Printf("watcher.Add(%s): Error: %v", current, err)
			}

			if err == syscall.ENOENT {
				index-- // usually not found, move up one dir
				index = int(math.Max(1, float64(index))) // never below the root component
				continue
			}

			if err == syscall.ENOSPC {
				// no space left on device, out of inotify watches
				// TODO: consider letting the user fall back to
				// polling if they hit this error very often...
				return fmt.Errorf("Out of inotify watches: %v", err)
			} else if os.IsPermission(err) {
				return fmt.Errorf("Permission denied adding a watch: %v", err)
			}
			return fmt.Errorf("Unknown error: %v", err)
		}

		select {
		case event := <-obj.watcher.Events:
			if global.DEBUG {
				log.Printf("Watch(%s), Event(%s): %v", current, event.Name, event.Op)
			}
			// the deeper you go, the bigger the deltaDepth is...
			// this is the difference between what we're watching,
			// and the event... doesn't mean we can't watch deeper
			if current == event.Name {
				deltaDepth = 0 // i was watching what i was looking for

			} else if util.HasPathPrefix(event.Name, current) {
				deltaDepth = len(util.PathSplit(current)) - len(util.PathSplit(event.Name)) // -1 or less

			} else if util.HasPathPrefix(current, event.Name) {
				deltaDepth = len(util.PathSplit(event.Name)) - len(util.PathSplit(current)) // +1 or more
				// if below me... (a sub-dir we explicitly registered)
				if _, exists := obj.watches[event.Name]; exists {
					send = true
					if event.Op&fsnotify.Remove == fsnotify.Remove {
						obj.watcher.Remove(event.Name)
						delete(obj.watches, event.Name)
					}
					if (event.Op&fsnotify.Create == fsnotify.Create) && isDir(event.Name) {
						obj.watcher.Add(event.Name)
						obj.watches[event.Name] = struct{}{}
						if err := obj.addSubFolders(event.Name); err != nil {
							return err
						}
					}
				}

			} else {
				// TODO: different watchers get each others events!
				// https://github.com/go-fsnotify/fsnotify/issues/95
				// this happened with two values such as:
				// event.Name: /tmp/mgmt/f3 and current: /tmp/mgmt/f2
				continue
			}
			//log.Printf("The delta depth is: %v", deltaDepth)

			// if we have what we wanted, awesome, send an event...
			if event.Name == obj.safename {
				//log.Println("Event!")
				// FIXME: should all these below cases trigger?
				send = true

				if obj.isDir {
					if err := obj.addSubFolders(obj.safename); err != nil {
						return err
					}
				}

				// file removed, move the watch upwards
				if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
					//log.Println("Removal!")
					obj.watcher.Remove(current)
					index--
				}

				// we must be a parent watcher, so descend in
				if deltaDepth < 0 {
					// XXX: we can block here due to: https://github.com/fsnotify/fsnotify/issues/123
					obj.watcher.Remove(current)
					index++
				}

				// if safename starts with event.Name, we're above, and no event should be sent
			} else if util.HasPathPrefix(obj.safename, event.Name) {
				//log.Println("Above!")

				if deltaDepth >= 0 && (event.Op&fsnotify.Remove == fsnotify.Remove) {
					log.Println("Removal!")
					obj.watcher.Remove(current)
					index--
				}

				if deltaDepth < 0 {
					log.Println("Parent!")
					if util.PathPrefixDelta(obj.safename, event.Name) == 1 { // we're the parent dir
						send = true
					}
					obj.watcher.Remove(current)
					index++
				}

				// if event.Name startswith safename, send event, we're already deeper
			} else if util.HasPathPrefix(event.Name, obj.safename) {
				//log.Println("Event2!")
				send = true
			}

			// do all our event sending all together to avoid duplicate msgs
			if send {
				send = false
				// only invalid state on certain types of events
				obj.events <- Event{Error: nil, Body: &event}
			}

		case err := <-obj.watcher.Errors:
			return fmt.Errorf("Unknown watcher error: %v", err)

		case <-obj.exit:
			return nil
		}
	}
}
|
||||||
|
|
||||||
|
// addSubFolders is a helper that is used to add recursive dirs to the watches.
|
||||||
|
func (obj *RecWatcher) addSubFolders(p string) error {
|
||||||
|
if !obj.Recurse {
|
||||||
|
return nil // if we're not watching recursively, just exit early
|
||||||
|
}
|
||||||
|
// look at all subfolders...
|
||||||
|
walkFn := func(path string, info os.FileInfo, err error) error {
|
||||||
|
if global.DEBUG {
|
||||||
|
log.Printf("Walk: %s (%v): %v", path, info, err)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if info.IsDir() {
|
||||||
|
obj.watches[path] = struct{}{} // add key
|
||||||
|
err := obj.watcher.Add(path)
|
||||||
|
if err != nil {
|
||||||
|
return err // TODO: will this bubble up?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
err := filepath.Walk(p, walkFn)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// isDir reports whether path exists and refers to a directory. A stat
// failure (e.g. the path does not exist) counts as "not a directory".
func isDir(path string) bool {
	info, err := os.Stat(path)
	return err == nil && info.IsDir()
}
|
||||||
@@ -15,6 +15,7 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package remote provides the remoting facilities for agentless execution.
|
||||||
// This set of structs and methods are for running mgmt remotely over SSH. This
|
// This set of structs and methods are for running mgmt remotely over SSH. This
|
||||||
// gives us the architectural robustness of our current design, combined with
|
// gives us the architectural robustness of our current design, combined with
|
||||||
// the ability to run it with an "agent-less" approach for bootstrapping, and
|
// the ability to run it with an "agent-less" approach for bootstrapping, and
|
||||||
@@ -35,7 +36,7 @@
|
|||||||
// remote mgmt transient agents are running, they can still exchange data and
|
// remote mgmt transient agents are running, they can still exchange data and
|
||||||
// converge together without directly connecting, since they all tunnel through
|
// converge together without directly connecting, since they all tunnel through
|
||||||
// the etcd server running on the initiator.
|
// the etcd server running on the initiator.
|
||||||
package main // TODO: make this a separate "remote" package
|
package remote
|
||||||
|
|
||||||
// TODO: running with two identical remote endpoints over a slow connection, eg:
|
// TODO: running with two identical remote endpoints over a slow connection, eg:
|
||||||
// --remote file1.yaml --remote file1.yaml
|
// --remote file1.yaml --remote file1.yaml
|
||||||
@@ -46,10 +47,6 @@ import (
|
|||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/howeyc/gopass"
|
|
||||||
"github.com/kardianos/osext"
|
|
||||||
"github.com/pkg/sftp"
|
|
||||||
"golang.org/x/crypto/ssh"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
@@ -63,6 +60,18 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
cv "github.com/purpleidea/mgmt/converger"
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
"github.com/purpleidea/mgmt/yamlgraph"
|
||||||
|
|
||||||
|
multierr "github.com/hashicorp/go-multierror"
|
||||||
|
"github.com/howeyc/gopass"
|
||||||
|
"github.com/kardianos/osext"
|
||||||
|
errwrap "github.com/pkg/errors"
|
||||||
|
"github.com/pkg/sftp"
|
||||||
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -94,7 +103,7 @@ type SSH struct {
|
|||||||
depth uint16 // depth of this node in the remote execution hierarchy
|
depth uint16 // depth of this node in the remote execution hierarchy
|
||||||
caching bool // whether to try and cache the copy of the binary
|
caching bool // whether to try and cache the copy of the binary
|
||||||
prefix string // location we're allowed to put data on the remote server
|
prefix string // location we're allowed to put data on the remote server
|
||||||
converger Converger
|
converger cv.Converger
|
||||||
|
|
||||||
client *ssh.Client // client object
|
client *ssh.Client // client object
|
||||||
sftp *sftp.Client // sftp object
|
sftp *sftp.Client // sftp object
|
||||||
@@ -107,6 +116,7 @@ type SSH struct {
|
|||||||
lock sync.Mutex // mutex to avoid exit races
|
lock sync.Mutex // mutex to avoid exit races
|
||||||
exiting bool // flag to let us know if we're exiting
|
exiting bool // flag to let us know if we're exiting
|
||||||
|
|
||||||
|
program string // name of the binary
|
||||||
remotewd string // path to remote working directory
|
remotewd string // path to remote working directory
|
||||||
execpath string // path to remote mgmt binary
|
execpath string // path to remote mgmt binary
|
||||||
filepath string // path to remote file config
|
filepath string // path to remote file config
|
||||||
@@ -135,7 +145,7 @@ func (obj *SSH) Close() error {
|
|||||||
return obj.client.Close()
|
return obj.client.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
// The Sftp function uses the sftp protocol to create a remote dir and copy over
|
// Sftp is a function for the sftp protocol to create a remote dir and copy over
|
||||||
// the binary to run. On error the string represents the path to the remote dir.
|
// the binary to run. On error the string represents the path to the remote dir.
|
||||||
func (obj *SSH) Sftp() error {
|
func (obj *SSH) Sftp() error {
|
||||||
var err error
|
var err error
|
||||||
@@ -163,7 +173,7 @@ func (obj *SSH) Sftp() error {
|
|||||||
|
|
||||||
// TODO: make the path configurable to deal with /tmp/ mounted noexec?
|
// TODO: make the path configurable to deal with /tmp/ mounted noexec?
|
||||||
tmpdir := func() string {
|
tmpdir := func() string {
|
||||||
return fmt.Sprintf(formatPattern, fmtUUID(10)) // eg: /tmp/mgmt.abcdefghij/
|
return fmt.Sprintf(formatPattern, fmtUID(10)) // eg: /tmp/mgmt.abcdefghij/
|
||||||
}
|
}
|
||||||
var ready bool
|
var ready bool
|
||||||
obj.remotewd = ""
|
obj.remotewd = ""
|
||||||
@@ -181,7 +191,7 @@ func (obj *SSH) Sftp() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; true; {
|
for i := 0; true; {
|
||||||
// NOTE: since fmtUUID is deterministic, if we don't clean up
|
// NOTE: since fmtUID is deterministic, if we don't clean up
|
||||||
// previous runs, we may get the same paths generated, and here
|
// previous runs, we may get the same paths generated, and here
|
||||||
// they will conflict.
|
// they will conflict.
|
||||||
if err := obj.sftp.Mkdir(obj.remotewd); err != nil {
|
if err := obj.sftp.Mkdir(obj.remotewd); err != nil {
|
||||||
@@ -214,7 +224,7 @@ func (obj *SSH) Sftp() error {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
obj.execpath = path.Join(obj.remotewd, program) // program is a compile time string from main.go
|
obj.execpath = path.Join(obj.remotewd, obj.program) // program is a compile time string
|
||||||
log.Printf("Remote: Remote path is: %s", obj.execpath)
|
log.Printf("Remote: Remote path is: %s", obj.execpath)
|
||||||
|
|
||||||
var same bool
|
var same bool
|
||||||
@@ -438,7 +448,7 @@ func (obj *SSH) forward(remoteConn net.Conn) net.Conn {
|
|||||||
log.Printf("Remote: io.Copy error: %s", err)
|
log.Printf("Remote: io.Copy error: %s", err)
|
||||||
// FIXME: what should we do here???
|
// FIXME: what should we do here???
|
||||||
}
|
}
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Remote: io.Copy finished: %d", n)
|
log.Printf("Remote: io.Copy finished: %d", n)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -483,7 +493,7 @@ func (obj *SSH) Exec() error {
|
|||||||
// TODO: do something less arbitrary about which one we pick?
|
// TODO: do something less arbitrary about which one we pick?
|
||||||
url := cleanURL(obj.remoteURLs[0]) // arbitrarily pick the first one
|
url := cleanURL(obj.remoteURLs[0]) // arbitrarily pick the first one
|
||||||
seeds := fmt.Sprintf("--no-server --seeds 'http://%s'", url) // XXX: escape untrusted input? (or check if url is valid)
|
seeds := fmt.Sprintf("--no-server --seeds 'http://%s'", url) // XXX: escape untrusted input? (or check if url is valid)
|
||||||
file := fmt.Sprintf("--file '%s'", obj.filepath) // XXX: escape untrusted input! (or check if file path exists)
|
file := fmt.Sprintf("--yaml '%s'", obj.filepath) // XXX: escape untrusted input! (or check if file path exists)
|
||||||
depth := fmt.Sprintf("--depth %d", obj.depth+1) // child is +1 distance
|
depth := fmt.Sprintf("--depth %d", obj.depth+1) // child is +1 distance
|
||||||
args := []string{hostname, seeds, file, depth}
|
args := []string{hostname, seeds, file, depth}
|
||||||
if obj.noop {
|
if obj.noop {
|
||||||
@@ -553,7 +563,7 @@ func (obj *SSH) ExecExit() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: workaround: force a signal!
|
// FIXME: workaround: force a signal!
|
||||||
if _, err := obj.simpleRun(fmt.Sprintf("killall -SIGINT %s", program)); err != nil { // FIXME: low specificity
|
if _, err := obj.simpleRun(fmt.Sprintf("killall -SIGINT %s", obj.program)); err != nil { // FIXME: low specificity
|
||||||
log.Printf("Remote: Failed to send SIGINT: %s", err.Error())
|
log.Printf("Remote: Failed to send SIGINT: %s", err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -562,12 +572,12 @@ func (obj *SSH) ExecExit() error {
|
|||||||
// try killing the process more violently
|
// try killing the process more violently
|
||||||
time.Sleep(10 * time.Second)
|
time.Sleep(10 * time.Second)
|
||||||
//obj.session.Signal(ssh.SIGKILL)
|
//obj.session.Signal(ssh.SIGKILL)
|
||||||
cmd := fmt.Sprintf("killall -SIGKILL %s", program) // FIXME: low specificity
|
cmd := fmt.Sprintf("killall -SIGKILL %s", obj.program) // FIXME: low specificity
|
||||||
obj.simpleRun(cmd)
|
obj.simpleRun(cmd)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// FIXME: workaround: wait (spin lock) until process quits cleanly...
|
// FIXME: workaround: wait (spin lock) until process quits cleanly...
|
||||||
cmd := fmt.Sprintf("while killall -0 %s 2> /dev/null; do sleep 1s; done", program) // FIXME: low specificity
|
cmd := fmt.Sprintf("while killall -0 %s 2> /dev/null; do sleep 1s; done", obj.program) // FIXME: low specificity
|
||||||
if _, err := obj.simpleRun(cmd); err != nil {
|
if _, err := obj.simpleRun(cmd); err != nil {
|
||||||
return fmt.Errorf("Error waiting: %s", err)
|
return fmt.Errorf("Error waiting: %s", err)
|
||||||
}
|
}
|
||||||
@@ -680,7 +690,7 @@ type Remotes struct {
|
|||||||
caching bool // whether to try and cache the copy of the binary
|
caching bool // whether to try and cache the copy of the binary
|
||||||
depth uint16 // depth of this node in the remote execution hierarchy
|
depth uint16 // depth of this node in the remote execution hierarchy
|
||||||
prefix string // folder prefix to use for misc storage
|
prefix string // folder prefix to use for misc storage
|
||||||
converger Converger
|
converger cv.Converger
|
||||||
convergerCb func(func(map[string]bool) error) (func(), error)
|
convergerCb func(func(map[string]bool) error) (func(), error)
|
||||||
|
|
||||||
wg sync.WaitGroup // keep track of each running SSH connection
|
wg sync.WaitGroup // keep track of each running SSH connection
|
||||||
@@ -690,19 +700,20 @@ type Remotes struct {
|
|||||||
exitChan chan struct{} // closes when we should exit
|
exitChan chan struct{} // closes when we should exit
|
||||||
semaphore Semaphore // counting semaphore to limit concurrent connections
|
semaphore Semaphore // counting semaphore to limit concurrent connections
|
||||||
hostnames []string // list of hostnames we've seen so far
|
hostnames []string // list of hostnames we've seen so far
|
||||||
cuuid ConvergerUUID // convergerUUID for the remote itself
|
cuid cv.ConvergerUID // convergerUID for the remote itself
|
||||||
cuuids map[string]ConvergerUUID // map to each SSH struct with the remote as the key
|
cuids map[string]cv.ConvergerUID // map to each SSH struct with the remote as the key
|
||||||
callbackCancelFunc func() // stored callback function cancel function
|
callbackCancelFunc func() // stored callback function cancel function
|
||||||
|
|
||||||
|
program string // name of the program
|
||||||
}
|
}
|
||||||
|
|
||||||
// The NewRemotes function builds a Remotes struct.
|
// NewRemotes builds a Remotes struct.
|
||||||
func NewRemotes(clientURLs, remoteURLs []string, noop bool, remotes []string, fileWatch chan string, cConns uint16, interactive bool, sshPrivIdRsa string, caching bool, depth uint16, prefix string, converger Converger, convergerCb func(func(map[string]bool) error) (func(), error)) *Remotes {
|
func NewRemotes(clientURLs, remoteURLs []string, noop bool, remotes []string, fileWatch chan string, cConns uint16, interactive bool, sshPrivIdRsa string, caching bool, depth uint16, prefix string, converger cv.Converger, convergerCb func(func(map[string]bool) error) (func(), error), program string) *Remotes {
|
||||||
return &Remotes{
|
return &Remotes{
|
||||||
clientURLs: clientURLs,
|
clientURLs: clientURLs,
|
||||||
remoteURLs: remoteURLs,
|
remoteURLs: remoteURLs,
|
||||||
noop: noop,
|
noop: noop,
|
||||||
remotes: StrRemoveDuplicatesInList(remotes),
|
remotes: util.StrRemoveDuplicatesInList(remotes),
|
||||||
fileWatch: fileWatch,
|
fileWatch: fileWatch,
|
||||||
cConns: cConns,
|
cConns: cConns,
|
||||||
interactive: interactive,
|
interactive: interactive,
|
||||||
@@ -716,7 +727,8 @@ func NewRemotes(clientURLs, remoteURLs []string, noop bool, remotes []string, fi
|
|||||||
exitChan: make(chan struct{}),
|
exitChan: make(chan struct{}),
|
||||||
semaphore: NewSemaphore(int(cConns)),
|
semaphore: NewSemaphore(int(cConns)),
|
||||||
hostnames: make([]string, len(remotes)),
|
hostnames: make([]string, len(remotes)),
|
||||||
cuuids: make(map[string]ConvergerUUID),
|
cuids: make(map[string]cv.ConvergerUID),
|
||||||
|
program: program,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -724,7 +736,7 @@ func NewRemotes(clientURLs, remoteURLs []string, noop bool, remotes []string, fi
|
|||||||
// It takes as input the path to a graph definition file.
|
// It takes as input the path to a graph definition file.
|
||||||
func (obj *Remotes) NewSSH(file string) (*SSH, error) {
|
func (obj *Remotes) NewSSH(file string) (*SSH, error) {
|
||||||
// first do the parsing...
|
// first do the parsing...
|
||||||
config := ParseConfigFromFile(file)
|
config := yamlgraph.ParseConfigFromFile(file) // FIXME: GAPI-ify somehow?
|
||||||
if config == nil {
|
if config == nil {
|
||||||
return nil, fmt.Errorf("Remote: Error parsing remote graph: %s", file)
|
return nil, fmt.Errorf("Remote: Error parsing remote graph: %s", file)
|
||||||
}
|
}
|
||||||
@@ -781,11 +793,12 @@ func (obj *Remotes) NewSSH(file string) (*SSH, error) {
|
|||||||
return nil, fmt.Errorf("No authentication methods available!")
|
return nil, fmt.Errorf("No authentication methods available!")
|
||||||
}
|
}
|
||||||
|
|
||||||
hostname := config.Hostname
|
//hostname := config.Hostname // TODO: optionally specify local hostname somehow
|
||||||
|
hostname := ""
|
||||||
if hostname == "" {
|
if hostname == "" {
|
||||||
hostname = host // default to above
|
hostname = host // default to above
|
||||||
}
|
}
|
||||||
if StrInList(hostname, obj.hostnames) {
|
if util.StrInList(hostname, obj.hostnames) {
|
||||||
return nil, fmt.Errorf("Remote: Hostname `%s` already exists!", hostname)
|
return nil, fmt.Errorf("Remote: Hostname `%s` already exists!", hostname)
|
||||||
}
|
}
|
||||||
obj.hostnames = append(obj.hostnames, hostname)
|
obj.hostnames = append(obj.hostnames, hostname)
|
||||||
@@ -805,6 +818,7 @@ func (obj *Remotes) NewSSH(file string) (*SSH, error) {
|
|||||||
caching: obj.caching,
|
caching: obj.caching,
|
||||||
converger: obj.converger,
|
converger: obj.converger,
|
||||||
prefix: obj.prefix,
|
prefix: obj.prefix,
|
||||||
|
program: obj.program,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -872,24 +886,23 @@ func (obj *Remotes) passwordCallback(user, host string) func() (string, error) {
|
|||||||
return p, nil
|
return p, nil
|
||||||
case e := <-failchan:
|
case e := <-failchan:
|
||||||
return "", e
|
return "", e
|
||||||
case <-TimeAfterOrBlock(timeout):
|
case <-util.TimeAfterOrBlock(timeout):
|
||||||
return "", fmt.Errorf("Interactive timeout reached!")
|
return "", fmt.Errorf("Interactive timeout reached!")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return cb
|
return cb
|
||||||
}
|
}
|
||||||
|
|
||||||
// The Run method of the Remotes struct kicks it all off. It is usually run from
|
// Run kicks it all off. It is usually run from a go routine.
|
||||||
// a go routine.
|
|
||||||
func (obj *Remotes) Run() {
|
func (obj *Remotes) Run() {
|
||||||
// TODO: we can disable a lot of this if we're not using --converged-timeout
|
// TODO: we can disable a lot of this if we're not using --converged-timeout
|
||||||
// link in all the converged timeout checking and callbacks...
|
// link in all the converged timeout checking and callbacks...
|
||||||
obj.cuuid = obj.converger.Register() // one for me!
|
obj.cuid = obj.converger.Register() // one for me!
|
||||||
obj.cuuid.SetName("Remote: Run")
|
obj.cuid.SetName("Remote: Run")
|
||||||
for _, f := range obj.remotes { // one for each remote...
|
for _, f := range obj.remotes { // one for each remote...
|
||||||
obj.cuuids[f] = obj.converger.Register() // save a reference
|
obj.cuids[f] = obj.converger.Register() // save a reference
|
||||||
obj.cuuids[f].SetName(fmt.Sprintf("Remote: %s", f))
|
obj.cuids[f].SetName(fmt.Sprintf("Remote: %s", f))
|
||||||
//obj.cuuids[f].SetConverged(false) // everyone starts off false
|
//obj.cuids[f].SetConverged(false) // everyone starts off false
|
||||||
}
|
}
|
||||||
|
|
||||||
// watch for converged state in the group of remotes...
|
// watch for converged state in the group of remotes...
|
||||||
@@ -911,12 +924,12 @@ func (obj *Remotes) Run() {
|
|||||||
if !ok { // no status on hostname means unconverged!
|
if !ok { // no status on hostname means unconverged!
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("Remote: Converged: Status: %+v", obj.converger.Status())
|
log.Printf("Remote: Converged: Status: %+v", obj.converger.Status())
|
||||||
}
|
}
|
||||||
// if exiting, don't update, it will be unregistered...
|
// if exiting, don't update, it will be unregistered...
|
||||||
if !sshobj.exiting { // this is actually racy, but safe
|
if !sshobj.exiting { // this is actually racy, but safe
|
||||||
obj.cuuids[f].SetConverged(b) // ignore errors!
|
obj.cuids[f].SetConverged(b) // ignore errors!
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -943,10 +956,10 @@ func (obj *Remotes) Run() {
|
|||||||
if !more {
|
if !more {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
obj.cuuid.SetConverged(false) // activity!
|
obj.cuid.SetConverged(false) // activity!
|
||||||
|
|
||||||
case <-obj.cuuid.ConvergedTimer():
|
case <-obj.cuid.ConvergedTimer():
|
||||||
obj.cuuid.SetConverged(true) // converged!
|
obj.cuid.SetConverged(true) // converged!
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
obj.lock.Lock()
|
obj.lock.Lock()
|
||||||
@@ -965,7 +978,7 @@ func (obj *Remotes) Run() {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
} else {
|
} else {
|
||||||
obj.cuuid.SetConverged(true) // if no watches, we're converged!
|
obj.cuid.SetConverged(true) // if no watches, we're converged!
|
||||||
}
|
}
|
||||||
|
|
||||||
// the semaphore provides the max simultaneous connection limit
|
// the semaphore provides the max simultaneous connection limit
|
||||||
@@ -983,7 +996,7 @@ func (obj *Remotes) Run() {
|
|||||||
if obj.cConns != 0 {
|
if obj.cConns != 0 {
|
||||||
obj.semaphore.V(1) // don't lock the loop
|
obj.semaphore.V(1) // don't lock the loop
|
||||||
}
|
}
|
||||||
obj.cuuids[f].Unregister() // don't stall the converge!
|
obj.cuids[f].Unregister() // don't stall the converge!
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
obj.sshmap[f] = sshobj // save a reference
|
obj.sshmap[f] = sshobj // save a reference
|
||||||
@@ -994,7 +1007,7 @@ func (obj *Remotes) Run() {
|
|||||||
defer obj.semaphore.V(1)
|
defer obj.semaphore.V(1)
|
||||||
}
|
}
|
||||||
defer obj.wg.Done()
|
defer obj.wg.Done()
|
||||||
defer obj.cuuids[f].Unregister()
|
defer obj.cuids[f].Unregister()
|
||||||
|
|
||||||
if err := sshobj.Go(); err != nil {
|
if err := sshobj.Go(); err != nil {
|
||||||
log.Printf("Remote: Error: %s", err)
|
log.Printf("Remote: Error: %s", err)
|
||||||
@@ -1005,13 +1018,14 @@ func (obj *Remotes) Run() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The Exit method causes as much of the Remotes struct to shutdown as quickly
|
// Exit causes as much of the Remotes struct to shutdown as quickly and as
|
||||||
// and as cleanly as possible. It only returns once everything is shutdown.
|
// cleanly as possible. It only returns once everything is shutdown.
|
||||||
func (obj *Remotes) Exit() {
|
func (obj *Remotes) Exit() error {
|
||||||
obj.lock.Lock()
|
obj.lock.Lock()
|
||||||
obj.exiting = true // don't spawn new ones once this flag is set!
|
obj.exiting = true // don't spawn new ones once this flag is set!
|
||||||
obj.lock.Unlock()
|
obj.lock.Unlock()
|
||||||
close(obj.exitChan)
|
close(obj.exitChan)
|
||||||
|
var reterr error
|
||||||
for _, f := range obj.remotes {
|
for _, f := range obj.remotes {
|
||||||
sshobj, exists := obj.sshmap[f]
|
sshobj, exists := obj.sshmap[f]
|
||||||
if !exists || sshobj == nil {
|
if !exists || sshobj == nil {
|
||||||
@@ -1020,7 +1034,8 @@ func (obj *Remotes) Exit() {
|
|||||||
|
|
||||||
// TODO: should we run these as go routines?
|
// TODO: should we run these as go routines?
|
||||||
if err := sshobj.Stop(); err != nil {
|
if err := sshobj.Stop(); err != nil {
|
||||||
log.Printf("Remote: Error stopping: %s", err)
|
err = errwrap.Wrapf(err, "Remote: Error stopping!")
|
||||||
|
reterr = multierr.Append(reterr, err) // list of errors
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1028,14 +1043,15 @@ func (obj *Remotes) Exit() {
|
|||||||
obj.callbackCancelFunc() // cancel our callback
|
obj.callbackCancelFunc() // cancel our callback
|
||||||
}
|
}
|
||||||
|
|
||||||
defer obj.cuuid.Unregister()
|
defer obj.cuid.Unregister()
|
||||||
obj.wg.Wait() // wait for everyone to exit
|
obj.wg.Wait() // wait for everyone to exit
|
||||||
|
return reterr
|
||||||
}
|
}
|
||||||
|
|
||||||
// fmtUUID makes a random string of length n, it is not cryptographically safe.
|
// fmtUID makes a random string of length n, it is not cryptographically safe.
|
||||||
// This function actually usually generates the same sequence of random strings
|
// This function actually usually generates the same sequence of random strings
|
||||||
// each time the program is run, which makes repeatability of this code easier.
|
// each time the program is run, which makes repeatability of this code easier.
|
||||||
func fmtUUID(n int) string {
|
func fmtUID(n int) string {
|
||||||
b := make([]byte, n)
|
b := make([]byte, n)
|
||||||
for i := range b {
|
for i := range b {
|
||||||
b[i] = formatChars[rand.Intn(len(formatChars))]
|
b[i] = formatChars[rand.Intn(len(formatChars))]
|
||||||
@@ -15,16 +15,21 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -46,7 +51,7 @@ type ExecRes struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewExecRes is a constructor for this resource. It also calls Init() for you.
|
// NewExecRes is a constructor for this resource. It also calls Init() for you.
|
||||||
func NewExecRes(name, cmd, shell string, timeout int, watchcmd, watchshell, ifcmd, ifshell string, pollint int, state string) *ExecRes {
|
func NewExecRes(name, cmd, shell string, timeout int, watchcmd, watchshell, ifcmd, ifshell string, pollint int, state string) (*ExecRes, error) {
|
||||||
obj := &ExecRes{
|
obj := &ExecRes{
|
||||||
BaseRes: BaseRes{
|
BaseRes: BaseRes{
|
||||||
Name: name,
|
Name: name,
|
||||||
@@ -61,32 +66,31 @@ func NewExecRes(name, cmd, shell string, timeout int, watchcmd, watchshell, ifcm
|
|||||||
PollInt: pollint,
|
PollInt: pollint,
|
||||||
State: state,
|
State: state,
|
||||||
}
|
}
|
||||||
obj.Init()
|
return obj, obj.Init()
|
||||||
return obj
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init runs some startup code for this resource.
|
// Init runs some startup code for this resource.
|
||||||
func (obj *ExecRes) Init() {
|
func (obj *ExecRes) Init() error {
|
||||||
obj.BaseRes.kind = "Exec"
|
obj.BaseRes.kind = "Exec"
|
||||||
obj.BaseRes.Init() // call base init, b/c we're overriding
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
}
|
}
|
||||||
|
|
||||||
// validate if the params passed in are valid data
|
// Validate if the params passed in are valid data.
|
||||||
// FIXME: where should this get called ?
|
// FIXME: where should this get called ?
|
||||||
func (obj *ExecRes) Validate() bool {
|
func (obj *ExecRes) Validate() error {
|
||||||
if obj.Cmd == "" { // this is the only thing that is really required
|
if obj.Cmd == "" { // this is the only thing that is really required
|
||||||
return false
|
return fmt.Errorf("Command can't be empty!")
|
||||||
}
|
}
|
||||||
|
|
||||||
// if we have a watch command, then we don't poll with the if command!
|
// if we have a watch command, then we don't poll with the if command!
|
||||||
if obj.WatchCmd != "" && obj.PollInt > 0 {
|
if obj.WatchCmd != "" && obj.PollInt > 0 {
|
||||||
return false
|
return fmt.Errorf("Don't poll when we have a watch command.")
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// wraps the scanner output in a channel
|
// BufioChanScanner wraps the scanner output in a channel.
|
||||||
func (obj *ExecRes) BufioChanScanner(scanner *bufio.Scanner) (chan string, chan error) {
|
func (obj *ExecRes) BufioChanScanner(scanner *bufio.Scanner) (chan string, chan error) {
|
||||||
ch, errch := make(chan string), make(chan error)
|
ch, errch := make(chan string), make(chan error)
|
||||||
go func() {
|
go func() {
|
||||||
@@ -94,7 +98,7 @@ func (obj *ExecRes) BufioChanScanner(scanner *bufio.Scanner) (chan string, chan
|
|||||||
ch <- scanner.Text() // blocks here ?
|
ch <- scanner.Text() // blocks here ?
|
||||||
if e := scanner.Err(); e != nil {
|
if e := scanner.Err(); e != nil {
|
||||||
errch <- e // send any misc errors we encounter
|
errch <- e // send any misc errors we encounter
|
||||||
//break // TODO ?
|
//break // TODO: ?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
close(ch)
|
close(ch)
|
||||||
@@ -105,14 +109,23 @@ func (obj *ExecRes) BufioChanScanner(scanner *bufio.Scanner) (chan string, chan
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
func (obj *ExecRes) Watch(processChan chan Event) {
|
func (obj *ExecRes) Watch(processChan chan event.Event) error {
|
||||||
if obj.IsWatching() {
|
if obj.IsWatching() {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
obj.SetWatching(true)
|
obj.SetWatching(true)
|
||||||
defer obj.SetWatching(false)
|
defer obj.SetWatching(false)
|
||||||
cuuid := obj.converger.Register()
|
cuid := obj.converger.Register()
|
||||||
defer cuuid.Unregister()
|
defer cuid.Unregister()
|
||||||
|
|
||||||
|
var startup bool
|
||||||
|
Startup := func(block bool) <-chan time.Time {
|
||||||
|
if block {
|
||||||
|
return nil // blocks forever
|
||||||
|
//return make(chan time.Time) // blocks forever
|
||||||
|
}
|
||||||
|
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
||||||
|
}
|
||||||
|
|
||||||
var send = false // send event?
|
var send = false // send event?
|
||||||
var exit = false
|
var exit = false
|
||||||
@@ -138,8 +151,7 @@ func (obj *ExecRes) Watch(processChan chan Event) {
|
|||||||
|
|
||||||
cmdReader, err := cmd.StdoutPipe()
|
cmdReader, err := cmd.StdoutPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("%v[%v]: Error creating StdoutPipe for Cmd: %v", obj.Kind(), obj.GetName(), err)
|
return fmt.Errorf("%s[%s]: Error creating StdoutPipe for Cmd: %v", obj.Kind(), obj.GetName(), err)
|
||||||
log.Fatal(err) // XXX: how should we handle errors?
|
|
||||||
}
|
}
|
||||||
scanner := bufio.NewScanner(cmdReader)
|
scanner := bufio.NewScanner(cmdReader)
|
||||||
|
|
||||||
@@ -150,18 +162,17 @@ func (obj *ExecRes) Watch(processChan chan Event) {
|
|||||||
cmd.Process.Kill() // TODO: is this necessary?
|
cmd.Process.Kill() // TODO: is this necessary?
|
||||||
}()
|
}()
|
||||||
if err := cmd.Start(); err != nil {
|
if err := cmd.Start(); err != nil {
|
||||||
log.Printf("%v[%v]: Error starting Cmd: %v", obj.Kind(), obj.GetName(), err)
|
return fmt.Errorf("%s[%s]: Error starting Cmd: %v", obj.Kind(), obj.GetName(), err)
|
||||||
log.Fatal(err) // XXX: how should we handle errors?
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bufioch, errch = obj.BufioChanScanner(scanner)
|
bufioch, errch = obj.BufioChanScanner(scanner)
|
||||||
}
|
}
|
||||||
|
|
||||||
for {
|
for {
|
||||||
obj.SetState(resStateWatching) // reset
|
obj.SetState(ResStateWatching) // reset
|
||||||
select {
|
select {
|
||||||
case text := <-bufioch:
|
case text := <-bufioch:
|
||||||
cuuid.SetConverged(false)
|
cuid.SetConverged(false)
|
||||||
// each time we get a line of output, we loop!
|
// each time we get a line of output, we loop!
|
||||||
log.Printf("%v[%v]: Watch output: %s", obj.Kind(), obj.GetName(), text)
|
log.Printf("%v[%v]: Watch output: %s", obj.Kind(), obj.GetName(), text)
|
||||||
if text != "" {
|
if text != "" {
|
||||||
@@ -169,36 +180,39 @@ func (obj *ExecRes) Watch(processChan chan Event) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
case err := <-errch:
|
case err := <-errch:
|
||||||
cuuid.SetConverged(false) // XXX ?
|
cuid.SetConverged(false)
|
||||||
if err == nil { // EOF
|
if err == nil { // EOF
|
||||||
// FIXME: add an "if watch command ends/crashes"
|
// FIXME: add an "if watch command ends/crashes"
|
||||||
// restart or generate error option
|
// restart or generate error option
|
||||||
log.Printf("%v[%v]: Reached EOF", obj.Kind(), obj.GetName())
|
return fmt.Errorf("%s[%s]: Reached EOF", obj.Kind(), obj.GetName())
|
||||||
return
|
|
||||||
}
|
}
|
||||||
log.Printf("%v[%v]: Error reading input?: %v", obj.Kind(), obj.GetName(), err)
|
// error reading input?
|
||||||
log.Fatal(err)
|
return fmt.Errorf("Unknown %s[%s] error: %v", obj.Kind(), obj.GetName(), err)
|
||||||
// XXX: how should we handle errors?
|
|
||||||
|
|
||||||
case event := <-obj.events:
|
case event := <-obj.Events():
|
||||||
cuuid.SetConverged(false)
|
cuid.SetConverged(false)
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
if exit, send = obj.ReadEvent(&event); exit {
|
||||||
return // exit
|
return nil // exit
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-cuuid.ConvergedTimer():
|
case <-cuid.ConvergedTimer():
|
||||||
cuuid.SetConverged(true) // converged!
|
cuid.SetConverged(true) // converged!
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
case <-Startup(startup):
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
send = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// do all our event sending all together to avoid duplicate msgs
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
if send {
|
if send {
|
||||||
|
startup = true // startup finished
|
||||||
send = false
|
send = false
|
||||||
// it is okay to invalidate the clean state on poke too
|
// it is okay to invalidate the clean state on poke too
|
||||||
obj.isStateOK = false // something made state dirty
|
obj.isStateOK = false // something made state dirty
|
||||||
resp := NewResp()
|
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
||||||
processChan <- Event{eventNil, resp, "", true} // trigger process
|
return err // we exit or bubble up a NACK...
|
||||||
resp.ACKWait() // wait for the ACK()
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -221,8 +235,8 @@ func (obj *ExecRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
//} else if obj.IfCmd != "" && obj.WatchCmd != "" {
|
//} else if obj.IfCmd != "" && obj.WatchCmd != "" {
|
||||||
|
|
||||||
if obj.PollInt > 0 { // && obj.WatchCmd == ""
|
if obj.PollInt > 0 { // && obj.WatchCmd == ""
|
||||||
// XXX have the Watch() command output onlyif poll events...
|
// XXX: have the Watch() command output onlyif poll events...
|
||||||
// XXX we can optimize by saving those results for returning here
|
// XXX: we can optimize by saving those results for returning here
|
||||||
// return XXX
|
// return XXX
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -300,7 +314,7 @@ func (obj *ExecRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-TimeAfterOrBlock(timeout):
|
case <-util.TimeAfterOrBlock(timeout):
|
||||||
log.Printf("%v[%v]: Timeout waiting for Cmd", obj.Kind(), obj.GetName())
|
log.Printf("%v[%v]: Timeout waiting for Cmd", obj.Kind(), obj.GetName())
|
||||||
//cmd.Process.Kill() // TODO: is this necessary?
|
//cmd.Process.Kill() // TODO: is this necessary?
|
||||||
return false, errors.New("Timeout waiting for Cmd!")
|
return false, errors.New("Timeout waiting for Cmd!")
|
||||||
@@ -325,18 +339,17 @@ func (obj *ExecRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
return false, nil // success
|
return false, nil // success
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExecUUID is the UUID struct for ExecRes.
|
// ExecUID is the UID struct for ExecRes.
|
||||||
type ExecUUID struct {
|
type ExecUID struct {
|
||||||
BaseUUID
|
BaseUID
|
||||||
Cmd string
|
Cmd string
|
||||||
IfCmd string
|
IfCmd string
|
||||||
// TODO: add more elements here
|
// TODO: add more elements here
|
||||||
}
|
}
|
||||||
|
|
||||||
// if and only if they are equivalent, return true
|
// IFF aka if and only if they are equivalent, return true. If not, false.
|
||||||
// if they are not equivalent, return false
|
func (obj *ExecUID) IFF(uid ResUID) bool {
|
||||||
func (obj *ExecUUID) IFF(uuid ResUUID) bool {
|
res, ok := uid.(*ExecUID)
|
||||||
res, ok := uuid.(*ExecUUID)
|
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -368,23 +381,23 @@ func (obj *ExecUUID) IFF(uuid ResUUID) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// The AutoEdges method returns the AutoEdges. In this case none are used.
|
// AutoEdges returns the AutoEdge interface. In this case no autoedges are used.
|
||||||
func (obj *ExecRes) AutoEdges() AutoEdge {
|
func (obj *ExecRes) AutoEdges() AutoEdge {
|
||||||
// TODO: parse as many exec params to look for auto edges, for example
|
// TODO: parse as many exec params to look for auto edges, for example
|
||||||
// the path of the binary in the Cmd variable might be from in a pkg
|
// the path of the binary in the Cmd variable might be from in a pkg
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUUIDs includes all params to make a unique identification of this object.
|
// GetUIDs includes all params to make a unique identification of this object.
|
||||||
// Most resources only return one, although some resources can return multiple.
|
// Most resources only return one, although some resources can return multiple.
|
||||||
func (obj *ExecRes) GetUUIDs() []ResUUID {
|
func (obj *ExecRes) GetUIDs() []ResUID {
|
||||||
x := &ExecUUID{
|
x := &ExecUID{
|
||||||
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
Cmd: obj.Cmd,
|
Cmd: obj.Cmd,
|
||||||
IfCmd: obj.IfCmd,
|
IfCmd: obj.IfCmd,
|
||||||
// TODO: add more params here
|
// TODO: add more params here
|
||||||
}
|
}
|
||||||
return []ResUUID{x}
|
return []ResUID{x}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GroupCmp returns whether two resources can be grouped together or not.
|
// GroupCmp returns whether two resources can be grouped together or not.
|
||||||
815
resources/file.go
Normal file
@@ -0,0 +1,815 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package resources
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/gob"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
"github.com/purpleidea/mgmt/global" // XXX: package mgmtmain instead?
|
||||||
|
"github.com/purpleidea/mgmt/recwatch"
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
gob.Register(&FileRes{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileRes is a file and directory resource.
|
||||||
|
type FileRes struct {
|
||||||
|
BaseRes `yaml:",inline"`
|
||||||
|
Path string `yaml:"path"` // path variable (should default to name)
|
||||||
|
Dirname string `yaml:"dirname"`
|
||||||
|
Basename string `yaml:"basename"`
|
||||||
|
Content string `yaml:"content"` // FIXME: how do you describe: "leave content alone" - state = "create" ?
|
||||||
|
Source string `yaml:"source"` // file path for source content
|
||||||
|
State string `yaml:"state"` // state: exists/present?, absent, (undefined?)
|
||||||
|
Recurse bool `yaml:"recurse"`
|
||||||
|
Force bool `yaml:"force"`
|
||||||
|
path string // computed path
|
||||||
|
isDir bool // computed isDir
|
||||||
|
sha256sum string
|
||||||
|
recWatcher *recwatch.RecWatcher
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileRes is a constructor for this resource. It also calls Init() for you.
|
||||||
|
func NewFileRes(name, path, dirname, basename, content, source, state string, recurse, force bool) (*FileRes, error) {
|
||||||
|
obj := &FileRes{
|
||||||
|
BaseRes: BaseRes{
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
Path: path,
|
||||||
|
Dirname: dirname,
|
||||||
|
Basename: basename,
|
||||||
|
Content: content,
|
||||||
|
Source: source,
|
||||||
|
State: state,
|
||||||
|
Recurse: recurse,
|
||||||
|
Force: force,
|
||||||
|
}
|
||||||
|
return obj, obj.Init()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init runs some startup code for this resource.
|
||||||
|
func (obj *FileRes) Init() error {
|
||||||
|
obj.sha256sum = ""
|
||||||
|
if obj.Path == "" { // use the name as the path default if missing
|
||||||
|
obj.Path = obj.BaseRes.Name
|
||||||
|
}
|
||||||
|
obj.path = obj.GetPath() // compute once
|
||||||
|
obj.isDir = strings.HasSuffix(obj.path, "/") // dirs have trailing slashes
|
||||||
|
|
||||||
|
obj.BaseRes.kind = "File"
|
||||||
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPath returns the actual path to use for this resource. It computes this
|
||||||
|
// after analysis of the Path, Dirname and Basename values. Dirs end with slash.
|
||||||
|
func (obj *FileRes) GetPath() string {
|
||||||
|
d := util.Dirname(obj.Path)
|
||||||
|
b := util.Basename(obj.Path)
|
||||||
|
if obj.Dirname == "" && obj.Basename == "" {
|
||||||
|
return obj.Path
|
||||||
|
}
|
||||||
|
if obj.Dirname == "" {
|
||||||
|
return d + obj.Basename
|
||||||
|
}
|
||||||
|
if obj.Basename == "" {
|
||||||
|
return obj.Dirname + b
|
||||||
|
}
|
||||||
|
// if obj.dirname != "" && obj.basename != ""
|
||||||
|
return obj.Dirname + obj.Basename
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate reports any problems with the struct definition.
|
||||||
|
func (obj *FileRes) Validate() error {
|
||||||
|
if obj.Dirname != "" && !strings.HasSuffix(obj.Dirname, "/") {
|
||||||
|
return fmt.Errorf("Dirname must end with a slash.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasPrefix(obj.Basename, "/") {
|
||||||
|
return fmt.Errorf("Basename must not start with a slash.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Content != "" && obj.Source != "" {
|
||||||
|
return fmt.Errorf("Can't specify both Content and Source.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.isDir && obj.Content != "" { // makes no sense
|
||||||
|
return fmt.Errorf("Can't specify Content when creating a Dir.")
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX: should this specify that we create an empty directory instead?
|
||||||
|
//if obj.Source == "" && obj.isDir {
|
||||||
|
// return fmt.Errorf("Can't specify an empty source when creating a Dir.")
|
||||||
|
//}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch is the primary listener for this resource and it outputs events.
// This one is a file watcher for files and directories.
// Modify with caution, it is probably important to write some test cases first!
// If the Watch returns an error, it means that something has gone wrong, and it
// must be restarted. On a clean exit it returns nil.
// FIXME: Also watch the source directory when using obj.Source !!!
func (obj *FileRes) Watch(processChan chan event.Event) error {
	if obj.IsWatching() {
		return nil // TODO: should this be an error?
	}
	obj.SetWatching(true)
	defer obj.SetWatching(false)
	cuid := obj.converger.Register()
	defer cuid.Unregister()

	// Startup returns a channel that fires once shortly after watch start so
	// an initial event is always sent; after the first send (block == true)
	// it returns nil, and a receive on a nil channel blocks forever.
	var startup bool
	Startup := func(block bool) <-chan time.Time {
		if block {
			return nil // blocks forever
			//return make(chan time.Time) // blocks forever
		}
		return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
	}

	var err error
	obj.recWatcher, err = recwatch.NewRecWatcher(obj.Path, obj.Recurse)
	if err != nil {
		return err
	}
	defer obj.recWatcher.Close()

	var send = false  // send event?
	var exit = false  // clean exit requested?
	var dirty = false // did an event invalidate our cached state?

	for {
		if global.DEBUG {
			log.Printf("%s[%s]: Watching: %s", obj.Kind(), obj.GetName(), obj.Path) // attempting to watch...
		}

		obj.SetState(ResStateWatching) // reset
		select {
		// filesystem notification from the recursive watcher
		case event, ok := <-obj.recWatcher.Events():
			if !ok { // channel shutdown
				return nil
			}
			cuid.SetConverged(false)
			if err := event.Error; err != nil {
				return fmt.Errorf("Unknown %s[%s] watcher error: %v", obj.Kind(), obj.GetName(), err)
			}
			if global.DEBUG { // don't access event.Body if event.Error isn't nil
				log.Printf("%s[%s]: Event(%s): %v", obj.Kind(), obj.GetName(), event.Body.Name, event.Body.Op)
			}
			send = true
			dirty = true

		// engine-level event (pause/exit/poke) from the graph
		case event := <-obj.Events():
			cuid.SetConverged(false)
			if exit, send = obj.ReadEvent(&event); exit {
				return nil // exit
			}
			//dirty = false // these events don't invalidate state

		case <-cuid.ConvergedTimer():
			cuid.SetConverged(true) // converged!
			continue

		// initial one-shot tick; nil (blocking) after startup completes
		case <-Startup(startup):
			cuid.SetConverged(false)
			send = true
			dirty = true
		}

		// do all our event sending all together to avoid duplicate msgs
		if send {
			startup = true // startup finished
			send = false
			// only invalid state on certain types of events
			if dirty {
				dirty = false
				obj.isStateOK = false // something made state dirty
			}
			if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
				return err // we exit or bubble up a NACK...
			}
		}
	}
}
|
||||||
|
|
||||||
|
// smartPath adds a trailing slash to the path if it is a directory.
|
||||||
|
func smartPath(fileInfo os.FileInfo) string {
|
||||||
|
smartPath := fileInfo.Name() // absolute path
|
||||||
|
if fileInfo.IsDir() {
|
||||||
|
smartPath += "/" // add a trailing slash for dirs
|
||||||
|
}
|
||||||
|
return smartPath
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo is an enhanced variant of the traditional os.FileInfo struct. It can
// store both the absolute and the relative paths (when built from our ReadDir),
// and those two paths contain a trailing slash when they refer to a directory.
type FileInfo struct {
	os.FileInfo        // embed
	AbsPath     string // smart variant: absolute path, "/"-suffixed for dirs
	RelPath     string // smart variant: path relative to the scanned dir, "/"-suffixed for dirs
}
|
||||||
|
|
||||||
|
// ReadDir reads a directory path, and returns a list of enhanced FileInfo's.
|
||||||
|
func ReadDir(path string) ([]FileInfo, error) {
|
||||||
|
if !strings.HasSuffix(path, "/") { // dirs have trailing slashes
|
||||||
|
return nil, fmt.Errorf("Path must be a directory.")
|
||||||
|
}
|
||||||
|
output := []FileInfo{} // my file info
|
||||||
|
fileInfos, err := ioutil.ReadDir(path)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return output, err // return empty list
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, fi := range fileInfos {
|
||||||
|
abs := path + smartPath(fi)
|
||||||
|
rel, err := filepath.Rel(path, abs) // NOTE: calls Clean()
|
||||||
|
if err != nil { // shouldn't happen
|
||||||
|
return nil, fmt.Errorf("ReadDir: Unhandled error: %v", err)
|
||||||
|
}
|
||||||
|
if fi.IsDir() {
|
||||||
|
rel += "/" // add a trailing slash for dirs
|
||||||
|
}
|
||||||
|
x := FileInfo{
|
||||||
|
FileInfo: fi,
|
||||||
|
AbsPath: abs,
|
||||||
|
RelPath: rel,
|
||||||
|
}
|
||||||
|
output = append(output, x)
|
||||||
|
}
|
||||||
|
return output, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// smartMapPaths adds a trailing slash to every path that is a directory. It
|
||||||
|
// returns the data as a map where the keys are the smart paths and where the
|
||||||
|
// values are the original os.FileInfo entries.
|
||||||
|
func mapPaths(fileInfos []FileInfo) map[string]FileInfo {
|
||||||
|
paths := make(map[string]FileInfo)
|
||||||
|
for _, fileInfo := range fileInfos {
|
||||||
|
paths[fileInfo.RelPath] = fileInfo
|
||||||
|
}
|
||||||
|
return paths
|
||||||
|
}
|
||||||
|
|
||||||
|
// fileCheckApply is the CheckApply operation for a source and destination file.
// It can accept an io.Reader as the source, which can be a regular file, or it
// can be a bytes Buffer struct. It can take an input sha256 hash to use instead
// of computing the source data hash, and it returns the computed value if this
// function reaches that stage. As usual, it respects the apply action variable,
// and it symmetry with the main CheckApply function returns checkOK and error.
func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sha256sum string) (string, bool, error) {
	// TODO: does it make sense to switch dst to an io.Writer ?
	// TODO: use obj.Force when dealing with symlinks and other file types!
	if global.DEBUG {
		log.Printf("fileCheckApply: %s -> %s", src, dst)
	}

	// only two concrete src types are supported: both of them can Seek,
	// which the hash-then-copy logic below depends on
	srcFile, isFile := src.(*os.File)
	_, isBytes := src.(*bytes.Reader) // supports seeking!
	if !isFile && !isBytes {
		return "", false, fmt.Errorf("Can't open src as either file or buffer!")
	}

	var srcStat os.FileInfo
	if isFile {
		var err error
		srcStat, err = srcFile.Stat()
		if err != nil {
			return "", false, err
		}
		// TODO: deal with symlinks
		if !srcStat.Mode().IsRegular() { // can't copy non-regular files or dirs
			return "", false, fmt.Errorf("Non-regular src file: %s (%q)", srcStat.Name(), srcStat.Mode())
		}
	}

	dstFile, err := os.Open(dst)
	if err != nil && !os.IsNotExist(err) { // ignore ErrNotExist errors
		return "", false, err
	}
	// dstClose is reused below (before os.Create) so the same handle isn't
	// held open while we rewrite the file
	dstClose := func() error {
		return dstFile.Close() // calling this twice is safe :)
	}
	defer dstClose()
	dstExists := !os.IsNotExist(err)

	dstStat, err := dstFile.Stat()
	if err != nil && dstExists {
		return "", false, err
	}

	if dstExists && dstStat.IsDir() { // oops, dst is a dir, and we want a file...
		if !apply {
			return "", false, nil
		}
		if !obj.Force {
			return "", false, fmt.Errorf("Can't force dir into file: %s", dst)
		}

		cleanDst := path.Clean(dst)
		if cleanDst == "" || cleanDst == "/" {
			return "", false, fmt.Errorf("Don't want to remove root!") // safety
		}
		// FIXME: respect obj.Recurse here...
		// there is a dir here, where we want a file...
		log.Printf("fileCheckApply: Removing (force): %s", cleanDst)
		if err := os.RemoveAll(cleanDst); err != nil { // dangerous ;)
			return "", false, err
		}
		dstExists = false // now it's gone!

	} else if err == nil {
		if !dstStat.Mode().IsRegular() {
			return "", false, fmt.Errorf("Non-regular dst file: %s (%q)", dstStat.Name(), dstStat.Mode())
		}
		if isFile && os.SameFile(srcStat, dstStat) { // same inode, we're done!
			return "", true, nil
		}
	}

	if dstExists { // if dst doesn't exist, no need to compare hashes
		// hash comparison (efficient because we can cache hash of content str)
		if sha256sum == "" { // cache is invalid
			hash := sha256.New()
			// TODO: file existence test?
			if _, err := io.Copy(hash, src); err != nil {
				return "", false, err
			}
			sha256sum = hex.EncodeToString(hash.Sum(nil))
			// since we re-use this src handler below, it is
			// *critical* to seek to 0, or we'll copy nothing!
			if n, err := src.Seek(0, 0); err != nil || n != 0 {
				return sha256sum, false, err
			}
		}

		// dst hash
		hash := sha256.New()
		if _, err := io.Copy(hash, dstFile); err != nil {
			return "", false, err
		}
		if h := hex.EncodeToString(hash.Sum(nil)); h == sha256sum {
			return sha256sum, true, nil // same!
		}
	}

	// state is not okay, no work done, exit, but without error
	if !apply {
		return sha256sum, false, nil
	}
	if global.DEBUG {
		log.Printf("fileCheckApply: Apply: %s -> %s", src, dst)
	}

	dstClose() // unlock file usage so we can write to it
	dstFile, err = os.Create(dst)
	if err != nil {
		return sha256sum, false, err
	}
	defer dstFile.Close() // TODO: is this redundant because of the earlier defered Close() ?

	if isFile { // set mode because it's a new file
		if err := dstFile.Chmod(srcStat.Mode()); err != nil {
			return sha256sum, false, err
		}
	}

	// TODO: attempt to reflink with Splice() and int(file.Fd()) as input...
	// syscall.Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)

	// TODO: should we offer a way to cancel the copy on ^C ?
	if global.DEBUG {
		log.Printf("fileCheckApply: Copy: %s -> %s", src, dst)
	}
	if n, err := io.Copy(dstFile, src); err != nil {
		return sha256sum, false, err
	} else if global.DEBUG {
		log.Printf("fileCheckApply: Copied: %v", n)
	}
	// checkOK is false here because we just did work (applied a change)
	return sha256sum, false, dstFile.Sync()
}
|
||||||
|
|
||||||
|
// syncCheckApply is the CheckApply operation for a source and destination dir.
// It is recursive and can create directories directly, and files via the usual
// fileCheckApply method. It returns checkOK and error as is normally expected.
// Both src and dst use the "smart path" convention: a trailing slash means a
// directory, its absence means a file. src and dst must agree on that.
func (obj *FileRes) syncCheckApply(apply bool, src, dst string) (bool, error) {
	if global.DEBUG {
		log.Printf("syncCheckApply: %s -> %s", src, dst)
	}
	if src == "" || dst == "" {
		return false, fmt.Errorf("The src and dst must not be empty!")
	}

	var checkOK = true
	// TODO: handle ./ cases or ../ cases that need cleaning ?

	srcIsDir := strings.HasSuffix(src, "/")
	dstIsDir := strings.HasSuffix(dst, "/")

	if srcIsDir != dstIsDir {
		return false, fmt.Errorf("The src and dst must be both either files or directories.")
	}

	// base case: plain file to plain file, delegate to fileCheckApply
	if !srcIsDir && !dstIsDir {
		if global.DEBUG {
			log.Printf("syncCheckApply: %s -> %s", src, dst)
		}
		fin, err := os.Open(src)
		if err != nil {
			if global.DEBUG && os.IsNotExist(err) { // if we get passed an empty src
				log.Printf("syncCheckApply: Missing src: %s", src)
			}
			return false, err
		}

		_, checkOK, err := obj.fileCheckApply(apply, fin, dst, "")
		if err != nil {
			fin.Close()
			return false, err
		}
		return checkOK, fin.Close()
	}

	// else: if srcIsDir && dstIsDir
	srcFiles, err := ReadDir(src)          // if src does not exist...
	if err != nil && !os.IsNotExist(err) { // an empty map comes out below!
		return false, err
	}
	dstFiles, err := ReadDir(dst)
	if err != nil && !os.IsNotExist(err) {
		return false, err
	}
	//log.Printf("syncCheckApply: srcFiles: %v", srcFiles)
	//log.Printf("syncCheckApply: dstFiles: %v", dstFiles)
	smartSrc := mapPaths(srcFiles)
	smartDst := mapPaths(dstFiles) // doubles as the purge list: entries still
	// present after the src pass are stale and get removed below

	for relPath, fileInfo := range smartSrc {
		absSrc := fileInfo.AbsPath // absolute path
		absDst := dst + relPath    // absolute dest

		if _, exists := smartDst[relPath]; !exists {
			if fileInfo.IsDir() {
				if !apply { // only checking and not identical!
					return false, nil
				}

				// file exists, but we want a dir: we need force
				// we check for the file w/o the smart dir slash
				relPathFile := strings.TrimSuffix(relPath, "/")
				if _, ok := smartDst[relPathFile]; ok {
					absCleanDst := path.Clean(absDst)
					if !obj.Force {
						return false, fmt.Errorf("Can't force file into dir: %s", absCleanDst)
					}
					if absCleanDst == "" || absCleanDst == "/" {
						return false, fmt.Errorf("Don't want to remove root!") // safety
					}
					log.Printf("syncCheckApply: Removing (force): %s", absCleanDst)
					if err := os.Remove(absCleanDst); err != nil {
						return false, err
					}
					delete(smartDst, relPathFile) // rm from purge list
				}

				if global.DEBUG {
					log.Printf("syncCheckApply: mkdir -m %s '%s'", fileInfo.Mode(), absDst)
				}
				if err := os.Mkdir(absDst, fileInfo.Mode()); err != nil {
					return false, err
				}
				checkOK = false // we did some work
			}
			// if we're a regular file, the recurse will create it
		}

		if global.DEBUG {
			log.Printf("syncCheckApply: Recurse: %s -> %s", absSrc, absDst)
		}
		if obj.Recurse {
			if c, err := obj.syncCheckApply(apply, absSrc, absDst); err != nil { // recurse
				return false, fmt.Errorf("syncCheckApply: Recurse failed: %v", err)
			} else if !c { // don't let subsequent passes make this true
				checkOK = false
			}
		}
		if !apply && !checkOK { // check failed, and no apply to do, so exit!
			return false, nil
		}
		delete(smartDst, relPath) // rm from purge list
	}

	if !apply && len(smartDst) > 0 { // we know there are files to remove!
		return false, nil // so just exit now
	}
	// any files that now remain in smartDst need to be removed...
	for relPath, fileInfo := range smartDst {
		absSrc := src + relPath    // absolute dest (should not exist!)
		absDst := fileInfo.AbsPath // absolute path (should get removed)
		absCleanDst := path.Clean(absDst)
		if absCleanDst == "" || absCleanDst == "/" {
			return false, fmt.Errorf("Don't want to remove root!") // safety
		}

		// FIXME: respect obj.Recurse here...

		// NOTE: we could use os.RemoveAll instead of recursing, but I
		// think the symmetry is more elegant and correct here for now
		// Avoiding this is also useful if we had a recurse limit arg!
		if true { // switch: the recursive-rm branch below is disabled for now
			log.Printf("syncCheckApply: Removing: %s", absCleanDst)
			if apply {
				if err := os.RemoveAll(absCleanDst); err != nil { // dangerous ;)
					return false, err
				}
				checkOK = false
			}
			continue
		}
		_ = absSrc // silence "unused" while the branch below stays commented out
		//log.Printf("syncCheckApply: Recurse rm: %s -> %s", absSrc, absDst)
		//if c, err := obj.syncCheckApply(apply, absSrc, absDst); err != nil {
		//	return false, fmt.Errorf("syncCheckApply: Recurse rm failed: %v", err)
		//} else if !c { // don't let subsequent passes make this true
		//	checkOK = false
		//}
		//log.Printf("syncCheckApply: Removing: %s", absCleanDst)
		//if apply { // safety
		//	if err := os.Remove(absCleanDst); err != nil {
		//		return false, err
		//	}
		//	checkOK = false
		//}
	}

	return checkOK, nil
}
|
||||||
|
|
||||||
|
// contentCheckApply performs a CheckApply for the file existence and content.
// It handles three cases in order: the "absent" state (remove the path), the
// Content-specified case (write the literal string), and the Source-specified
// case (sync from another path via syncCheckApply).
func (obj *FileRes) contentCheckApply(apply bool) (checkOK bool, _ error) {
	log.Printf("%v[%v]: contentCheckApply(%t)", obj.Kind(), obj.GetName(), apply)

	if obj.State == "absent" {
		if _, err := os.Stat(obj.path); os.IsNotExist(err) {
			// no such file or directory, but
			// file should be missing, phew :)
			return true, nil

		} else if err != nil { // what could this error be?
			return false, err
		}

		// state is not okay, no work done, exit, but without error
		if !apply {
			return false, nil
		}

		// apply portion
		if obj.path == "" || obj.path == "/" {
			return false, fmt.Errorf("Don't want to remove root!") // safety
		}
		log.Printf("contentCheckApply: Removing: %s", obj.path)
		// FIXME: respect obj.Recurse here...
		// TODO: add recurse limit here
		err := os.RemoveAll(obj.path) // dangerous ;)
		return false, err             // either nil or not
	}

	if obj.Source == "" { // do the obj.Content checks first...
		if obj.isDir { // TODO: should we create an empty dir this way?
			log.Fatal("XXX: Not implemented!") // XXX
		}

		bufferSrc := bytes.NewReader([]byte(obj.Content))
		// pass the cached hash in so fileCheckApply can skip re-hashing
		sha256sum, checkOK, err := obj.fileCheckApply(apply, bufferSrc, obj.path, obj.sha256sum)
		if sha256sum != "" { // empty values mean errored or didn't hash
			// this can be valid even when the whole function errors
			obj.sha256sum = sha256sum // cache value
		}
		if err != nil {
			return false, err
		}
		// if no err, but !ok, then...
		return checkOK, nil // success
	}

	// Source-backed: sync the source path (file or dir) onto obj.path
	checkOK, err := obj.syncCheckApply(apply, obj.Source, obj.path)
	if err != nil {
		log.Printf("syncCheckApply: Error: %v", err)
		return false, err
	}

	return checkOK, nil
}
|
||||||
|
|
||||||
|
// CheckApply checks the resource state and applies the resource if the bool
|
||||||
|
// input is true. It returns error info and if the state check passed or not.
|
||||||
|
func (obj *FileRes) CheckApply(apply bool) (checkOK bool, _ error) {
|
||||||
|
log.Printf("%v[%v]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
|
||||||
|
|
||||||
|
if obj.isStateOK { // cache the state
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
checkOK = true
|
||||||
|
|
||||||
|
if c, err := obj.contentCheckApply(apply); err != nil {
|
||||||
|
return false, err
|
||||||
|
} else if !c {
|
||||||
|
checkOK = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO
|
||||||
|
//if c, err := obj.chmodCheckApply(apply); err != nil {
|
||||||
|
// return false, err
|
||||||
|
//} else if !c {
|
||||||
|
// checkOK = false
|
||||||
|
//}
|
||||||
|
|
||||||
|
// TODO
|
||||||
|
//if c, err := obj.chownCheckApply(apply); err != nil {
|
||||||
|
// return false, err
|
||||||
|
//} else if !c {
|
||||||
|
// checkOK = false
|
||||||
|
//}
|
||||||
|
|
||||||
|
// if we did work successfully, or are in a good state, then state is ok
|
||||||
|
if apply || checkOK {
|
||||||
|
obj.isStateOK = true
|
||||||
|
}
|
||||||
|
return checkOK, nil // w00t
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileUID is the UID struct for FileRes.
type FileUID struct {
	BaseUID
	path string // the resolved path this resource manages (computed by GetPath)
}
|
||||||
|
|
||||||
|
// IFF aka if and only if they are equivalent, return true. If not, false.
|
||||||
|
func (obj *FileUID) IFF(uid ResUID) bool {
|
||||||
|
res, ok := uid.(*FileUID)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return obj.path == res.path
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileResAutoEdges holds the state of the auto edge generator.
type FileResAutoEdges struct {
	data    []ResUID // parent-directory UIDs, ordered bottom-up
	pointer int      // index of the next UID that Next() will return
	found   bool     // set once Test() reports a match; iteration then stops
}
|
||||||
|
|
||||||
|
// Next returns the next automatic edge.
|
||||||
|
func (obj *FileResAutoEdges) Next() []ResUID {
|
||||||
|
if obj.found {
|
||||||
|
log.Fatal("Shouldn't be called anymore!")
|
||||||
|
}
|
||||||
|
if len(obj.data) == 0 { // check length for rare scenarios
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
value := obj.data[obj.pointer]
|
||||||
|
obj.pointer++
|
||||||
|
return []ResUID{value} // we return one, even though api supports N
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test gets results of the earlier Next() call, & returns if we should continue!
|
||||||
|
func (obj *FileResAutoEdges) Test(input []bool) bool {
|
||||||
|
// if there aren't any more remaining
|
||||||
|
if len(obj.data) <= obj.pointer {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.found { // already found, done!
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(input) != 1 { // in case we get given bad data
|
||||||
|
log.Fatal("Expecting a single value!")
|
||||||
|
}
|
||||||
|
if input[0] { // if a match is found, we're done!
|
||||||
|
obj.found = true // no more to find!
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true // keep going
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoEdges generates a simple linear sequence of each parent directory from
|
||||||
|
// the bottom up!
|
||||||
|
func (obj *FileRes) AutoEdges() AutoEdge {
|
||||||
|
var data []ResUID // store linear result chain here...
|
||||||
|
values := util.PathSplitFullReversed(obj.path) // build it
|
||||||
|
_, values = values[0], values[1:] // get rid of first value which is me!
|
||||||
|
for _, x := range values {
|
||||||
|
var reversed = true // cheat by passing a pointer
|
||||||
|
data = append(data, &FileUID{
|
||||||
|
BaseUID: BaseUID{
|
||||||
|
name: obj.GetName(),
|
||||||
|
kind: obj.Kind(),
|
||||||
|
reversed: &reversed,
|
||||||
|
},
|
||||||
|
path: x, // what matters
|
||||||
|
}) // build list
|
||||||
|
}
|
||||||
|
return &FileResAutoEdges{
|
||||||
|
data: data,
|
||||||
|
pointer: 0,
|
||||||
|
found: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUIDs includes all params to make a unique identification of this object.
|
||||||
|
// Most resources only return one, although some resources can return multiple.
|
||||||
|
func (obj *FileRes) GetUIDs() []ResUID {
|
||||||
|
x := &FileUID{
|
||||||
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
|
path: obj.path,
|
||||||
|
}
|
||||||
|
return []ResUID{x}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupCmp returns whether two resources can be grouped together or not.
|
||||||
|
func (obj *FileRes) GroupCmp(r Res) bool {
|
||||||
|
_, ok := r.(*FileRes)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// TODO: we might be able to group directory children into a single
|
||||||
|
// recursive watcher in the future, thus saving fanotify watches
|
||||||
|
return false // not possible atm
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare two resources and return if they are equivalent.
|
||||||
|
func (obj *FileRes) Compare(res Res) bool {
|
||||||
|
switch res.(type) {
|
||||||
|
case *FileRes:
|
||||||
|
res := res.(*FileRes)
|
||||||
|
if !obj.BaseRes.Compare(res) { // call base Compare
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Name != res.Name {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.path != res.Path {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Content != res.Content {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Source != res.Source {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.State != res.State {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Recurse != res.Recurse {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Force != res.Force {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// CollectPattern applies the pattern for collection resources. For files, the
// pattern is used directly as a Dirname override, relocating the file there.
func (obj *FileRes) CollectPattern(pattern string) {
	// XXX: currently the pattern for files can only override the Dirname variable :P
	obj.Dirname = pattern // XXX: simplistic for now
}
|
||||||
271
resources/msg.go
Normal file
@@ -0,0 +1,271 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package resources
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/gob"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
|
||||||
|
"github.com/coreos/go-systemd/journal"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init registers the MsgRes type with gob so it can be (de)serialized when
// resources are sent over the wire.
func init() {
	gob.Register(&MsgRes{})
}
|
||||||
|
|
||||||
|
// MsgRes is a resource that writes messages to logs.
type MsgRes struct {
	BaseRes  `yaml:",inline"`
	Body     string            `yaml:"body"`     // message text (falls back to the resource name -- see NewMsgRes)
	Priority string            `yaml:"priority"` // message priority -- presumably journal/syslog levels; confirm accepted values
	Fields   map[string]string `yaml:"fields"`   // extra structured fields; names validated in Validate()
	Journal  bool              `yaml:"journal"`  // enable systemd journal output
	Syslog   bool              `yaml:"syslog"`   // enable syslog output
	logStateOK bool // cached: plain log output already done (per-backend state flags,
	journalStateOK bool // cached: journal output already done -- consumed by isAllStateOK
	syslogStateOK  bool // cached: syslog output already done
}
|
||||||
|
|
||||||
|
// MsgUID is a unique representation for a MsgRes object.
type MsgUID struct {
	BaseUID
	body string // the message body, which identifies this msg resource
}
|
||||||
|
|
||||||
|
// NewMsgRes is a constructor for this resource.
|
||||||
|
func NewMsgRes(name, body, priority string, journal, syslog bool, fields map[string]string) (*MsgRes, error) {
|
||||||
|
message := name
|
||||||
|
if body != "" {
|
||||||
|
message = body
|
||||||
|
}
|
||||||
|
|
||||||
|
obj := &MsgRes{
|
||||||
|
BaseRes: BaseRes{
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
Body: message,
|
||||||
|
Priority: priority,
|
||||||
|
Fields: fields,
|
||||||
|
Journal: journal,
|
||||||
|
Syslog: syslog,
|
||||||
|
}
|
||||||
|
|
||||||
|
return obj, obj.Init()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init runs some startup code for this resource.
func (obj *MsgRes) Init() error {
	obj.BaseRes.kind = "Msg" // set the kind before the base init runs
	return obj.BaseRes.Init() // call base init, b/c we're overriding
}
|
||||||
|
|
||||||
|
// Validate the params that are passed to MsgRes
|
||||||
|
func (obj *MsgRes) Validate() error {
|
||||||
|
invalidCharacters := regexp.MustCompile("[^a-zA-Z0-9_]")
|
||||||
|
for field := range obj.Fields {
|
||||||
|
if invalidCharacters.FindString(field) != "" {
|
||||||
|
return fmt.Errorf("Invalid character in field %s.", field)
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(field, "_") {
|
||||||
|
return fmt.Errorf("Fields cannot begin with _.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch is the primary listener for this resource and it outputs events.
// Msg has no external state to watch, so this loop only responds to engine
// events, convergence timers, and the one-shot startup tick.
func (obj *MsgRes) Watch(processChan chan event.Event) error {
	if obj.IsWatching() {
		return nil
	}
	obj.SetWatching(true)
	defer obj.SetWatching(false)
	cuid := obj.converger.Register()
	defer cuid.Unregister()

	// Startup fires once shortly after watch start so an initial event is
	// always sent; afterwards (block == true) it returns a nil channel,
	// which blocks forever in the select below.
	var startup bool
	Startup := func(block bool) <-chan time.Time {
		if block {
			return nil // blocks forever
			//return make(chan time.Time) // blocks forever
		}
		return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
	}

	var send = false // send event?
	var exit = false // clean exit requested?
	for {
		obj.SetState(ResStateWatching) // reset
		select {
		case event := <-obj.Events():
			cuid.SetConverged(false)
			// we avoid sending events on unpause
			if exit, send = obj.ReadEvent(&event); exit {
				return nil // exit
			}

			/*
				// TODO: invalidate cached state on poke events
				obj.logStateOK = false
				if obj.Journal {
					obj.journalStateOK = false
				}
				if obj.Syslog {
					obj.syslogStateOK = false
				}
			*/
			send = true

		case <-cuid.ConvergedTimer():
			cuid.SetConverged(true) // converged!
			continue

		case <-Startup(startup):
			cuid.SetConverged(false)
			send = true
		}

		// do all our event sending all together to avoid duplicate msgs
		if send {
			startup = true // startup finished
			send = false
			// only do this on certain types of events
			//obj.isStateOK = false // something made state dirty
			if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
				return err // we exit or bubble up a NACK...
			}
		}
	}
}
|
||||||
|
|
||||||
|
// GetUIDs includes all params to make a unique identification of this object.
|
||||||
|
// Most resources only return one, although some resources can return multiple.
|
||||||
|
func (obj *MsgRes) GetUIDs() []ResUID {
|
||||||
|
x := &MsgUID{
|
||||||
|
BaseUID: BaseUID{
|
||||||
|
name: obj.GetName(),
|
||||||
|
kind: obj.Kind(),
|
||||||
|
},
|
||||||
|
body: obj.Body,
|
||||||
|
}
|
||||||
|
return []ResUID{x}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoEdges returns the AutoEdges. In this case none are used.
|
||||||
|
func (obj *MsgRes) AutoEdges() AutoEdge {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare two resources and return if they are equivalent.
|
||||||
|
func (obj *MsgRes) Compare(res Res) bool {
|
||||||
|
switch res.(type) {
|
||||||
|
case *MsgRes:
|
||||||
|
res := res.(*MsgRes)
|
||||||
|
if !obj.BaseRes.Compare(res) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Body != res.Body {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Priority != res.Priority {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(obj.Fields) != len(res.Fields) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for field, value := range obj.Fields {
|
||||||
|
if res.Fields[field] != value {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsAllStateOK derives a compound state from all internal cache flags that apply to this resource.
|
||||||
|
func (obj *MsgRes) isAllStateOK() bool {
|
||||||
|
if obj.Journal && !obj.journalStateOK {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Syslog && !obj.syslogStateOK {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return obj.logStateOK
|
||||||
|
}
|
||||||
|
|
||||||
|
// JournalPriority converts a string description to a numeric priority.
|
||||||
|
// XXX: Have Validate() make sure it actually is one of these.
|
||||||
|
func (obj *MsgRes) journalPriority() journal.Priority {
|
||||||
|
switch obj.Priority {
|
||||||
|
case "Emerg":
|
||||||
|
return journal.PriEmerg
|
||||||
|
case "Alert":
|
||||||
|
return journal.PriAlert
|
||||||
|
case "Crit":
|
||||||
|
return journal.PriCrit
|
||||||
|
case "Err":
|
||||||
|
return journal.PriErr
|
||||||
|
case "Warning":
|
||||||
|
return journal.PriWarning
|
||||||
|
case "Notice":
|
||||||
|
return journal.PriNotice
|
||||||
|
case "Info":
|
||||||
|
return journal.PriInfo
|
||||||
|
case "Debug":
|
||||||
|
return journal.PriDebug
|
||||||
|
}
|
||||||
|
return journal.PriNotice
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckApply method for Msg resource.
|
||||||
|
// Every check leads to an apply, meaning that the message is flushed to the journal.
|
||||||
|
func (obj *MsgRes) CheckApply(apply bool) (bool, error) {
|
||||||
|
log.Printf("%s[%s]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
|
||||||
|
|
||||||
|
if obj.isAllStateOK() {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !obj.logStateOK {
|
||||||
|
log.Printf("%s[%s]: Body: %s", obj.Kind(), obj.GetName(), obj.Body)
|
||||||
|
obj.logStateOK = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if !apply {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
if obj.Journal && !obj.journalStateOK {
|
||||||
|
if err := journal.Send(obj.Body, obj.journalPriority(), obj.Fields); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
obj.journalStateOK = true
|
||||||
|
}
|
||||||
|
if obj.Syslog && !obj.syslogStateOK {
|
||||||
|
// TODO: implement syslog client
|
||||||
|
obj.syslogStateOK = true
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
@@ -15,11 +15,14 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"log"
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -33,64 +36,77 @@ type NoopRes struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewNoopRes is a constructor for this resource. It also calls Init() for you.
|
// NewNoopRes is a constructor for this resource. It also calls Init() for you.
|
||||||
func NewNoopRes(name string) *NoopRes {
|
func NewNoopRes(name string) (*NoopRes, error) {
|
||||||
obj := &NoopRes{
|
obj := &NoopRes{
|
||||||
BaseRes: BaseRes{
|
BaseRes: BaseRes{
|
||||||
Name: name,
|
Name: name,
|
||||||
},
|
},
|
||||||
Comment: "",
|
Comment: "",
|
||||||
}
|
}
|
||||||
obj.Init()
|
return obj, obj.Init()
|
||||||
return obj
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init runs some startup code for this resource.
|
// Init runs some startup code for this resource.
|
||||||
func (obj *NoopRes) Init() {
|
func (obj *NoopRes) Init() error {
|
||||||
obj.BaseRes.kind = "Noop"
|
obj.BaseRes.kind = "Noop"
|
||||||
obj.BaseRes.Init() // call base init, b/c we're overriding
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
}
|
}
|
||||||
|
|
||||||
// validate if the params passed in are valid data
|
// Validate if the params passed in are valid data.
|
||||||
// FIXME: where should this get called ?
|
// FIXME: where should this get called ?
|
||||||
func (obj *NoopRes) Validate() bool {
|
func (obj *NoopRes) Validate() error {
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
func (obj *NoopRes) Watch(processChan chan Event) {
|
func (obj *NoopRes) Watch(processChan chan event.Event) error {
|
||||||
if obj.IsWatching() {
|
if obj.IsWatching() {
|
||||||
return
|
return nil // TODO: should this be an error?
|
||||||
}
|
}
|
||||||
obj.SetWatching(true)
|
obj.SetWatching(true)
|
||||||
defer obj.SetWatching(false)
|
defer obj.SetWatching(false)
|
||||||
cuuid := obj.converger.Register()
|
cuid := obj.converger.Register()
|
||||||
defer cuuid.Unregister()
|
defer cuid.Unregister()
|
||||||
|
|
||||||
|
var startup bool
|
||||||
|
Startup := func(block bool) <-chan time.Time {
|
||||||
|
if block {
|
||||||
|
return nil // blocks forever
|
||||||
|
//return make(chan time.Time) // blocks forever
|
||||||
|
}
|
||||||
|
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
||||||
|
}
|
||||||
|
|
||||||
var send = false // send event?
|
var send = false // send event?
|
||||||
var exit = false
|
var exit = false
|
||||||
for {
|
for {
|
||||||
obj.SetState(resStateWatching) // reset
|
obj.SetState(ResStateWatching) // reset
|
||||||
select {
|
select {
|
||||||
case event := <-obj.events:
|
case event := <-obj.Events():
|
||||||
cuuid.SetConverged(false)
|
cuid.SetConverged(false)
|
||||||
// we avoid sending events on unpause
|
// we avoid sending events on unpause
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
if exit, send = obj.ReadEvent(&event); exit {
|
||||||
return // exit
|
return nil // exit
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-cuuid.ConvergedTimer():
|
case <-cuid.ConvergedTimer():
|
||||||
cuuid.SetConverged(true) // converged!
|
cuid.SetConverged(true) // converged!
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
case <-Startup(startup):
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
send = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// do all our event sending all together to avoid duplicate msgs
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
if send {
|
if send {
|
||||||
|
startup = true // startup finished
|
||||||
send = false
|
send = false
|
||||||
// only do this on certain types of events
|
// only do this on certain types of events
|
||||||
//obj.isStateOK = false // something made state dirty
|
//obj.isStateOK = false // something made state dirty
|
||||||
resp := NewResp()
|
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
||||||
processChan <- Event{eventNil, resp, "", true} // trigger process
|
return err // we exit or bubble up a NACK...
|
||||||
resp.ACKWait() // wait for the ACK()
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -101,25 +117,25 @@ func (obj *NoopRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
return true, nil // state is always okay
|
return true, nil // state is always okay
|
||||||
}
|
}
|
||||||
|
|
||||||
// NoopUUID is the UUID struct for NoopRes.
|
// NoopUID is the UID struct for NoopRes.
|
||||||
type NoopUUID struct {
|
type NoopUID struct {
|
||||||
BaseUUID
|
BaseUID
|
||||||
name string
|
name string
|
||||||
}
|
}
|
||||||
|
|
||||||
// The AutoEdges method returns the AutoEdges. In this case none are used.
|
// AutoEdges returns the AutoEdge interface. In this case no autoedges are used.
|
||||||
func (obj *NoopRes) AutoEdges() AutoEdge {
|
func (obj *NoopRes) AutoEdges() AutoEdge {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUUIDs includes all params to make a unique identification of this object.
|
// GetUIDs includes all params to make a unique identification of this object.
|
||||||
// Most resources only return one, although some resources can return multiple.
|
// Most resources only return one, although some resources can return multiple.
|
||||||
func (obj *NoopRes) GetUUIDs() []ResUUID {
|
func (obj *NoopRes) GetUIDs() []ResUID {
|
||||||
x := &NoopUUID{
|
x := &NoopUID{
|
||||||
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
name: obj.Name,
|
name: obj.Name,
|
||||||
}
|
}
|
||||||
return []ResUUID{x}
|
return []ResUID{x}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GroupCmp returns whether two resources can be grouped together or not.
|
// GroupCmp returns whether two resources can be grouped together or not.
|
||||||
@@ -15,17 +15,20 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
// DOCS: https://www.freedesktop.org/software/PackageKit/gtk-doc/index.html
|
// Package packagekit provides an interface to interact with packagekit.
|
||||||
|
// See: https://www.freedesktop.org/software/PackageKit/gtk-doc/index.html for
|
||||||
//package packagekit // TODO
|
// more information.
|
||||||
package main
|
package packagekit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/godbus/dbus"
|
|
||||||
"log"
|
"log"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
|
||||||
|
"github.com/godbus/dbus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// global tweaks of verbosity and code path
|
// global tweaks of verbosity and code path
|
||||||
@@ -160,7 +163,7 @@ type PkPackageIDActionData struct {
|
|||||||
// NewBus returns a new bus connection.
|
// NewBus returns a new bus connection.
|
||||||
func NewBus() *Conn {
|
func NewBus() *Conn {
|
||||||
// if we share the bus with others, we will get each others messages!!
|
// if we share the bus with others, we will get each others messages!!
|
||||||
bus, err := SystemBusPrivateUsable() // don't share the bus connection!
|
bus, err := util.SystemBusPrivateUsable() // don't share the bus connection!
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -422,7 +425,7 @@ loop:
|
|||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("PackageKit: Error: %v", signal.Body)
|
return fmt.Errorf("PackageKit: Error: %v", signal.Body)
|
||||||
}
|
}
|
||||||
case <-TimeAfterOrBlock(timeout):
|
case <-util.TimeAfterOrBlock(timeout):
|
||||||
if finished {
|
if finished {
|
||||||
log.Println("PackageKit: Timeout: InstallPackages: Waiting for 'Destroy'")
|
log.Println("PackageKit: Timeout: InstallPackages: Waiting for 'Destroy'")
|
||||||
return nil // got tired of waiting for Destroy
|
return nil // got tired of waiting for Destroy
|
||||||
@@ -15,16 +15,21 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
//"packagekit" // TODO
|
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
"github.com/purpleidea/mgmt/global" // XXX: package mgmtmain instead?
|
||||||
|
"github.com/purpleidea/mgmt/resources/packagekit"
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -38,12 +43,12 @@ type PkgRes struct {
|
|||||||
AllowUntrusted bool `yaml:"allowuntrusted"` // allow untrusted packages to be installed?
|
AllowUntrusted bool `yaml:"allowuntrusted"` // allow untrusted packages to be installed?
|
||||||
AllowNonFree bool `yaml:"allownonfree"` // allow nonfree packages to be found?
|
AllowNonFree bool `yaml:"allownonfree"` // allow nonfree packages to be found?
|
||||||
AllowUnsupported bool `yaml:"allowunsupported"` // allow unsupported packages to be found?
|
AllowUnsupported bool `yaml:"allowunsupported"` // allow unsupported packages to be found?
|
||||||
//bus *Conn // pk bus connection
|
//bus *packagekit.Conn // pk bus connection
|
||||||
fileList []string // FIXME: update if pkg changes
|
fileList []string // FIXME: update if pkg changes
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewPkgRes is a constructor for this resource. It also calls Init() for you.
|
// NewPkgRes is a constructor for this resource. It also calls Init() for you.
|
||||||
func NewPkgRes(name, state string, allowuntrusted, allownonfree, allowunsupported bool) *PkgRes {
|
func NewPkgRes(name, state string, allowuntrusted, allownonfree, allowunsupported bool) (*PkgRes, error) {
|
||||||
obj := &PkgRes{
|
obj := &PkgRes{
|
||||||
BaseRes: BaseRes{
|
BaseRes: BaseRes{
|
||||||
Name: name,
|
Name: name,
|
||||||
@@ -53,71 +58,76 @@ func NewPkgRes(name, state string, allowuntrusted, allownonfree, allowunsupporte
|
|||||||
AllowNonFree: allownonfree,
|
AllowNonFree: allownonfree,
|
||||||
AllowUnsupported: allowunsupported,
|
AllowUnsupported: allowunsupported,
|
||||||
}
|
}
|
||||||
obj.Init()
|
return obj, obj.Init()
|
||||||
return obj
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init runs some startup code for this resource.
|
// Init runs some startup code for this resource.
|
||||||
func (obj *PkgRes) Init() {
|
func (obj *PkgRes) Init() error {
|
||||||
obj.BaseRes.kind = "Pkg"
|
obj.BaseRes.kind = "Pkg"
|
||||||
obj.BaseRes.Init() // call base init, b/c we're overriding
|
if err := obj.BaseRes.Init(); err != nil { // call base init, b/c we're overriding
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
bus := NewBus()
|
bus := packagekit.NewBus()
|
||||||
if bus == nil {
|
if bus == nil {
|
||||||
log.Fatal("Can't connect to PackageKit bus.")
|
return fmt.Errorf("Can't connect to PackageKit bus.")
|
||||||
}
|
}
|
||||||
defer bus.Close()
|
defer bus.Close()
|
||||||
|
|
||||||
result, err := obj.pkgMappingHelper(bus)
|
result, err := obj.pkgMappingHelper(bus)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// FIXME: return error?
|
return fmt.Errorf("The pkgMappingHelper failed with: %v.", err)
|
||||||
log.Fatalf("The pkgMappingHelper failed with: %v.", err)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
data, ok := result[obj.Name] // lookup single package (init does just one)
|
data, ok := result[obj.Name] // lookup single package (init does just one)
|
||||||
// package doesn't exist, this is an error!
|
// package doesn't exist, this is an error!
|
||||||
if !ok || !data.Found {
|
if !ok || !data.Found {
|
||||||
// FIXME: return error?
|
return fmt.Errorf("Can't find package named '%s'.", obj.Name)
|
||||||
log.Fatalf("Can't find package named '%s'.", obj.Name)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
packageIDs := []string{data.PackageID} // just one for now
|
packageIDs := []string{data.PackageID} // just one for now
|
||||||
filesMap, err := bus.GetFilesByPackageID(packageIDs)
|
filesMap, err := bus.GetFilesByPackageID(packageIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// FIXME: return error?
|
return fmt.Errorf("Can't run GetFilesByPackageID: %v", err)
|
||||||
log.Fatalf("Can't run GetFilesByPackageID: %v", err)
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
if files, ok := filesMap[data.PackageID]; ok {
|
if files, ok := filesMap[data.PackageID]; ok {
|
||||||
obj.fileList = DirifyFileList(files, false)
|
obj.fileList = util.DirifyFileList(files, false)
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate checks if the resource data structure was populated correctly.
|
// Validate checks if the resource data structure was populated correctly.
|
||||||
func (obj *PkgRes) Validate() bool {
|
func (obj *PkgRes) Validate() error {
|
||||||
if obj.State == "" {
|
if obj.State == "" {
|
||||||
return false
|
return fmt.Errorf("State cannot be empty!")
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
// It uses the PackageKit UpdatesChanged signal to watch for changes.
|
// It uses the PackageKit UpdatesChanged signal to watch for changes.
|
||||||
// TODO: https://github.com/hughsie/PackageKit/issues/109
|
// TODO: https://github.com/hughsie/PackageKit/issues/109
|
||||||
// TODO: https://github.com/hughsie/PackageKit/issues/110
|
// TODO: https://github.com/hughsie/PackageKit/issues/110
|
||||||
func (obj *PkgRes) Watch(processChan chan Event) {
|
func (obj *PkgRes) Watch(processChan chan event.Event) error {
|
||||||
if obj.IsWatching() {
|
if obj.IsWatching() {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
obj.SetWatching(true)
|
obj.SetWatching(true)
|
||||||
defer obj.SetWatching(false)
|
defer obj.SetWatching(false)
|
||||||
cuuid := obj.converger.Register()
|
cuid := obj.converger.Register()
|
||||||
defer cuuid.Unregister()
|
defer cuid.Unregister()
|
||||||
|
|
||||||
bus := NewBus()
|
var startup bool
|
||||||
|
Startup := func(block bool) <-chan time.Time {
|
||||||
|
if block {
|
||||||
|
return nil // blocks forever
|
||||||
|
//return make(chan time.Time) // blocks forever
|
||||||
|
}
|
||||||
|
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
||||||
|
}
|
||||||
|
|
||||||
|
bus := packagekit.NewBus()
|
||||||
if bus == nil {
|
if bus == nil {
|
||||||
log.Fatal("Can't connect to PackageKit bus.")
|
log.Fatal("Can't connect to PackageKit bus.")
|
||||||
}
|
}
|
||||||
@@ -133,17 +143,17 @@ func (obj *PkgRes) Watch(processChan chan Event) {
|
|||||||
var dirty = false
|
var dirty = false
|
||||||
|
|
||||||
for {
|
for {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v: Watching...", obj.fmtNames(obj.getNames()))
|
log.Printf("%v: Watching...", obj.fmtNames(obj.getNames()))
|
||||||
}
|
}
|
||||||
|
|
||||||
obj.SetState(resStateWatching) // reset
|
obj.SetState(ResStateWatching) // reset
|
||||||
select {
|
select {
|
||||||
case event := <-ch:
|
case event := <-ch:
|
||||||
cuuid.SetConverged(false)
|
cuid.SetConverged(false)
|
||||||
|
|
||||||
// FIXME: ask packagekit for info on what packages changed
|
// FIXME: ask packagekit for info on what packages changed
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v: Event: %v", obj.fmtNames(obj.getNames()), event.Name)
|
log.Printf("%v: Event: %v", obj.fmtNames(obj.getNames()), event.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -156,29 +166,35 @@ func (obj *PkgRes) Watch(processChan chan Event) {
|
|||||||
send = true
|
send = true
|
||||||
dirty = true
|
dirty = true
|
||||||
|
|
||||||
case event := <-obj.events:
|
case event := <-obj.Events():
|
||||||
cuuid.SetConverged(false)
|
cuid.SetConverged(false)
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
if exit, send = obj.ReadEvent(&event); exit {
|
||||||
return // exit
|
return nil // exit
|
||||||
}
|
}
|
||||||
dirty = false // these events don't invalidate state
|
dirty = false // these events don't invalidate state
|
||||||
|
|
||||||
case <-cuuid.ConvergedTimer():
|
case <-cuid.ConvergedTimer():
|
||||||
cuuid.SetConverged(true) // converged!
|
cuid.SetConverged(true) // converged!
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
case <-Startup(startup):
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
send = true
|
||||||
|
dirty = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// do all our event sending all together to avoid duplicate msgs
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
if send {
|
if send {
|
||||||
|
startup = true // startup finished
|
||||||
send = false
|
send = false
|
||||||
// only invalid state on certain types of events
|
// only invalid state on certain types of events
|
||||||
if dirty {
|
if dirty {
|
||||||
dirty = false
|
dirty = false
|
||||||
obj.isStateOK = false // something made state dirty
|
obj.isStateOK = false // something made state dirty
|
||||||
}
|
}
|
||||||
resp := NewResp()
|
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
||||||
processChan <- Event{eventNil, resp, "", true} // trigger process
|
return err // we exit or bubble up a NACK...
|
||||||
resp.ACKWait() // wait for the ACK()
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -220,23 +236,23 @@ func (obj *PkgRes) groupMappingHelper() map[string]string {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (obj *PkgRes) pkgMappingHelper(bus *Conn) (map[string]*PkPackageIDActionData, error) {
|
func (obj *PkgRes) pkgMappingHelper(bus *packagekit.Conn) (map[string]*packagekit.PkPackageIDActionData, error) {
|
||||||
packageMap := obj.groupMappingHelper() // get the grouped values
|
packageMap := obj.groupMappingHelper() // get the grouped values
|
||||||
packageMap[obj.Name] = obj.State // key is pkg name, value is pkg state
|
packageMap[obj.Name] = obj.State // key is pkg name, value is pkg state
|
||||||
var filter uint64 // initializes at the "zero" value of 0
|
var filter uint64 // initializes at the "zero" value of 0
|
||||||
filter += PK_FILTER_ENUM_ARCH // always search in our arch (optional!)
|
filter += packagekit.PK_FILTER_ENUM_ARCH // always search in our arch (optional!)
|
||||||
// we're requesting latest version, or to narrow down install choices!
|
// we're requesting latest version, or to narrow down install choices!
|
||||||
if obj.State == "newest" || obj.State == "installed" {
|
if obj.State == "newest" || obj.State == "installed" {
|
||||||
// if we add this, we'll still see older packages if installed
|
// if we add this, we'll still see older packages if installed
|
||||||
// this is an optimization, and is *optional*, this logic is
|
// this is an optimization, and is *optional*, this logic is
|
||||||
// handled inside of PackagesToPackageIDs now automatically!
|
// handled inside of PackagesToPackageIDs now automatically!
|
||||||
filter += PK_FILTER_ENUM_NEWEST // only search for newest packages
|
filter += packagekit.PK_FILTER_ENUM_NEWEST // only search for newest packages
|
||||||
}
|
}
|
||||||
if !obj.AllowNonFree {
|
if !obj.AllowNonFree {
|
||||||
filter += PK_FILTER_ENUM_FREE
|
filter += packagekit.PK_FILTER_ENUM_FREE
|
||||||
}
|
}
|
||||||
if !obj.AllowUnsupported {
|
if !obj.AllowUnsupported {
|
||||||
filter += PK_FILTER_ENUM_SUPPORTED
|
filter += packagekit.PK_FILTER_ENUM_SUPPORTED
|
||||||
}
|
}
|
||||||
result, e := bus.PackagesToPackageIDs(packageMap, filter)
|
result, e := bus.PackagesToPackageIDs(packageMap, filter)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
@@ -258,7 +274,7 @@ func (obj *PkgRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
bus := NewBus()
|
bus := packagekit.NewBus()
|
||||||
if bus == nil {
|
if bus == nil {
|
||||||
return false, errors.New("Can't connect to PackageKit bus.")
|
return false, errors.New("Can't connect to PackageKit bus.")
|
||||||
}
|
}
|
||||||
@@ -271,18 +287,18 @@ func (obj *PkgRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
|
|
||||||
packageMap := obj.groupMappingHelper() // map[string]string
|
packageMap := obj.groupMappingHelper() // map[string]string
|
||||||
packageList := []string{obj.Name}
|
packageList := []string{obj.Name}
|
||||||
packageList = append(packageList, StrMapKeys(packageMap)...)
|
packageList = append(packageList, util.StrMapKeys(packageMap)...)
|
||||||
//stateList := []string{obj.State}
|
//stateList := []string{obj.State}
|
||||||
//stateList = append(stateList, StrMapValues(packageMap)...)
|
//stateList = append(stateList, util.StrMapValues(packageMap)...)
|
||||||
|
|
||||||
// TODO: at the moment, all the states are the same, but
|
// TODO: at the moment, all the states are the same, but
|
||||||
// eventually we might be able to drop this constraint!
|
// eventually we might be able to drop this constraint!
|
||||||
states, err := FilterState(result, packageList, obj.State)
|
states, err := packagekit.FilterState(result, packageList, obj.State)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("The FilterState method failed with: %v.", err)
|
return false, fmt.Errorf("The FilterState method failed with: %v.", err)
|
||||||
}
|
}
|
||||||
data, _ := result[obj.Name] // if above didn't error, we won't either!
|
data, _ := result[obj.Name] // if above didn't error, we won't either!
|
||||||
validState := BoolMapTrue(BoolMapValues(states))
|
validState := util.BoolMapTrue(util.BoolMapValues(states))
|
||||||
|
|
||||||
// obj.State == "installed" || "uninstalled" || "newest" || "4.2-1.fc23"
|
// obj.State == "installed" || "uninstalled" || "newest" || "4.2-1.fc23"
|
||||||
switch obj.State {
|
switch obj.State {
|
||||||
@@ -309,20 +325,20 @@ func (obj *PkgRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
|
|
||||||
// apply portion
|
// apply portion
|
||||||
log.Printf("%v: Apply", obj.fmtNames(obj.getNames()))
|
log.Printf("%v: Apply", obj.fmtNames(obj.getNames()))
|
||||||
readyPackages, err := FilterPackageState(result, packageList, obj.State)
|
readyPackages, err := packagekit.FilterPackageState(result, packageList, obj.State)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err // fail
|
return false, err // fail
|
||||||
}
|
}
|
||||||
// these are the packages that actually need their states applied!
|
// these are the packages that actually need their states applied!
|
||||||
applyPackages := StrFilterElementsInList(readyPackages, packageList)
|
applyPackages := util.StrFilterElementsInList(readyPackages, packageList)
|
||||||
packageIDs, _ := FilterPackageIDs(result, applyPackages) // would be same err as above
|
packageIDs, _ := packagekit.FilterPackageIDs(result, applyPackages) // would be same err as above
|
||||||
|
|
||||||
var transactionFlags uint64 // initializes at the "zero" value of 0
|
var transactionFlags uint64 // initializes at the "zero" value of 0
|
||||||
if !obj.AllowUntrusted { // allow
|
if !obj.AllowUntrusted { // allow
|
||||||
transactionFlags += PK_TRANSACTION_FLAG_ENUM_ONLY_TRUSTED
|
transactionFlags += packagekit.PK_TRANSACTION_FLAG_ENUM_ONLY_TRUSTED
|
||||||
}
|
}
|
||||||
// apply correct state!
|
// apply correct state!
|
||||||
log.Printf("%v: Set: %v...", obj.fmtNames(StrListIntersection(applyPackages, obj.getNames())), obj.State)
|
log.Printf("%v: Set: %v...", obj.fmtNames(util.StrListIntersection(applyPackages, obj.getNames())), obj.State)
|
||||||
switch obj.State {
|
switch obj.State {
|
||||||
case "uninstalled": // run remove
|
case "uninstalled": // run remove
|
||||||
// NOTE: packageID is different than when installed, because now
|
// NOTE: packageID is different than when installed, because now
|
||||||
@@ -340,22 +356,21 @@ func (obj *PkgRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err // fail
|
return false, err // fail
|
||||||
}
|
}
|
||||||
log.Printf("%v: Set: %v success!", obj.fmtNames(StrListIntersection(applyPackages, obj.getNames())), obj.State)
|
log.Printf("%v: Set: %v success!", obj.fmtNames(util.StrListIntersection(applyPackages, obj.getNames())), obj.State)
|
||||||
obj.isStateOK = true // reset
|
obj.isStateOK = true // reset
|
||||||
return false, nil // success
|
return false, nil // success
|
||||||
}
|
}
|
||||||
|
|
||||||
// PkgUUID is the UUID struct for PkgRes.
|
// PkgUID is the UID struct for PkgRes.
|
||||||
type PkgUUID struct {
|
type PkgUID struct {
|
||||||
BaseUUID
|
BaseUID
|
||||||
name string // pkg name
|
name string // pkg name
|
||||||
state string // pkg state or "version"
|
state string // pkg state or "version"
|
||||||
}
|
}
|
||||||
|
|
||||||
// if and only if they are equivalent, return true
|
// IFF aka if and only if they are equivalent, return true. If not, false.
|
||||||
// if they are not equivalent, return false
|
func (obj *PkgUID) IFF(uid ResUID) bool {
|
||||||
func (obj *PkgUUID) IFF(uuid ResUUID) bool {
|
res, ok := uid.(*PkgUID)
|
||||||
res, ok := uuid.(*PkgUUID)
|
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -366,30 +381,30 @@ func (obj *PkgUUID) IFF(uuid ResUUID) bool {
|
|||||||
// PkgResAutoEdges holds the state of the auto edge generator.
|
// PkgResAutoEdges holds the state of the auto edge generator.
|
||||||
type PkgResAutoEdges struct {
|
type PkgResAutoEdges struct {
|
||||||
fileList []string
|
fileList []string
|
||||||
svcUUIDs []ResUUID
|
svcUIDs []ResUID
|
||||||
testIsNext bool // safety
|
testIsNext bool // safety
|
||||||
name string // saved data from PkgRes obj
|
name string // saved data from PkgRes obj
|
||||||
kind string
|
kind string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Next returns the next automatic edge.
|
// Next returns the next automatic edge.
|
||||||
func (obj *PkgResAutoEdges) Next() []ResUUID {
|
func (obj *PkgResAutoEdges) Next() []ResUID {
|
||||||
if obj.testIsNext {
|
if obj.testIsNext {
|
||||||
log.Fatal("Expecting a call to Test()")
|
log.Fatal("Expecting a call to Test()")
|
||||||
}
|
}
|
||||||
obj.testIsNext = true // set after all the errors paths are past
|
obj.testIsNext = true // set after all the errors paths are past
|
||||||
|
|
||||||
// first return any matching svcUUIDs
|
// first return any matching svcUIDs
|
||||||
if x := obj.svcUUIDs; len(x) > 0 {
|
if x := obj.svcUIDs; len(x) > 0 {
|
||||||
return x
|
return x
|
||||||
}
|
}
|
||||||
|
|
||||||
var result []ResUUID
|
var result []ResUID
|
||||||
// return UUID's for whatever is in obj.fileList
|
// return UID's for whatever is in obj.fileList
|
||||||
for _, x := range obj.fileList {
|
for _, x := range obj.fileList {
|
||||||
var reversed = false // cheat by passing a pointer
|
var reversed = false // cheat by passing a pointer
|
||||||
result = append(result, &FileUUID{
|
result = append(result, &FileUID{
|
||||||
BaseUUID: BaseUUID{
|
BaseUID: BaseUID{
|
||||||
name: obj.name,
|
name: obj.name,
|
||||||
kind: obj.kind,
|
kind: obj.kind,
|
||||||
reversed: &reversed,
|
reversed: &reversed,
|
||||||
@@ -406,12 +421,12 @@ func (obj *PkgResAutoEdges) Test(input []bool) bool {
|
|||||||
log.Fatal("Expecting a call to Next()")
|
log.Fatal("Expecting a call to Next()")
|
||||||
}
|
}
|
||||||
|
|
||||||
// ack the svcUUID's...
|
// ack the svcUID's...
|
||||||
if x := obj.svcUUIDs; len(x) > 0 {
|
if x := obj.svcUIDs; len(x) > 0 {
|
||||||
if y := len(x); y != len(input) {
|
if y := len(x); y != len(input) {
|
||||||
log.Fatalf("Expecting %d value(s)!", y)
|
log.Fatalf("Expecting %d value(s)!", y)
|
||||||
}
|
}
|
||||||
obj.svcUUIDs = []ResUUID{} // empty
|
obj.svcUIDs = []ResUID{} // empty
|
||||||
obj.testIsNext = false
|
obj.testIsNext = false
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
@@ -434,16 +449,16 @@ func (obj *PkgResAutoEdges) Test(input []bool) bool {
|
|||||||
var dirs = make([]string, count)
|
var dirs = make([]string, count)
|
||||||
done := []string{}
|
done := []string{}
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
dir := Dirname(obj.fileList[i]) // dirname of /foo/ should be /
|
dir := util.Dirname(obj.fileList[i]) // dirname of /foo/ should be /
|
||||||
dirs[i] = dir
|
dirs[i] = dir
|
||||||
if input[i] {
|
if input[i] {
|
||||||
done = append(done, dir)
|
done = append(done, dir)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
nodupes := StrRemoveDuplicatesInList(dirs) // remove duplicates
|
nodupes := util.StrRemoveDuplicatesInList(dirs) // remove duplicates
|
||||||
nodones := StrFilterElementsInList(done, nodupes) // filter out done
|
nodones := util.StrFilterElementsInList(done, nodupes) // filter out done
|
||||||
noempty := StrFilterElementsInList([]string{""}, nodones) // remove the "" from /
|
noempty := util.StrFilterElementsInList([]string{""}, nodones) // remove the "" from /
|
||||||
obj.fileList = RemoveCommonFilePrefixes(noempty) // magic
|
obj.fileList = util.RemoveCommonFilePrefixes(noempty) // magic
|
||||||
|
|
||||||
if len(obj.fileList) == 0 { // nothing more, don't continue
|
if len(obj.fileList) == 0 { // nothing more, don't continue
|
||||||
return false
|
return false
|
||||||
@@ -459,37 +474,37 @@ func (obj *PkgRes) AutoEdges() AutoEdge {
|
|||||||
// is contained in the Test() method! This design is completely okay!
|
// is contained in the Test() method! This design is completely okay!
|
||||||
|
|
||||||
// add matches for any svc resources found in pkg definition!
|
// add matches for any svc resources found in pkg definition!
|
||||||
var svcUUIDs []ResUUID
|
var svcUIDs []ResUID
|
||||||
for _, x := range ReturnSvcInFileList(obj.fileList) {
|
for _, x := range ReturnSvcInFileList(obj.fileList) {
|
||||||
var reversed = false
|
var reversed = false
|
||||||
svcUUIDs = append(svcUUIDs, &SvcUUID{
|
svcUIDs = append(svcUIDs, &SvcUID{
|
||||||
BaseUUID: BaseUUID{
|
BaseUID: BaseUID{
|
||||||
name: obj.GetName(),
|
name: obj.GetName(),
|
||||||
kind: obj.Kind(),
|
kind: obj.Kind(),
|
||||||
reversed: &reversed,
|
reversed: &reversed,
|
||||||
},
|
},
|
||||||
name: x, // the svc name itself in the SvcUUID object!
|
name: x, // the svc name itself in the SvcUID object!
|
||||||
}) // build list
|
}) // build list
|
||||||
}
|
}
|
||||||
|
|
||||||
return &PkgResAutoEdges{
|
return &PkgResAutoEdges{
|
||||||
fileList: RemoveCommonFilePrefixes(obj.fileList), // clean start!
|
fileList: util.RemoveCommonFilePrefixes(obj.fileList), // clean start!
|
||||||
svcUUIDs: svcUUIDs,
|
svcUIDs: svcUIDs,
|
||||||
testIsNext: false, // start with Next() call
|
testIsNext: false, // start with Next() call
|
||||||
name: obj.GetName(), // save data for PkgResAutoEdges obj
|
name: obj.GetName(), // save data for PkgResAutoEdges obj
|
||||||
kind: obj.Kind(),
|
kind: obj.Kind(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUUIDs includes all params to make a unique identification of this object.
|
// GetUIDs includes all params to make a unique identification of this object.
|
||||||
// Most resources only return one, although some resources can return multiple.
|
// Most resources only return one, although some resources can return multiple.
|
||||||
func (obj *PkgRes) GetUUIDs() []ResUUID {
|
func (obj *PkgRes) GetUIDs() []ResUID {
|
||||||
x := &PkgUUID{
|
x := &PkgUID{
|
||||||
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
name: obj.Name,
|
name: obj.Name,
|
||||||
state: obj.State,
|
state: obj.State,
|
||||||
}
|
}
|
||||||
result := []ResUUID{x}
|
result := []ResUID{x}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -545,7 +560,7 @@ func (obj *PkgRes) Compare(res Res) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// return a list of svc names for matches like /usr/lib/systemd/system/*.service
|
// ReturnSvcInFileList returns a list of svc names for matches like: `/usr/lib/systemd/system/*.service`.
|
||||||
func ReturnSvcInFileList(fileList []string) []string {
|
func ReturnSvcInFileList(fileList []string) []string {
|
||||||
result := []string{}
|
result := []string{}
|
||||||
for _, x := range fileList {
|
for _, x := range fileList {
|
||||||
@@ -557,7 +572,7 @@ func ReturnSvcInFileList(fileList []string) []string {
|
|||||||
if !strings.HasSuffix(basename, ".service") {
|
if !strings.HasSuffix(basename, ".service") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if s := strings.TrimSuffix(basename, ".service"); !StrInList(s, result) {
|
if s := strings.TrimSuffix(basename, ".service"); !util.StrInList(s, result) {
|
||||||
result = append(result, s)
|
result = append(result, s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -15,7 +15,8 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
// Package resources provides the resource framework and idempotent primitives.
|
||||||
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
@@ -23,30 +24,38 @@ import (
|
|||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
|
|
||||||
|
// TODO: should each resource be a sub-package?
|
||||||
|
"github.com/purpleidea/mgmt/converger"
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate stringer -type=resState -output=resstate_stringer.go
|
//go:generate stringer -type=ResState -output=resstate_stringer.go
|
||||||
type resState int
|
|
||||||
|
|
||||||
|
// The ResState type represents the current activity state of each resource.
|
||||||
|
type ResState int
|
||||||
|
|
||||||
|
// Each ResState should be set properly in the relevant part of the resource.
|
||||||
const (
|
const (
|
||||||
resStateNil resState = iota
|
ResStateNil ResState = iota
|
||||||
resStateWatching
|
ResStateWatching
|
||||||
resStateEvent // an event has happened, but we haven't poked yet
|
ResStateEvent // an event has happened, but we haven't poked yet
|
||||||
resStateCheckApply
|
ResStateCheckApply
|
||||||
resStatePoking
|
ResStatePoking
|
||||||
)
|
)
|
||||||
|
|
||||||
// ResUUID is a unique identifier for a resource, namely it's name, and the kind ("type").
|
// ResUID is a unique identifier for a resource, namely it's name, and the kind ("type").
|
||||||
type ResUUID interface {
|
type ResUID interface {
|
||||||
GetName() string
|
GetName() string
|
||||||
Kind() string
|
Kind() string
|
||||||
IFF(ResUUID) bool
|
IFF(ResUID) bool
|
||||||
|
|
||||||
Reversed() bool // true means this resource happens before the generator
|
Reversed() bool // true means this resource happens before the generator
|
||||||
}
|
}
|
||||||
|
|
||||||
// The BaseUUID struct is used to provide a unique resource identifier.
|
// The BaseUID struct is used to provide a unique resource identifier.
|
||||||
type BaseUUID struct {
|
type BaseUID struct {
|
||||||
name string // name and kind are the values of where this is coming from
|
name string // name and kind are the values of where this is coming from
|
||||||
kind string
|
kind string
|
||||||
|
|
||||||
@@ -55,15 +64,43 @@ type BaseUUID struct {
|
|||||||
|
|
||||||
// The AutoEdge interface is used to implement the autoedges feature.
|
// The AutoEdge interface is used to implement the autoedges feature.
|
||||||
type AutoEdge interface {
|
type AutoEdge interface {
|
||||||
Next() []ResUUID // call to get list of edges to add
|
Next() []ResUID // call to get list of edges to add
|
||||||
Test([]bool) bool // call until false
|
Test([]bool) bool // call until false
|
||||||
}
|
}
|
||||||
|
|
||||||
// MetaParams is a struct will all params that apply to every resource.
|
// MetaParams is a struct will all params that apply to every resource.
|
||||||
type MetaParams struct {
|
type MetaParams struct {
|
||||||
AutoEdge bool `yaml:"autoedge"` // metaparam, should we generate auto edges? // XXX should default to true
|
AutoEdge bool `yaml:"autoedge"` // metaparam, should we generate auto edges?
|
||||||
AutoGroup bool `yaml:"autogroup"` // metaparam, should we auto group? // XXX should default to true
|
AutoGroup bool `yaml:"autogroup"` // metaparam, should we auto group?
|
||||||
Noop bool `yaml:"noop"`
|
Noop bool `yaml:"noop"`
|
||||||
|
// NOTE: there are separate Watch and CheckApply retry and delay values,
|
||||||
|
// but I've decided to use the same ones for both until there's a proper
|
||||||
|
// reason to want to do something differently for the Watch errors.
|
||||||
|
Retry int16 `yaml:"retry"` // metaparam, number of times to retry on error. -1 for infinite
|
||||||
|
Delay uint64 `yaml:"delay"` // metaparam, number of milliseconds to wait between retries
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML is the custom unmarshal handler for the MetaParams struct. It
|
||||||
|
// is primarily useful for setting the defaults.
|
||||||
|
func (obj *MetaParams) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
type rawMetaParams MetaParams // indirection to avoid infinite recursion
|
||||||
|
raw := rawMetaParams(DefaultMetaParams) // convert; the defaults go here
|
||||||
|
|
||||||
|
if err := unmarshal(&raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*obj = MetaParams(raw) // restore from indirection with type conversion!
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultMetaParams are the defaults to be used for undefined metaparams.
|
||||||
|
var DefaultMetaParams = MetaParams{
|
||||||
|
AutoEdge: true,
|
||||||
|
AutoGroup: true,
|
||||||
|
Noop: false,
|
||||||
|
Retry: 0, // TODO: is this a good default?
|
||||||
|
Delay: 0, // TODO: is this a good default?
|
||||||
}
|
}
|
||||||
|
|
||||||
// The Base interface is everything that is common to all resources.
|
// The Base interface is everything that is common to all resources.
|
||||||
@@ -71,16 +108,18 @@ type MetaParams struct {
|
|||||||
type Base interface {
|
type Base interface {
|
||||||
GetName() string // can't be named "Name()" because of struct field
|
GetName() string // can't be named "Name()" because of struct field
|
||||||
SetName(string)
|
SetName(string)
|
||||||
setKind(string)
|
SetKind(string)
|
||||||
Kind() string
|
Kind() string
|
||||||
Meta() *MetaParams
|
Meta() *MetaParams
|
||||||
AssociateData(Converger)
|
Events() chan event.Event
|
||||||
|
AssociateData(converger.Converger)
|
||||||
IsWatching() bool
|
IsWatching() bool
|
||||||
SetWatching(bool)
|
SetWatching(bool)
|
||||||
GetState() resState
|
GetState() ResState
|
||||||
SetState(resState)
|
SetState(ResState)
|
||||||
SendEvent(eventName, bool, bool) bool
|
DoSend(chan event.Event, string) (bool, error)
|
||||||
ReadEvent(*Event) (bool, bool) // TODO: optional here?
|
SendEvent(event.EventName, bool, bool) bool
|
||||||
|
ReadEvent(*event.Event) (bool, bool) // TODO: optional here?
|
||||||
GroupCmp(Res) bool // TODO: is there a better name for this?
|
GroupCmp(Res) bool // TODO: is there a better name for this?
|
||||||
GroupRes(Res) error // group resource (arg) into self
|
GroupRes(Res) error // group resource (arg) into self
|
||||||
IsGrouped() bool // am I grouped?
|
IsGrouped() bool // am I grouped?
|
||||||
@@ -92,11 +131,11 @@ type Base interface {
|
|||||||
// Res is the minimum interface you need to implement to define a new resource.
|
// Res is the minimum interface you need to implement to define a new resource.
|
||||||
type Res interface {
|
type Res interface {
|
||||||
Base // include everything from the Base interface
|
Base // include everything from the Base interface
|
||||||
Init()
|
Init() error
|
||||||
//Validate() bool // TODO: this might one day be added
|
//Validate() error // TODO: this might one day be added
|
||||||
GetUUIDs() []ResUUID // most resources only return one
|
GetUIDs() []ResUID // most resources only return one
|
||||||
Watch(chan Event) // send on channel to signal process() events
|
Watch(chan event.Event) error // send on channel to signal process() events
|
||||||
CheckApply(bool) (bool, error)
|
CheckApply(apply bool) (checkOK bool, err error)
|
||||||
AutoEdges() AutoEdge
|
AutoEdges() AutoEdge
|
||||||
Compare(Res) bool
|
Compare(Res) bool
|
||||||
CollectPattern(string) // XXX: temporary until Res collection is more advanced
|
CollectPattern(string) // XXX: temporary until Res collection is more advanced
|
||||||
@@ -107,19 +146,19 @@ type BaseRes struct {
|
|||||||
Name string `yaml:"name"`
|
Name string `yaml:"name"`
|
||||||
MetaParams MetaParams `yaml:"meta"` // struct of all the metaparams
|
MetaParams MetaParams `yaml:"meta"` // struct of all the metaparams
|
||||||
kind string
|
kind string
|
||||||
events chan Event
|
events chan event.Event
|
||||||
converger Converger // converged tracking
|
converger converger.Converger // converged tracking
|
||||||
state resState
|
state ResState
|
||||||
watching bool // is Watch() loop running ?
|
watching bool // is Watch() loop running ?
|
||||||
isStateOK bool // whether the state is okay based on events or not
|
isStateOK bool // whether the state is okay based on events or not
|
||||||
isGrouped bool // am i contained within a group?
|
isGrouped bool // am i contained within a group?
|
||||||
grouped []Res // list of any grouped resources
|
grouped []Res // list of any grouped resources
|
||||||
}
|
}
|
||||||
|
|
||||||
// UUIDExistsInUUIDs wraps the IFF method when used with a list of UUID's.
|
// UIDExistsInUIDs wraps the IFF method when used with a list of UID's.
|
||||||
func UUIDExistsInUUIDs(uuid ResUUID, uuids []ResUUID) bool {
|
func UIDExistsInUIDs(uid ResUID, uids []ResUID) bool {
|
||||||
for _, u := range uuids {
|
for _, u := range uids {
|
||||||
if uuid.IFF(u) {
|
if uid.IFF(u) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -127,30 +166,30 @@ func UUIDExistsInUUIDs(uuid ResUUID, uuids []ResUUID) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetName returns the name of the resource.
|
// GetName returns the name of the resource.
|
||||||
func (obj *BaseUUID) GetName() string {
|
func (obj *BaseUID) GetName() string {
|
||||||
return obj.name
|
return obj.name
|
||||||
}
|
}
|
||||||
|
|
||||||
// Kind returns the kind of resource.
|
// Kind returns the kind of resource.
|
||||||
func (obj *BaseUUID) Kind() string {
|
func (obj *BaseUID) Kind() string {
|
||||||
return obj.kind
|
return obj.kind
|
||||||
}
|
}
|
||||||
|
|
||||||
// IFF looks at two UUID's and if and only if they are equivalent, returns true.
|
// IFF looks at two UID's and if and only if they are equivalent, returns true.
|
||||||
// If they are not equivalent, it returns false.
|
// If they are not equivalent, it returns false.
|
||||||
// Most resources will want to override this method, since it does the important
|
// Most resources will want to override this method, since it does the important
|
||||||
// work of actually discerning if two resources are identical in function.
|
// work of actually discerning if two resources are identical in function.
|
||||||
func (obj *BaseUUID) IFF(uuid ResUUID) bool {
|
func (obj *BaseUID) IFF(uid ResUID) bool {
|
||||||
res, ok := uuid.(*BaseUUID)
|
res, ok := uid.(*BaseUID)
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return obj.name == res.name
|
return obj.name == res.name
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reversed is part of the ResUUID interface, and true means this resource
|
// Reversed is part of the ResUID interface, and true means this resource
|
||||||
// happens before the generator.
|
// happens before the generator.
|
||||||
func (obj *BaseUUID) Reversed() bool {
|
func (obj *BaseUID) Reversed() bool {
|
||||||
if obj.reversed == nil {
|
if obj.reversed == nil {
|
||||||
log.Fatal("Programming error!")
|
log.Fatal("Programming error!")
|
||||||
}
|
}
|
||||||
@@ -158,8 +197,12 @@ func (obj *BaseUUID) Reversed() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Init initializes structures like channels if created without New constructor.
|
// Init initializes structures like channels if created without New constructor.
|
||||||
func (obj *BaseRes) Init() {
|
func (obj *BaseRes) Init() error {
|
||||||
obj.events = make(chan Event) // unbuffered chan size to avoid stale events
|
if obj.kind == "" {
|
||||||
|
return fmt.Errorf("Resource did not set kind!")
|
||||||
|
}
|
||||||
|
obj.events = make(chan event.Event) // unbuffered chan size to avoid stale events
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetName is used by all the resources to Get the name.
|
// GetName is used by all the resources to Get the name.
|
||||||
@@ -172,8 +215,8 @@ func (obj *BaseRes) SetName(name string) {
|
|||||||
obj.Name = name
|
obj.Name = name
|
||||||
}
|
}
|
||||||
|
|
||||||
// setKind sets the kind. This is used internally for exported resources.
|
// SetKind sets the kind. This is used internally for exported resources.
|
||||||
func (obj *BaseRes) setKind(kind string) {
|
func (obj *BaseRes) SetKind(kind string) {
|
||||||
obj.kind = kind
|
obj.kind = kind
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -187,8 +230,13 @@ func (obj *BaseRes) Meta() *MetaParams {
|
|||||||
return &obj.MetaParams
|
return &obj.MetaParams
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Events returns the channel of events to listen on.
|
||||||
|
func (obj *BaseRes) Events() chan event.Event {
|
||||||
|
return obj.events
|
||||||
|
}
|
||||||
|
|
||||||
// AssociateData associates some data with the object in question.
|
// AssociateData associates some data with the object in question.
|
||||||
func (obj *BaseRes) AssociateData(converger Converger) {
|
func (obj *BaseRes) AssociateData(converger converger.Converger) {
|
||||||
obj.converger = converger
|
obj.converger = converger
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -203,65 +251,86 @@ func (obj *BaseRes) SetWatching(b bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetState returns the state of the resource.
|
// GetState returns the state of the resource.
|
||||||
func (obj *BaseRes) GetState() resState {
|
func (obj *BaseRes) GetState() ResState {
|
||||||
return obj.state
|
return obj.state
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetState sets the state of the resource.
|
// SetState sets the state of the resource.
|
||||||
func (obj *BaseRes) SetState(state resState) {
|
func (obj *BaseRes) SetState(state ResState) {
|
||||||
if DEBUG {
|
if global.DEBUG {
|
||||||
log.Printf("%v[%v]: State: %v -> %v", obj.Kind(), obj.GetName(), obj.GetState(), state)
|
log.Printf("%v[%v]: State: %v -> %v", obj.Kind(), obj.GetName(), obj.GetState(), state)
|
||||||
}
|
}
|
||||||
obj.state = state
|
obj.state = state
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoSend sends off an event, but doesn't block the incoming event queue. It can
|
||||||
|
// also recursively call itself when events need processing during the wait.
|
||||||
|
// I'm not completely comfortable with this fn, but it will have to do for now.
|
||||||
|
func (obj *BaseRes) DoSend(processChan chan event.Event, comment string) (bool, error) {
|
||||||
|
resp := event.NewResp()
|
||||||
|
processChan <- event.Event{Name: event.EventNil, Resp: resp, Msg: comment, Activity: true} // trigger process
|
||||||
|
e := resp.Wait()
|
||||||
|
return false, e // XXX: at the moment, we don't use the exit bool.
|
||||||
|
// XXX: this can cause a deadlock. do we need to recursively send? fix event stuff!
|
||||||
|
//select {
|
||||||
|
//case e := <-resp: // wait for the ACK()
|
||||||
|
// if e != nil { // we got a NACK
|
||||||
|
// return true, e // exit with error
|
||||||
|
// }
|
||||||
|
//case event := <-obj.events:
|
||||||
|
// // NOTE: this code should match the similar code below!
|
||||||
|
// //cuid.SetConverged(false) // TODO: ?
|
||||||
|
// if exit, send := obj.ReadEvent(&event); exit {
|
||||||
|
// return true, nil // exit, without error
|
||||||
|
// } else if send {
|
||||||
|
// return obj.DoSend(processChan, comment) // recurse
|
||||||
|
// }
|
||||||
|
//}
|
||||||
|
//return false, nil // return, no error or exit signal
|
||||||
|
}
|
||||||
|
|
||||||
// SendEvent pushes an event into the message queue for a particular vertex
|
// SendEvent pushes an event into the message queue for a particular vertex
|
||||||
func (obj *BaseRes) SendEvent(event eventName, sync bool, activity bool) bool {
|
func (obj *BaseRes) SendEvent(ev event.EventName, sync bool, activity bool) bool {
|
||||||
// TODO: isn't this race-y ?
|
// TODO: isn't this race-y ?
|
||||||
if !obj.IsWatching() { // element has already exited
|
if !obj.IsWatching() { // element has already exited
|
||||||
return false // if we don't return, we'll block on the send
|
return false // if we don't return, we'll block on the send
|
||||||
}
|
}
|
||||||
if !sync {
|
if !sync {
|
||||||
obj.events <- Event{event, nil, "", activity}
|
obj.events <- event.Event{Name: ev, Resp: nil, Msg: "", Activity: activity}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
resp := make(chan bool)
|
resp := event.NewResp()
|
||||||
obj.events <- Event{event, resp, "", activity}
|
obj.events <- event.Event{Name: ev, Resp: resp, Msg: "", Activity: activity}
|
||||||
for {
|
resp.ACKWait() // waits until true (nil) value
|
||||||
value := <-resp
|
|
||||||
// wait until true value
|
|
||||||
if value {
|
|
||||||
return true
|
return true
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadEvent processes events when a select gets one, and handles the pause
|
// ReadEvent processes events when a select gets one, and handles the pause
|
||||||
// code too! The return values specify if we should exit and poke respectively.
|
// code too! The return values specify if we should exit and poke respectively.
|
||||||
func (obj *BaseRes) ReadEvent(event *Event) (exit, poke bool) {
|
func (obj *BaseRes) ReadEvent(ev *event.Event) (exit, poke bool) {
|
||||||
event.ACK()
|
ev.ACK()
|
||||||
switch event.Name {
|
switch ev.Name {
|
||||||
case eventStart:
|
case event.EventStart:
|
||||||
return false, true
|
return false, true
|
||||||
|
|
||||||
case eventPoke:
|
case event.EventPoke:
|
||||||
return false, true
|
return false, true
|
||||||
|
|
||||||
case eventBackPoke:
|
case event.EventBackPoke:
|
||||||
return false, true // forward poking in response to a back poke!
|
return false, true // forward poking in response to a back poke!
|
||||||
|
|
||||||
case eventExit:
|
case event.EventExit:
|
||||||
return true, false
|
return true, false
|
||||||
|
|
||||||
case eventPause:
|
case event.EventPause:
|
||||||
// wait for next event to continue
|
// wait for next event to continue
|
||||||
select {
|
select {
|
||||||
case e := <-obj.events:
|
case e := <-obj.Events():
|
||||||
e.ACK()
|
e.ACK()
|
||||||
if e.Name == eventExit {
|
if e.Name == event.EventExit {
|
||||||
return true, false
|
return true, false
|
||||||
} else if e.Name == eventStart { // eventContinue
|
} else if e.Name == event.EventStart { // eventContinue
|
||||||
return false, false // don't poke on unpause!
|
return false, false // don't poke on unpause!
|
||||||
} else {
|
} else {
|
||||||
// if we get a poke event here, it's a bug!
|
// if we get a poke event here, it's a bug!
|
||||||
@@ -270,7 +339,7 @@ func (obj *BaseRes) ReadEvent(event *Event) (exit, poke bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
log.Fatal("Unknown event: ", event)
|
log.Fatal("Unknown event: ", ev)
|
||||||
}
|
}
|
||||||
return true, false // required to keep the stupid go compiler happy
|
return true, false // required to keep the stupid go compiler happy
|
||||||
}
|
}
|
||||||
@@ -317,6 +386,13 @@ func (obj *BaseRes) SetGroup(g []Res) {
|
|||||||
|
|
||||||
// Compare is the base compare method, which also handles the metaparams cmp
|
// Compare is the base compare method, which also handles the metaparams cmp
|
||||||
func (obj *BaseRes) Compare(res Res) bool {
|
func (obj *BaseRes) Compare(res Res) bool {
|
||||||
|
// TODO: should the AutoEdge values be compared?
|
||||||
|
if obj.Meta().AutoEdge != res.Meta().AutoEdge {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Meta().AutoGroup != res.Meta().AutoGroup {
|
||||||
|
return false
|
||||||
|
}
|
||||||
if obj.Meta().Noop != res.Meta().Noop {
|
if obj.Meta().Noop != res.Meta().Noop {
|
||||||
// obj is the existing res, res is the *new* resource
|
// obj is the existing res, res is the *new* resource
|
||||||
// if we go from no-noop -> noop, we can re-use the obj
|
// if we go from no-noop -> noop, we can re-use the obj
|
||||||
@@ -325,6 +401,12 @@ func (obj *BaseRes) Compare(res Res) bool {
|
|||||||
return false // going from noop to no-noop!
|
return false // going from noop to no-noop!
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if obj.Meta().Retry != res.Meta().Retry {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Meta().Delay != res.Meta().Delay {
|
||||||
|
return false
|
||||||
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -15,7 +15,7 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
@@ -105,16 +105,16 @@ func TestMiscEncodeDecode2(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestIFF(t *testing.T) {
|
func TestIFF(t *testing.T) {
|
||||||
uuid := &BaseUUID{name: "/tmp/unit-test"}
|
uid := &BaseUID{name: "/tmp/unit-test"}
|
||||||
same := &BaseUUID{name: "/tmp/unit-test"}
|
same := &BaseUID{name: "/tmp/unit-test"}
|
||||||
diff := &BaseUUID{name: "/tmp/other-file"}
|
diff := &BaseUID{name: "/tmp/other-file"}
|
||||||
|
|
||||||
if !uuid.IFF(same) {
|
if !uid.IFF(same) {
|
||||||
t.Error("basic resource UUIDs with the same name should satisfy each other's IFF condition.")
|
t.Error("basic resource UIDs with the same name should satisfy each other's IFF condition.")
|
||||||
}
|
}
|
||||||
|
|
||||||
if uuid.IFF(diff) {
|
if uid.IFF(diff) {
|
||||||
t.Error("basic resource UUIDs with different names should NOT satisfy each other's IFF condition.")
|
t.Error("basic resource UIDs with different names should NOT satisfy each other's IFF condition.")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -134,7 +134,7 @@ func TestReadEvent(t *testing.T) {
|
|||||||
eventExit: false,
|
eventExit: false,
|
||||||
}
|
}
|
||||||
|
|
||||||
for event, _ := range shouldExit {
|
for event := range shouldExit {
|
||||||
exit, poke := res.ReadEvent(&Event{Name: event})
|
exit, poke := res.ReadEvent(&Event{Name: event})
|
||||||
if exit != shouldExit[event] {
|
if exit != shouldExit[event] {
|
||||||
t.Errorf("resource.ReadEvent returned wrong exit flag for a %v event (%v, should be %v)",
|
t.Errorf("resource.ReadEvent returned wrong exit flag for a %v event (%v, should be %v)",
|
||||||
@@ -17,16 +17,21 @@
|
|||||||
|
|
||||||
// DOCS: https://godoc.org/github.com/coreos/go-systemd/dbus
|
// DOCS: https://godoc.org/github.com/coreos/go-systemd/dbus
|
||||||
|
|
||||||
package main
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
|
||||||
systemd "github.com/coreos/go-systemd/dbus" // change namespace
|
systemd "github.com/coreos/go-systemd/dbus" // change namespace
|
||||||
systemdUtil "github.com/coreos/go-systemd/util"
|
systemdUtil "github.com/coreos/go-systemd/util"
|
||||||
"github.com/godbus/dbus" // namespace collides with systemd wrapper
|
"github.com/godbus/dbus" // namespace collides with systemd wrapper
|
||||||
"log"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -41,7 +46,7 @@ type SvcRes struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewSvcRes is a constructor for this resource. It also calls Init() for you.
|
// NewSvcRes is a constructor for this resource. It also calls Init() for you.
|
||||||
func NewSvcRes(name, state, startup string) *SvcRes {
|
func NewSvcRes(name, state, startup string) (*SvcRes, error) {
|
||||||
obj := &SvcRes{
|
obj := &SvcRes{
|
||||||
BaseRes: BaseRes{
|
BaseRes: BaseRes{
|
||||||
Name: name,
|
Name: name,
|
||||||
@@ -49,52 +54,60 @@ func NewSvcRes(name, state, startup string) *SvcRes {
|
|||||||
State: state,
|
State: state,
|
||||||
Startup: startup,
|
Startup: startup,
|
||||||
}
|
}
|
||||||
obj.Init()
|
return obj, obj.Init()
|
||||||
return obj
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init runs some startup code for this resource.
|
// Init runs some startup code for this resource.
|
||||||
func (obj *SvcRes) Init() {
|
func (obj *SvcRes) Init() error {
|
||||||
obj.BaseRes.kind = "Svc"
|
obj.BaseRes.kind = "Svc"
|
||||||
obj.BaseRes.Init() // call base init, b/c we're overriding
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate checks if the resource data structure was populated correctly.
|
// Validate checks if the resource data structure was populated correctly.
|
||||||
func (obj *SvcRes) Validate() bool {
|
func (obj *SvcRes) Validate() error {
|
||||||
if obj.State != "running" && obj.State != "stopped" && obj.State != "" {
|
if obj.State != "running" && obj.State != "stopped" && obj.State != "" {
|
||||||
return false
|
return fmt.Errorf("State must be either `running` or `stopped` or undefined.")
|
||||||
}
|
}
|
||||||
if obj.Startup != "enabled" && obj.Startup != "disabled" && obj.Startup != "" {
|
if obj.Startup != "enabled" && obj.Startup != "disabled" && obj.Startup != "" {
|
||||||
return false
|
return fmt.Errorf("Startup must be either `enabled` or `disabled` or undefined.")
|
||||||
}
|
}
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
func (obj *SvcRes) Watch(processChan chan Event) {
|
func (obj *SvcRes) Watch(processChan chan event.Event) error {
|
||||||
if obj.IsWatching() {
|
if obj.IsWatching() {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
obj.SetWatching(true)
|
obj.SetWatching(true)
|
||||||
defer obj.SetWatching(false)
|
defer obj.SetWatching(false)
|
||||||
cuuid := obj.converger.Register()
|
cuid := obj.converger.Register()
|
||||||
defer cuuid.Unregister()
|
defer cuid.Unregister()
|
||||||
|
|
||||||
|
var startup bool
|
||||||
|
Startup := func(block bool) <-chan time.Time {
|
||||||
|
if block {
|
||||||
|
return nil // blocks forever
|
||||||
|
//return make(chan time.Time) // blocks forever
|
||||||
|
}
|
||||||
|
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
||||||
|
}
|
||||||
|
|
||||||
// obj.Name: svc name
|
// obj.Name: svc name
|
||||||
if !systemdUtil.IsRunningSystemd() {
|
if !systemdUtil.IsRunningSystemd() {
|
||||||
log.Fatal("Systemd is not running.")
|
return fmt.Errorf("Systemd is not running.")
|
||||||
}
|
}
|
||||||
|
|
||||||
conn, err := systemd.NewSystemdConnection() // needs root access
|
conn, err := systemd.NewSystemdConnection() // needs root access
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal("Failed to connect to systemd: ", err)
|
return fmt.Errorf("Failed to connect to systemd: %s", err)
|
||||||
}
|
}
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
|
|
||||||
// if we share the bus with others, we will get each others messages!!
|
// if we share the bus with others, we will get each others messages!!
|
||||||
bus, err := SystemBusPrivateUsable() // don't share the bus connection!
|
bus, err := util.SystemBusPrivateUsable() // don't share the bus connection!
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal("Failed to connect to bus: ", err)
|
return fmt.Errorf("Failed to connect to bus: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// XXX: will this detect new units?
|
// XXX: will this detect new units?
|
||||||
@@ -131,7 +144,7 @@ func (obj *SvcRes) Watch(processChan chan Event) {
|
|||||||
var notFound = (loadstate.Value == dbus.MakeVariant("not-found"))
|
var notFound = (loadstate.Value == dbus.MakeVariant("not-found"))
|
||||||
if notFound { // XXX: in the loop we'll handle changes better...
|
if notFound { // XXX: in the loop we'll handle changes better...
|
||||||
log.Printf("Failed to find svc: %v", svc)
|
log.Printf("Failed to find svc: %v", svc)
|
||||||
invalid = true // XXX ?
|
invalid = true // XXX: ?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -147,25 +160,30 @@ func (obj *SvcRes) Watch(processChan chan Event) {
|
|||||||
set.Remove(svc) // no return value should ever occur
|
set.Remove(svc) // no return value should ever occur
|
||||||
}
|
}
|
||||||
|
|
||||||
obj.SetState(resStateWatching) // reset
|
obj.SetState(ResStateWatching) // reset
|
||||||
select {
|
select {
|
||||||
case <-buschan: // XXX wait for new units event to unstick
|
case <-buschan: // XXX: wait for new units event to unstick
|
||||||
cuuid.SetConverged(false)
|
cuid.SetConverged(false)
|
||||||
// loop so that we can see the changed invalid signal
|
// loop so that we can see the changed invalid signal
|
||||||
log.Printf("Svc[%v]->DaemonReload()", svc)
|
log.Printf("Svc[%v]->DaemonReload()", svc)
|
||||||
|
|
||||||
case event := <-obj.events:
|
case event := <-obj.Events():
|
||||||
cuuid.SetConverged(false)
|
cuid.SetConverged(false)
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
if exit, send = obj.ReadEvent(&event); exit {
|
||||||
return // exit
|
return nil // exit
|
||||||
}
|
}
|
||||||
if event.GetActivity() {
|
if event.GetActivity() {
|
||||||
dirty = true
|
dirty = true
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-cuuid.ConvergedTimer():
|
case <-cuid.ConvergedTimer():
|
||||||
cuuid.SetConverged(true) // converged!
|
cuid.SetConverged(true) // converged!
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
case <-Startup(startup):
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
send = true
|
||||||
|
dirty = true
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if !activeSet {
|
if !activeSet {
|
||||||
@@ -174,7 +192,7 @@ func (obj *SvcRes) Watch(processChan chan Event) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("Watching: %v", svc) // attempting to watch...
|
log.Printf("Watching: %v", svc) // attempting to watch...
|
||||||
obj.SetState(resStateWatching) // reset
|
obj.SetState(ResStateWatching) // reset
|
||||||
select {
|
select {
|
||||||
case event := <-subChannel:
|
case event := <-subChannel:
|
||||||
|
|
||||||
@@ -182,12 +200,16 @@ func (obj *SvcRes) Watch(processChan chan Event) {
|
|||||||
// NOTE: the value returned is a map for some reason...
|
// NOTE: the value returned is a map for some reason...
|
||||||
if event[svc] != nil {
|
if event[svc] != nil {
|
||||||
// event[svc].ActiveState is not nil
|
// event[svc].ActiveState is not nil
|
||||||
if event[svc].ActiveState == "active" {
|
|
||||||
log.Printf("Svc[%v]->Started()", svc)
|
switch event[svc].ActiveState {
|
||||||
} else if event[svc].ActiveState == "inactive" {
|
case "active":
|
||||||
log.Printf("Svc[%v]->Stopped!()", svc)
|
log.Printf("Svc[%v]->Started", svc)
|
||||||
} else {
|
case "inactive":
|
||||||
log.Fatal("Unknown svc state: ", event[svc].ActiveState)
|
log.Printf("Svc[%v]->Stopped", svc)
|
||||||
|
case "reloading":
|
||||||
|
log.Printf("Svc[%v]->Reloading", svc)
|
||||||
|
default:
|
||||||
|
log.Fatalf("Unknown svc state: %s", event[svc].ActiveState)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// svc stopped (and ActiveState is nil...)
|
// svc stopped (and ActiveState is nil...)
|
||||||
@@ -197,37 +219,40 @@ func (obj *SvcRes) Watch(processChan chan Event) {
|
|||||||
dirty = true
|
dirty = true
|
||||||
|
|
||||||
case err := <-subErrors:
|
case err := <-subErrors:
|
||||||
cuuid.SetConverged(false) // XXX ?
|
cuid.SetConverged(false)
|
||||||
log.Printf("error: %v", err)
|
return fmt.Errorf("Unknown %s[%s] error: %v", obj.Kind(), obj.GetName(), err)
|
||||||
log.Fatal(err)
|
|
||||||
//vertex.events <- fmt.Sprintf("svc: %v", "error") // XXX: how should we handle errors?
|
|
||||||
|
|
||||||
case event := <-obj.events:
|
case event := <-obj.Events():
|
||||||
cuuid.SetConverged(false)
|
cuid.SetConverged(false)
|
||||||
if exit, send = obj.ReadEvent(&event); exit {
|
if exit, send = obj.ReadEvent(&event); exit {
|
||||||
return // exit
|
return nil // exit
|
||||||
}
|
}
|
||||||
if event.GetActivity() {
|
if event.GetActivity() {
|
||||||
dirty = true
|
dirty = true
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-cuuid.ConvergedTimer():
|
case <-cuid.ConvergedTimer():
|
||||||
cuuid.SetConverged(true) // converged!
|
cuid.SetConverged(true) // converged!
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
case <-Startup(startup):
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
send = true
|
||||||
|
dirty = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if send {
|
if send {
|
||||||
|
startup = true // startup finished
|
||||||
send = false
|
send = false
|
||||||
if dirty {
|
if dirty {
|
||||||
dirty = false
|
dirty = false
|
||||||
obj.isStateOK = false // something made state dirty
|
obj.isStateOK = false // something made state dirty
|
||||||
}
|
}
|
||||||
resp := NewResp()
|
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
||||||
processChan <- Event{eventNil, resp, "", true} // trigger process
|
return err // we exit or bubble up a NACK...
|
||||||
resp.ACKWait() // wait for the ACK()
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -273,7 +298,7 @@ func (obj *SvcRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
|
|
||||||
var running = (activestate.Value == dbus.MakeVariant("active"))
|
var running = (activestate.Value == dbus.MakeVariant("active"))
|
||||||
var stateOK = ((obj.State == "") || (obj.State == "running" && running) || (obj.State == "stopped" && !running))
|
var stateOK = ((obj.State == "") || (obj.State == "running" && running) || (obj.State == "stopped" && !running))
|
||||||
var startupOK = true // XXX DETECT AND SET
|
var startupOK = true // XXX: DETECT AND SET
|
||||||
|
|
||||||
if stateOK && startupOK {
|
if stateOK && startupOK {
|
||||||
return true, nil // we are in the correct state
|
return true, nil // we are in the correct state
|
||||||
@@ -326,20 +351,19 @@ func (obj *SvcRes) CheckApply(apply bool) (checkok bool, err error) {
|
|||||||
return false, nil // success
|
return false, nil // success
|
||||||
}
|
}
|
||||||
|
|
||||||
// SvcUUID is the UUID struct for SvcRes.
|
// SvcUID is the UID struct for SvcRes.
|
||||||
type SvcUUID struct {
|
type SvcUID struct {
|
||||||
// NOTE: there is also a name variable in the BaseUUID struct, this is
|
// NOTE: there is also a name variable in the BaseUID struct, this is
|
||||||
// information about where this UUID came from, and is unrelated to the
|
// information about where this UID came from, and is unrelated to the
|
||||||
// information about the resource we're matching. That data which is
|
// information about the resource we're matching. That data which is
|
||||||
// used in the IFF function, is what you see in the struct fields here.
|
// used in the IFF function, is what you see in the struct fields here.
|
||||||
BaseUUID
|
BaseUID
|
||||||
name string // the svc name
|
name string // the svc name
|
||||||
}
|
}
|
||||||
|
|
||||||
// if and only if they are equivalent, return true
|
// IFF aka if and only if they are equivalent, return true. If not, false.
|
||||||
// if they are not equivalent, return false
|
func (obj *SvcUID) IFF(uid ResUID) bool {
|
||||||
func (obj *SvcUUID) IFF(uuid ResUUID) bool {
|
res, ok := uid.(*SvcUID)
|
||||||
res, ok := uuid.(*SvcUUID)
|
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -348,13 +372,13 @@ func (obj *SvcUUID) IFF(uuid ResUUID) bool {
|
|||||||
|
|
||||||
// SvcResAutoEdges holds the state of the auto edge generator.
|
// SvcResAutoEdges holds the state of the auto edge generator.
|
||||||
type SvcResAutoEdges struct {
|
type SvcResAutoEdges struct {
|
||||||
data []ResUUID
|
data []ResUID
|
||||||
pointer int
|
pointer int
|
||||||
found bool
|
found bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Next returns the next automatic edge.
|
// Next returns the next automatic edge.
|
||||||
func (obj *SvcResAutoEdges) Next() []ResUUID {
|
func (obj *SvcResAutoEdges) Next() []ResUID {
|
||||||
if obj.found {
|
if obj.found {
|
||||||
log.Fatal("Shouldn't be called anymore!")
|
log.Fatal("Shouldn't be called anymore!")
|
||||||
}
|
}
|
||||||
@@ -363,7 +387,7 @@ func (obj *SvcResAutoEdges) Next() []ResUUID {
|
|||||||
}
|
}
|
||||||
value := obj.data[obj.pointer]
|
value := obj.data[obj.pointer]
|
||||||
obj.pointer++
|
obj.pointer++
|
||||||
return []ResUUID{value} // we return one, even though api supports N
|
return []ResUID{value} // we return one, even though api supports N
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test gets results of the earlier Next() call, & returns if we should continue!
|
// Test gets results of the earlier Next() call, & returns if we should continue!
|
||||||
@@ -385,17 +409,17 @@ func (obj *SvcResAutoEdges) Test(input []bool) bool {
|
|||||||
return true // keep going
|
return true // keep going
|
||||||
}
|
}
|
||||||
|
|
||||||
// The AutoEdges method returns the AutoEdges. In this case the systemd units.
|
// AutoEdges returns the AutoEdge interface. In this case the systemd units.
|
||||||
func (obj *SvcRes) AutoEdges() AutoEdge {
|
func (obj *SvcRes) AutoEdges() AutoEdge {
|
||||||
var data []ResUUID
|
var data []ResUID
|
||||||
svcFiles := []string{
|
svcFiles := []string{
|
||||||
fmt.Sprintf("/etc/systemd/system/%s.service", obj.Name), // takes precedence
|
fmt.Sprintf("/etc/systemd/system/%s.service", obj.Name), // takes precedence
|
||||||
fmt.Sprintf("/usr/lib/systemd/system/%s.service", obj.Name), // pkg default
|
fmt.Sprintf("/usr/lib/systemd/system/%s.service", obj.Name), // pkg default
|
||||||
}
|
}
|
||||||
for _, x := range svcFiles {
|
for _, x := range svcFiles {
|
||||||
var reversed = true
|
var reversed = true
|
||||||
data = append(data, &FileUUID{
|
data = append(data, &FileUID{
|
||||||
BaseUUID: BaseUUID{
|
BaseUID: BaseUID{
|
||||||
name: obj.GetName(),
|
name: obj.GetName(),
|
||||||
kind: obj.Kind(),
|
kind: obj.Kind(),
|
||||||
reversed: &reversed,
|
reversed: &reversed,
|
||||||
@@ -410,14 +434,14 @@ func (obj *SvcRes) AutoEdges() AutoEdge {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUUIDs includes all params to make a unique identification of this object.
|
// GetUIDs includes all params to make a unique identification of this object.
|
||||||
// Most resources only return one, although some resources can return multiple.
|
// Most resources only return one, although some resources can return multiple.
|
||||||
func (obj *SvcRes) GetUUIDs() []ResUUID {
|
func (obj *SvcRes) GetUIDs() []ResUID {
|
||||||
x := &SvcUUID{
|
x := &SvcUID{
|
||||||
BaseUUID: BaseUUID{name: obj.GetName(), kind: obj.Kind()},
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
name: obj.Name, // svc name
|
name: obj.Name, // svc name
|
||||||
}
|
}
|
||||||
return []ResUUID{x}
|
return []ResUID{x}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GroupCmp returns whether two resources can be grouped together or not.
|
// GroupCmp returns whether two resources can be grouped together or not.
|
||||||
@@ -15,12 +15,14 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
package resources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"log"
|
"log"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -33,93 +35,106 @@ type TimerRes struct {
|
|||||||
Interval int `yaml:"interval"` // Interval : Interval between runs
|
Interval int `yaml:"interval"` // Interval : Interval between runs
|
||||||
}
|
}
|
||||||
|
|
||||||
// TimerUUID is the UUID struct for TimerRes.
|
// TimerUID is the UID struct for TimerRes.
|
||||||
type TimerUUID struct {
|
type TimerUID struct {
|
||||||
BaseUUID
|
BaseUID
|
||||||
name string
|
name string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTimerRes is a constructor for this resource. It also calls Init() for you.
|
// NewTimerRes is a constructor for this resource. It also calls Init() for you.
|
||||||
func NewTimerRes(name string, interval int) *TimerRes {
|
func NewTimerRes(name string, interval int) (*TimerRes, error) {
|
||||||
obj := &TimerRes{
|
obj := &TimerRes{
|
||||||
BaseRes: BaseRes{
|
BaseRes: BaseRes{
|
||||||
Name: name,
|
Name: name,
|
||||||
},
|
},
|
||||||
Interval: interval,
|
Interval: interval,
|
||||||
}
|
}
|
||||||
obj.Init()
|
return obj, obj.Init()
|
||||||
return obj
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init runs some startup code for this resource.
|
// Init runs some startup code for this resource.
|
||||||
func (obj *TimerRes) Init() {
|
func (obj *TimerRes) Init() error {
|
||||||
obj.BaseRes.kind = "Timer"
|
obj.BaseRes.kind = "Timer"
|
||||||
obj.BaseRes.Init() // call base init, b/c we're overrriding
|
return obj.BaseRes.Init() // call base init, b/c we're overrriding
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate the params that are passed to TimerRes
|
// Validate the params that are passed to TimerRes
|
||||||
// Currently we are getting only an interval in seconds
|
// Currently we are getting only an interval in seconds
|
||||||
// which gets validated by go compiler
|
// which gets validated by go compiler
|
||||||
func (obj *TimerRes) Validate() bool {
|
func (obj *TimerRes) Validate() error {
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Watch is the primary listener for this resource and it outputs events.
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
func (obj *TimerRes) Watch(processChan chan Event) {
|
func (obj *TimerRes) Watch(processChan chan event.Event) error {
|
||||||
if obj.IsWatching() {
|
if obj.IsWatching() {
|
||||||
return
|
return nil
|
||||||
|
}
|
||||||
|
obj.SetWatching(true)
|
||||||
|
defer obj.SetWatching(false)
|
||||||
|
cuid := obj.converger.Register()
|
||||||
|
defer cuid.Unregister()
|
||||||
|
|
||||||
|
var startup bool
|
||||||
|
Startup := func(block bool) <-chan time.Time {
|
||||||
|
if block {
|
||||||
|
return nil // blocks forever
|
||||||
|
//return make(chan time.Time) // blocks forever
|
||||||
|
}
|
||||||
|
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a time.Ticker for the given interval
|
// Create a time.Ticker for the given interval
|
||||||
ticker := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
ticker := time.NewTicker(time.Duration(obj.Interval) * time.Second)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
|
||||||
obj.SetWatching(true)
|
|
||||||
defer obj.SetWatching(false)
|
|
||||||
cuuid := obj.converger.Register()
|
|
||||||
defer cuuid.Unregister()
|
|
||||||
|
|
||||||
var send = false
|
var send = false
|
||||||
|
|
||||||
for {
|
for {
|
||||||
obj.SetState(resStateWatching)
|
obj.SetState(ResStateWatching)
|
||||||
select {
|
select {
|
||||||
case <-ticker.C: // received the timer event
|
case <-ticker.C: // received the timer event
|
||||||
send = true
|
send = true
|
||||||
log.Printf("%v[%v]: received tick", obj.Kind(), obj.GetName())
|
log.Printf("%v[%v]: received tick", obj.Kind(), obj.GetName())
|
||||||
case event := <-obj.events:
|
|
||||||
cuuid.SetConverged(false)
|
case event := <-obj.Events():
|
||||||
|
cuid.SetConverged(false)
|
||||||
if exit, _ := obj.ReadEvent(&event); exit {
|
if exit, _ := obj.ReadEvent(&event); exit {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
case <-cuuid.ConvergedTimer():
|
case <-cuid.ConvergedTimer():
|
||||||
cuuid.SetConverged(true)
|
cuid.SetConverged(true)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
case <-Startup(startup):
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
send = true
|
||||||
}
|
}
|
||||||
if send {
|
if send {
|
||||||
|
startup = true // startup finished
|
||||||
send = false
|
send = false
|
||||||
obj.isStateOK = false
|
obj.isStateOK = false
|
||||||
resp := NewResp()
|
if exit, err := obj.DoSend(processChan, "timer ticked"); exit || err != nil {
|
||||||
processChan <- Event{eventNil, resp, "timer ticked", true}
|
return err // we exit or bubble up a NACK...
|
||||||
resp.ACKWait()
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetUUIDs includes all params to make a unique identification of this object.
|
// GetUIDs includes all params to make a unique identification of this object.
|
||||||
// Most resources only return one, although some resources can return multiple.
|
// Most resources only return one, although some resources can return multiple.
|
||||||
func (obj *TimerRes) GetUUIDs() []ResUUID {
|
func (obj *TimerRes) GetUIDs() []ResUID {
|
||||||
x := &TimerUUID{
|
x := &TimerUID{
|
||||||
BaseUUID: BaseUUID{
|
BaseUID: BaseUID{
|
||||||
name: obj.GetName(),
|
name: obj.GetName(),
|
||||||
kind: obj.Kind(),
|
kind: obj.Kind(),
|
||||||
},
|
},
|
||||||
name: obj.Name,
|
name: obj.Name,
|
||||||
}
|
}
|
||||||
return []ResUUID{x}
|
return []ResUID{x}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The AutoEdges method returns the AutoEdges. In this case none are used.
|
// AutoEdges returns the AutoEdge interface. In this case no autoedges are used.
|
||||||
func (obj *TimerRes) AutoEdges() AutoEdge {
|
func (obj *TimerRes) AutoEdges() AutoEdge {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
729
resources/virt.go
Normal file
@@ -0,0 +1,729 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package resources
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/gob"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/event"
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
|
||||||
|
errwrap "github.com/pkg/errors"
|
||||||
|
"github.com/rgbkrk/libvirt-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
gob.Register(&VirtRes{})
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
libvirtInitialized = false
|
||||||
|
)
|
||||||
|
|
||||||
|
// VirtRes is a libvirt resource. A transient virt resource, which has its state
|
||||||
|
// set to `shutoff` is one which does not exist. The parallel equivalent is a
|
||||||
|
// file resource which removes a particular path.
|
||||||
|
type VirtRes struct {
|
||||||
|
BaseRes `yaml:",inline"`
|
||||||
|
URI string `yaml:"uri"` // connection uri, eg: qemu:///session
|
||||||
|
State string `yaml:"state"` // running, paused, shutoff
|
||||||
|
Transient bool `yaml:"transient"` // defined (false) or undefined (true)
|
||||||
|
CPUs uint16 `yaml:"cpus"`
|
||||||
|
Memory uint64 `yaml:"memory"` // in KBytes
|
||||||
|
Boot []string `yaml:"boot"` // boot order. values: fd, hd, cdrom, network
|
||||||
|
Disk []diskDevice `yaml:"disk"`
|
||||||
|
CDRom []cdRomDevice `yaml:"cdrom"`
|
||||||
|
Network []networkDevice `yaml:"network"`
|
||||||
|
Filesystem []filesystemDevice `yaml:"filesystem"`
|
||||||
|
|
||||||
|
conn libvirt.VirConnection
|
||||||
|
absent bool // cached state
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewVirtRes is a constructor for this resource. It also calls Init() for you.
|
||||||
|
func NewVirtRes(name string, uri, state string, transient bool, cpus uint16, memory uint64) (*VirtRes, error) {
|
||||||
|
obj := &VirtRes{
|
||||||
|
BaseRes: BaseRes{
|
||||||
|
Name: name,
|
||||||
|
},
|
||||||
|
URI: uri,
|
||||||
|
State: state,
|
||||||
|
Transient: transient,
|
||||||
|
CPUs: cpus,
|
||||||
|
Memory: memory,
|
||||||
|
}
|
||||||
|
return obj, obj.Init()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init runs some startup code for this resource.
|
||||||
|
func (obj *VirtRes) Init() error {
|
||||||
|
if !libvirtInitialized {
|
||||||
|
if err := libvirt.EventRegisterDefaultImpl(); err != nil {
|
||||||
|
return errwrap.Wrapf(err, "EventRegisterDefaultImpl failed")
|
||||||
|
}
|
||||||
|
libvirtInitialized = true
|
||||||
|
}
|
||||||
|
|
||||||
|
obj.absent = (obj.Transient && obj.State == "shutoff") // machine shouldn't exist
|
||||||
|
|
||||||
|
obj.BaseRes.kind = "Virt"
|
||||||
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate if the params passed in are valid data.
|
||||||
|
func (obj *VirtRes) Validate() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
|
func (obj *VirtRes) Watch(processChan chan event.Event) error {
|
||||||
|
if obj.IsWatching() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
obj.SetWatching(true)
|
||||||
|
defer obj.SetWatching(false)
|
||||||
|
cuid := obj.converger.Register()
|
||||||
|
defer cuid.Unregister()
|
||||||
|
|
||||||
|
var startup bool
|
||||||
|
Startup := func(block bool) <-chan time.Time {
|
||||||
|
if block {
|
||||||
|
return nil // blocks forever
|
||||||
|
//return make(chan time.Time) // blocks forever
|
||||||
|
}
|
||||||
|
return time.After(time.Duration(500) * time.Millisecond) // 1/2 the resolution of converged timeout
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, err := libvirt.NewVirConnection(obj.URI)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Connection to libvirt failed with: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
eventChan := make(chan int) // TODO: do we need to buffer this?
|
||||||
|
errorChan := make(chan error)
|
||||||
|
exitChan := make(chan struct{})
|
||||||
|
defer close(exitChan)
|
||||||
|
|
||||||
|
// run libvirt event loop
|
||||||
|
// TODO: *trigger* EventRunDefaultImpl to unblock so it can shut down...
|
||||||
|
// at the moment this isn't a major issue because it seems to unblock in
|
||||||
|
// bursts every 5 seconds! we can do this by writing to an event handler
|
||||||
|
// in the meantime, terminating the program causes it to exit anyways...
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
// TODO: can we merge this into our main for loop below?
|
||||||
|
select {
|
||||||
|
case <-exitChan:
|
||||||
|
log.Printf("EventRunDefaultImpl exited!")
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
//log.Printf("EventRunDefaultImpl started!")
|
||||||
|
if err := libvirt.EventRunDefaultImpl(); err != nil {
|
||||||
|
errorChan <- errwrap.Wrapf(err, "EventRunDefaultImpl failed")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
//log.Printf("EventRunDefaultImpl looped!")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
callback := libvirt.DomainEventCallback(
|
||||||
|
func(c *libvirt.VirConnection, d *libvirt.VirDomain, eventDetails interface{}, f func()) int {
|
||||||
|
if lifecycleEvent, ok := eventDetails.(libvirt.DomainLifecycleEvent); ok {
|
||||||
|
domName, _ := d.GetName()
|
||||||
|
if domName == obj.GetName() {
|
||||||
|
eventChan <- lifecycleEvent.Event
|
||||||
|
}
|
||||||
|
} else if global.DEBUG {
|
||||||
|
log.Printf("%s[%s]: Event details isn't DomainLifecycleEvent", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
},
|
||||||
|
)
|
||||||
|
callbackID := conn.DomainEventRegister(
|
||||||
|
libvirt.VirDomain{},
|
||||||
|
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
|
||||||
|
&callback,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
defer conn.DomainEventDeregister(callbackID)
|
||||||
|
|
||||||
|
var send = false
|
||||||
|
var exit = false
|
||||||
|
var dirty = false
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case event := <-eventChan:
|
||||||
|
// TODO: shouldn't we do these checks in CheckApply ?
|
||||||
|
switch event {
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_DEFINED:
|
||||||
|
if obj.Transient {
|
||||||
|
dirty = true
|
||||||
|
send = true
|
||||||
|
}
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_UNDEFINED:
|
||||||
|
if !obj.Transient {
|
||||||
|
dirty = true
|
||||||
|
send = true
|
||||||
|
}
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_STARTED:
|
||||||
|
fallthrough
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_RESUMED:
|
||||||
|
if obj.State != "running" {
|
||||||
|
dirty = true
|
||||||
|
send = true
|
||||||
|
}
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
|
||||||
|
if obj.State != "paused" {
|
||||||
|
dirty = true
|
||||||
|
send = true
|
||||||
|
}
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_STOPPED:
|
||||||
|
fallthrough
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_SHUTDOWN:
|
||||||
|
if obj.State != "shutoff" {
|
||||||
|
dirty = true
|
||||||
|
send = true
|
||||||
|
}
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED:
|
||||||
|
fallthrough
|
||||||
|
case libvirt.VIR_DOMAIN_EVENT_CRASHED:
|
||||||
|
dirty = true
|
||||||
|
send = true
|
||||||
|
}
|
||||||
|
|
||||||
|
case err := <-errorChan:
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
return fmt.Errorf("Unknown %s[%s] libvirt error: %s", obj.Kind(), obj.GetName(), err)
|
||||||
|
|
||||||
|
case event := <-obj.Events():
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
if exit, send = obj.ReadEvent(&event); exit {
|
||||||
|
return nil // exit
|
||||||
|
}
|
||||||
|
|
||||||
|
case <-cuid.ConvergedTimer():
|
||||||
|
cuid.SetConverged(true) // converged!
|
||||||
|
continue
|
||||||
|
|
||||||
|
case <-Startup(startup):
|
||||||
|
cuid.SetConverged(false)
|
||||||
|
send = true
|
||||||
|
dirty = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if send {
|
||||||
|
startup = true // startup finished
|
||||||
|
send = false
|
||||||
|
// only invalid state on certain types of events
|
||||||
|
if dirty {
|
||||||
|
dirty = false
|
||||||
|
obj.isStateOK = false // something made state dirty
|
||||||
|
}
|
||||||
|
if exit, err := obj.DoSend(processChan, ""); exit || err != nil {
|
||||||
|
return err // we exit or bubble up a NACK...
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// attrCheckApply performs the CheckApply functions for CPU, Memory and others.
|
||||||
|
// This shouldn't be called when the machine is absent; it won't be found!
|
||||||
|
func (obj *VirtRes) attrCheckApply(apply bool) (bool, error) {
|
||||||
|
var checkOK = true
|
||||||
|
|
||||||
|
dom, err := obj.conn.LookupDomainByName(obj.GetName())
|
||||||
|
if err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "conn.LookupDomainByName failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
domInfo, err := dom.GetInfo()
|
||||||
|
if err != nil {
|
||||||
|
// we don't know if the state is ok
|
||||||
|
return false, errwrap.Wrapf(err, "domain.GetInfo failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// check memory
|
||||||
|
if domInfo.GetMemory() != obj.Memory {
|
||||||
|
checkOK = false
|
||||||
|
if !apply {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
if err := dom.SetMemory(obj.Memory); err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "domain.SetMemory failed")
|
||||||
|
}
|
||||||
|
log.Printf("%s[%s]: Memory changed", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
|
||||||
|
// check cpus
|
||||||
|
if domInfo.GetNrVirtCpu() != obj.CPUs {
|
||||||
|
checkOK = false
|
||||||
|
if !apply {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
if err := dom.SetVcpus(obj.CPUs); err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "domain.SetVcpus failed")
|
||||||
|
}
|
||||||
|
log.Printf("%s[%s]: CPUs changed", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
|
||||||
|
return checkOK, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// domainCreate creates a transient or persistent domain in the correct state. It
|
||||||
|
// doesn't check the state before hand, as it is a simple helper function.
|
||||||
|
func (obj *VirtRes) domainCreate() (libvirt.VirDomain, bool, error) {
|
||||||
|
|
||||||
|
if obj.Transient {
|
||||||
|
var flag uint32
|
||||||
|
var state string
|
||||||
|
switch obj.State {
|
||||||
|
case "running":
|
||||||
|
flag = libvirt.VIR_DOMAIN_NONE
|
||||||
|
state = "started"
|
||||||
|
case "paused":
|
||||||
|
flag = libvirt.VIR_DOMAIN_START_PAUSED
|
||||||
|
state = "paused"
|
||||||
|
case "shutoff":
|
||||||
|
// a transient, shutoff machine, means machine is absent
|
||||||
|
return libvirt.VirDomain{}, true, nil // returned dom is invalid
|
||||||
|
}
|
||||||
|
dom, err := obj.conn.DomainCreateXML(obj.getDomainXML(), flag)
|
||||||
|
if err != nil {
|
||||||
|
return dom, false, err // returned dom is invalid
|
||||||
|
}
|
||||||
|
log.Printf("%s[%s]: Domain transient %s", state, obj.Kind(), obj.GetName())
|
||||||
|
return dom, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dom, err := obj.conn.DomainDefineXML(obj.getDomainXML())
|
||||||
|
if err != nil {
|
||||||
|
return dom, false, err // returned dom is invalid
|
||||||
|
}
|
||||||
|
log.Printf("%s[%s]: Domain defined", obj.Kind(), obj.GetName())
|
||||||
|
|
||||||
|
if obj.State == "running" {
|
||||||
|
if err := dom.Create(); err != nil {
|
||||||
|
return dom, false, err
|
||||||
|
}
|
||||||
|
log.Printf("%s[%s]: Domain started", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.State == "paused" {
|
||||||
|
if err := dom.CreateWithFlags(libvirt.VIR_DOMAIN_START_PAUSED); err != nil {
|
||||||
|
return dom, false, err
|
||||||
|
}
|
||||||
|
log.Printf("%s[%s]: Domain created paused", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
|
||||||
|
return dom, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckApply checks the resource state and applies the resource if the bool
// input is true. It returns error info and if the state check passed or not.
//
// The sequence is: connect, look up (or create) the domain, converge the
// persistence (transient vs defined), converge the run state
// (running/paused/shutoff), then converge the mutable attributes (mem/cpu).
func (obj *VirtRes) CheckApply(apply bool) (bool, error) {
	log.Printf("%s[%s]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)

	if obj.isStateOK { // cache the state
		return true, nil
	}

	// open a fresh libvirt connection for this check/apply cycle
	var err error
	obj.conn, err = libvirt.NewVirConnection(obj.URI)
	if err != nil {
		return false, fmt.Errorf("Connection to libvirt failed with: %s", err)
	}

	var checkOK = true // set false whenever a change had to be applied

	dom, err := obj.conn.LookupDomainByName(obj.GetName())
	if err == nil {
		// pass
	} else if virErr, ok := err.(libvirt.VirError); ok && virErr.Domain == libvirt.VIR_FROM_QEMU && virErr.Code == libvirt.VIR_ERR_NO_DOMAIN {
		// domain not found
		if obj.absent {
			// absent and not found: nothing more to do
			obj.isStateOK = true
			return true, nil
		}

		if !apply {
			return false, nil
		}

		// create the missing domain; c reports if it was already correct
		var c = true
		dom, c, err = obj.domainCreate() // create the domain
		if err != nil {
			return false, errwrap.Wrapf(err, "domainCreate failed")
		} else if !c {
			checkOK = false
		}

	} else {
		return false, errwrap.Wrapf(err, "LookupDomainByName failed")
	}
	defer dom.Free()
	// domain exists

	domInfo, err := dom.GetInfo()
	if err != nil {
		// we don't know if the state is ok
		return false, errwrap.Wrapf(err, "domain.GetInfo failed")
	}
	isPersistent, err := dom.IsPersistent()
	if err != nil {
		// we don't know if the state is ok
		return false, errwrap.Wrapf(err, "domain.IsPersistent failed")
	}
	isActive, err := dom.IsActive()
	if err != nil {
		// we don't know if the state is ok
		return false, errwrap.Wrapf(err, "domain.IsActive failed")
	}

	// check for persistence
	if isPersistent == obj.Transient { // if they're different!
		if !apply {
			return false, nil
		}
		if isPersistent {
			// we want transient: drop the persistent definition
			if err := dom.Undefine(); err != nil {
				return false, errwrap.Wrapf(err, "domain.Undefine failed")
			}
			log.Printf("%s[%s]: Domain undefined", obj.Kind(), obj.GetName())
		} else {
			// we want persistent: re-define it from its inactive XML
			domXML, err := dom.GetXMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE)
			if err != nil {
				return false, errwrap.Wrapf(err, "domain.GetXMLDesc failed")
			}
			if _, err = obj.conn.DomainDefineXML(domXML); err != nil {
				return false, errwrap.Wrapf(err, "conn.DomainDefineXML failed")
			}
			log.Printf("%s[%s]: Domain defined", obj.Kind(), obj.GetName())
		}
		checkOK = false
	}

	// check for valid state
	domState := domInfo.GetState()
	switch obj.State {
	case "running":
		if domState == libvirt.VIR_DOMAIN_RUNNING {
			break
		}
		if domState == libvirt.VIR_DOMAIN_BLOCKED {
			// TODO: what should happen?
			return false, fmt.Errorf("Domain %s is blocked!", obj.GetName())
		}
		if !apply {
			return false, nil
		}
		if isActive { // domain must be paused ?
			if err := dom.Resume(); err != nil {
				return false, errwrap.Wrapf(err, "domain.Resume failed")
			}
			checkOK = false
			log.Printf("%s[%s]: Domain resumed", obj.Kind(), obj.GetName())
			break
		}
		// inactive: start it
		if err := dom.Create(); err != nil {
			return false, errwrap.Wrapf(err, "domain.Create failed")
		}
		checkOK = false
		log.Printf("%s[%s]: Domain created", obj.Kind(), obj.GetName())

	case "paused":
		if domState == libvirt.VIR_DOMAIN_PAUSED {
			break
		}
		if !apply {
			return false, nil
		}
		if isActive { // domain must be running ?
			if err := dom.Suspend(); err != nil {
				return false, errwrap.Wrapf(err, "domain.Suspend failed")
			}
			checkOK = false
			log.Printf("%s[%s]: Domain paused", obj.Kind(), obj.GetName())
			break
		}
		// inactive: start it directly into the paused state
		if err := dom.CreateWithFlags(libvirt.VIR_DOMAIN_START_PAUSED); err != nil {
			return false, errwrap.Wrapf(err, "domain.CreateWithFlags failed")
		}
		checkOK = false
		log.Printf("%s[%s]: Domain created paused", obj.Kind(), obj.GetName())

	case "shutoff":
		if domState == libvirt.VIR_DOMAIN_SHUTOFF || domState == libvirt.VIR_DOMAIN_SHUTDOWN {
			break
		}
		if !apply {
			return false, nil
		}

		if err := dom.Destroy(); err != nil {
			return false, errwrap.Wrapf(err, "domain.Destroy failed")
		}
		checkOK = false
		log.Printf("%s[%s]: Domain destroyed", obj.Kind(), obj.GetName())
	}

	if !apply {
		return false, nil
	}
	// remaining apply portion

	// mem & cpu checks...
	if !obj.absent {
		if c, err := obj.attrCheckApply(apply); err != nil {
			return false, errwrap.Wrapf(err, "attrCheckApply failed")
		} else if !c {
			checkOK = false
		}
	}

	if apply || checkOK {
		obj.isStateOK = true
	}
	return checkOK, nil // w00t
}
|
||||||
|
|
||||||
|
func (obj *VirtRes) getDomainXML() string {
|
||||||
|
var b string
|
||||||
|
b += "<domain type='kvm'>" // start domain
|
||||||
|
|
||||||
|
b += fmt.Sprintf("<name>%s</name>", obj.GetName())
|
||||||
|
b += fmt.Sprintf("<memory unit='KiB'>%d</memory>", obj.Memory)
|
||||||
|
b += fmt.Sprintf("<vcpu>%d</vcpu>", obj.CPUs)
|
||||||
|
|
||||||
|
b += "<os>"
|
||||||
|
b += "<type>hvm</type>"
|
||||||
|
if obj.Boot != nil {
|
||||||
|
for _, boot := range obj.Boot {
|
||||||
|
b += fmt.Sprintf("<boot dev='%s'/>", boot)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b += fmt.Sprintf("</os>")
|
||||||
|
|
||||||
|
b += fmt.Sprintf("<devices>") // start devices
|
||||||
|
|
||||||
|
if obj.Disk != nil {
|
||||||
|
for i, disk := range obj.Disk {
|
||||||
|
b += fmt.Sprintf(disk.GetXML(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.CDRom != nil {
|
||||||
|
for i, cdrom := range obj.CDRom {
|
||||||
|
b += fmt.Sprintf(cdrom.GetXML(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Network != nil {
|
||||||
|
for i, net := range obj.Network {
|
||||||
|
b += fmt.Sprintf(net.GetXML(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Filesystem != nil {
|
||||||
|
for i, fs := range obj.Filesystem {
|
||||||
|
b += fmt.Sprintf(fs.GetXML(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b += "<serial type='pty'><target port='0'/></serial>"
|
||||||
|
b += "<console type='pty'><target type='serial' port='0'/></console>"
|
||||||
|
b += "</devices>" // end devices
|
||||||
|
b += "</domain>" // end domain
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// virtDevice is the interface implemented by every device type here so that
// each can be rendered into a snippet of libvirt domain XML.
type virtDevice interface {
	// GetXML returns the device XML; idx distinguishes multiple devices
	// of the same kind (eg: it selects the target device name).
	GetXML(idx int) string
}
|
||||||
|
|
||||||
|
// diskDevice represents a virtual disk backed by a file.
type diskDevice struct {
	Source string `yaml:"source"` // path to the backing file
	Type   string `yaml:"type"`   // qemu driver type (eg: qcow2, raw)
}
|
||||||
|
|
||||||
|
// cdRomDevice represents a read-only cdrom device backed by a file.
type cdRomDevice struct {
	Source string `yaml:"source"` // path to the backing file (eg: an iso)
	Type   string `yaml:"type"`   // qemu driver type
}
|
||||||
|
|
||||||
|
// networkDevice represents a network interface attached to a libvirt network.
type networkDevice struct {
	Name string `yaml:"name"` // libvirt network to attach to
	MAC  string `yaml:"mac"`  // MAC address; randomly generated if empty
}
|
||||||
|
|
||||||
|
// filesystemDevice represents a host directory shared with the guest.
type filesystemDevice struct {
	Access   string `yaml:"access"`    // accessmode attribute; omitted if empty
	Source   string `yaml:"source"`    // host directory to share
	Target   string `yaml:"target"`    // target dir as seen by the guest
	ReadOnly bool   `yaml:"read_only"` // if true, export read-only
}
|
||||||
|
|
||||||
|
func (d *diskDevice) GetXML(idx int) string {
|
||||||
|
var b string
|
||||||
|
b += "<disk type='file' device='disk'>"
|
||||||
|
b += fmt.Sprintf("<driver name='qemu' type='%s'/>", d.Type)
|
||||||
|
b += fmt.Sprintf("<source file='%s'/>", d.Source)
|
||||||
|
b += fmt.Sprintf("<target dev='vd%s' bus='virtio'/>", (string)(idx+97)) // TODO: 26, 27... should be 'aa', 'ab'...
|
||||||
|
b += "</disk>"
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *cdRomDevice) GetXML(idx int) string {
|
||||||
|
var b string
|
||||||
|
b += "<disk type='file' device='cdrom'>"
|
||||||
|
b += fmt.Sprintf("<driver name='qemu' type='%s'/>", d.Type)
|
||||||
|
b += fmt.Sprintf("<source file='%s'/>", d.Source)
|
||||||
|
b += fmt.Sprintf("<target dev='hd%s' bus='ide'/>", (string)(idx+97)) // TODO: 26, 27... should be 'aa', 'ab'...
|
||||||
|
b += "<readonly/>"
|
||||||
|
b += "</disk>"
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *networkDevice) GetXML(idx int) string {
|
||||||
|
if d.MAC == "" {
|
||||||
|
d.MAC = randMAC()
|
||||||
|
}
|
||||||
|
var b string
|
||||||
|
b += "<interface type='network'>"
|
||||||
|
b += fmt.Sprintf("<mac address='%s'/>", d.MAC)
|
||||||
|
b += fmt.Sprintf("<source network='%s'/>", d.Name)
|
||||||
|
b += "</interface>"
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *filesystemDevice) GetXML(idx int) string {
|
||||||
|
var b string
|
||||||
|
b += "<filesystem" // open
|
||||||
|
if d.Access != "" {
|
||||||
|
b += fmt.Sprintf(" accessmode='%s'", d.Access)
|
||||||
|
}
|
||||||
|
b += ">" // close
|
||||||
|
b += fmt.Sprintf("<source dir='%s'/>", d.Source)
|
||||||
|
b += fmt.Sprintf("<target dir='%s'/>", d.Target)
|
||||||
|
if d.ReadOnly {
|
||||||
|
b += "<readonly/>"
|
||||||
|
}
|
||||||
|
b += "</filesystem>"
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// VirtUID is the UID struct for VirtRes.
type VirtUID struct {
	BaseUID
}
|
||||||
|
|
||||||
|
// GetUIDs includes all params to make a unique identification of this object.
|
||||||
|
// Most resources only return one, although some resources can return multiple.
|
||||||
|
func (obj *VirtRes) GetUIDs() []ResUID {
|
||||||
|
x := &VirtUID{
|
||||||
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
|
// TODO: add more properties here so we can link to vm dependencies
|
||||||
|
}
|
||||||
|
return []ResUID{x}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupCmp returns whether two resources can be grouped together or not.
|
||||||
|
func (obj *VirtRes) GroupCmp(r Res) bool {
|
||||||
|
_, ok := r.(*VirtRes)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return false // not possible atm
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoEdges returns the AutoEdge interface. In this case no autoedges are used.
func (obj *VirtRes) AutoEdges() AutoEdge {
	return nil
}
|
||||||
|
|
||||||
|
// Compare two resources and return if they are equivalent.
|
||||||
|
func (obj *VirtRes) Compare(res Res) bool {
|
||||||
|
switch res.(type) {
|
||||||
|
case *VirtRes:
|
||||||
|
res := res.(*VirtRes)
|
||||||
|
if !obj.BaseRes.Compare(res) { // call base Compare
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Name != res.Name {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.URI != res.URI {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.State != res.State {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Transient != res.Transient {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.CPUs != res.CPUs {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// TODO: can we skip the compare of certain properties such as
|
||||||
|
// Memory because this object (but with different memory) can be
|
||||||
|
// *converted* into the new version that has more/less memory?
|
||||||
|
// We would need to run some sort of "old struct update", to get
|
||||||
|
// the new values, but that's easy to add.
|
||||||
|
if obj.Memory != res.Memory {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// TODO:
|
||||||
|
//if obj.Boot != res.Boot {
|
||||||
|
// return false
|
||||||
|
//}
|
||||||
|
//if obj.Disk != res.Disk {
|
||||||
|
// return false
|
||||||
|
//}
|
||||||
|
//if obj.CDRom != res.CDRom {
|
||||||
|
// return false
|
||||||
|
//}
|
||||||
|
//if obj.Network != res.Network {
|
||||||
|
// return false
|
||||||
|
//}
|
||||||
|
//if obj.Filesystem != res.Filesystem {
|
||||||
|
// return false
|
||||||
|
//}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// CollectPattern applies the pattern for collection resources.
// It is currently a noop for this resource.
func (obj *VirtRes) CollectPattern(string) {
}
|
||||||
|
|
||||||
|
// randMAC returns a random mac address in the libvirt (52:54:00) range.
func randMAC() string {
	rand.Seed(time.Now().UnixNano()) // TODO: seed once globally instead
	// BUGFIX: use %02x so single-digit octets are zero padded (plain %x
	// could emit invalid addresses like 52:54:00:5:a:3), and Intn(256) so
	// the full 0x00-0xff octet range is reachable (Intn(255) excluded ff).
	return "52:54:00" +
		fmt.Sprintf(":%02x", rand.Intn(256)) +
		fmt.Sprintf(":%02x", rand.Intn(256)) +
		fmt.Sprintf(":%02x", rand.Intn(256))
}
|
||||||
7
spec.in
@@ -12,11 +12,14 @@ Source0: https://dl.fedoraproject.org/pub/alt/purpleidea/__PROGRAM__/SOURCES/__P
|
|||||||
# graphviz should really be a "suggests", since technically it's optional
|
# graphviz should really be a "suggests", since technically it's optional
|
||||||
Requires: graphviz
|
Requires: graphviz
|
||||||
|
|
||||||
BuildRequires: golang
|
# If go_compiler is not set to 1, there is no virtual provide. Use golang instead.
|
||||||
|
BuildRequires: %{?go_compiler:compiler(go-compiler)}%{!?go_compiler:golang}
|
||||||
BuildRequires: golang-googlecode-tools-stringer
|
BuildRequires: golang-googlecode-tools-stringer
|
||||||
BuildRequires: git-core
|
BuildRequires: git-core
|
||||||
BuildRequires: mercurial
|
BuildRequires: mercurial
|
||||||
|
|
||||||
|
ExclusiveArch: %{go_arches}
|
||||||
|
|
||||||
%description
|
%description
|
||||||
A next generation config management prototype!
|
A next generation config management prototype!
|
||||||
|
|
||||||
@@ -30,7 +33,7 @@ export GOPATH=`pwd`/vendor/
|
|||||||
go get github.com/coreos/etcd/client
|
go get github.com/coreos/etcd/client
|
||||||
go get gopkg.in/yaml.v2
|
go get gopkg.in/yaml.v2
|
||||||
go get gopkg.in/fsnotify.v1
|
go get gopkg.in/fsnotify.v1
|
||||||
go get github.com/codegangsta/cli
|
go get github.com/urfave/cli
|
||||||
go get github.com/coreos/go-systemd/dbus
|
go get github.com/coreos/go-systemd/dbus
|
||||||
go get github.com/coreos/go-systemd/util
|
go get github.com/coreos/go-systemd/util
|
||||||
make build
|
make build
|
||||||
|
|||||||
@@ -32,7 +32,7 @@
|
|||||||
- iptables -F
|
- iptables -F
|
||||||
- cd /vagrant/mgmt/ && make path
|
- cd /vagrant/mgmt/ && make path
|
||||||
- cd /vagrant/mgmt/ && make deps && make build && cp mgmt ~/bin/
|
- cd /vagrant/mgmt/ && make deps && make build && cp mgmt ~/bin/
|
||||||
- cd && mgmt run --file /vagrant/mgmt/examples/pkg1.yaml --converged-timeout=5
|
- cd && mgmt run --yaml /vagrant/mgmt/examples/pkg1.yaml --converged-timeout=5
|
||||||
:namespace: omv
|
:namespace: omv
|
||||||
:count: 0
|
:count: 0
|
||||||
:username: ''
|
:username: ''
|
||||||
|
|||||||
@@ -33,7 +33,7 @@
|
|||||||
- iptables -F
|
- iptables -F
|
||||||
- cd /vagrant/mgmt/ && make path
|
- cd /vagrant/mgmt/ && make path
|
||||||
- cd /vagrant/mgmt/ && make deps && make build && cp mgmt ~/bin/
|
- cd /vagrant/mgmt/ && make deps && make build && cp mgmt ~/bin/
|
||||||
- cd && mgmt run --file /vagrant/mgmt/examples/pkg1.yaml --converged-timeout=5
|
- cd && mgmt run --yaml /vagrant/mgmt/examples/pkg1.yaml --converged-timeout=5
|
||||||
:namespace: omv
|
:namespace: omv
|
||||||
:count: 0
|
:count: 0
|
||||||
:username: ''
|
:username: ''
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ if env | grep -q -e '^TRAVIS=true$'; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# run till completion
|
# run till completion
|
||||||
timeout --kill-after=15s 10s ./mgmt run --file t2.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
timeout --kill-after=15s 10s ./mgmt run --yaml t2.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
||||||
pid=$!
|
pid=$!
|
||||||
wait $pid # get exit status
|
wait $pid # get exit status
|
||||||
e=$?
|
e=$?
|
||||||
|
|||||||
@@ -10,11 +10,11 @@ fi
|
|||||||
mkdir -p "${MGMT_TMPDIR}"mgmt{A..C}
|
mkdir -p "${MGMT_TMPDIR}"mgmt{A..C}
|
||||||
|
|
||||||
# run till completion
|
# run till completion
|
||||||
timeout --kill-after=15s 10s ./mgmt run --file t3-a.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
timeout --kill-after=15s 10s ./mgmt run --yaml t3-a.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
||||||
pid1=$!
|
pid1=$!
|
||||||
timeout --kill-after=15s 10s ./mgmt run --file t3-b.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
timeout --kill-after=15s 10s ./mgmt run --yaml t3-b.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
||||||
pid2=$!
|
pid2=$!
|
||||||
timeout --kill-after=15s 10s ./mgmt run --file t3-c.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
timeout --kill-after=15s 10s ./mgmt run --yaml t3-c.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
||||||
pid3=$!
|
pid3=$!
|
||||||
|
|
||||||
wait $pid1 # get exit status
|
wait $pid1 # get exit status
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/bin/bash -e
|
#!/bin/bash -e
|
||||||
|
|
||||||
# should take slightly more than 25s, but fail if we take 35s)
|
# should take slightly more than 25s, but fail if we take 35s)
|
||||||
timeout --kill-after=35s 30s ./mgmt run --file t4.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
timeout --kill-after=35s 30s ./mgmt run --yaml t4.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
||||||
pid=$!
|
pid=$!
|
||||||
wait $pid # get exit status
|
wait $pid # get exit status
|
||||||
exit $?
|
exit $?
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/bin/bash -e
|
#!/bin/bash -e
|
||||||
|
|
||||||
# should take slightly more than 35s, but fail if we take 45s)
|
# should take slightly more than 35s, but fail if we take 45s)
|
||||||
timeout --kill-after=45s 40s ./mgmt run --file t5.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
timeout --kill-after=45s 40s ./mgmt run --yaml t5.yaml --converged-timeout=5 --no-watch --tmp-prefix &
|
||||||
pid=$!
|
pid=$!
|
||||||
wait $pid # get exit status
|
wait $pid # get exit status
|
||||||
exit $?
|
exit $?
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ if env | grep -q -e '^TRAVIS=true$'; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# run till completion
|
# run till completion
|
||||||
timeout --kill-after=20s 15s ./mgmt run --file t6.yaml --no-watch --tmp-prefix &
|
timeout --kill-after=20s 15s ./mgmt run --yaml t6.yaml --no-watch --tmp-prefix &
|
||||||
pid=$!
|
pid=$!
|
||||||
sleep 1s # let it converge
|
sleep 1s # let it converge
|
||||||
test -e /tmp/mgmt/f1
|
test -e /tmp/mgmt/f1
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
#!/bin/bash -e
|
#!/bin/bash -e
|
||||||
|
|
||||||
|
exit 0 # XXX: test temporarily disabled till etcd or mgmt regression is fixed.
|
||||||
|
|
||||||
# run empty graphs, we're just testing etcd clustering
|
# run empty graphs, we're just testing etcd clustering
|
||||||
timeout --kill-after=180s 120s ./mgmt run --hostname h1 --tmp-prefix &
|
timeout --kill-after=180s 120s ./mgmt run --hostname h1 --tmp-prefix &
|
||||||
pid1=$!
|
pid1=$!
|
||||||
|
|||||||
@@ -20,9 +20,10 @@ if [ "$COMMITS" != "" ] && [ "$COMMITS" -gt "1" ]; then
|
|||||||
HACK="yes"
|
HACK="yes"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
LINT=`golint` # current golint output
|
LINT=`find . -maxdepth 3 -iname '*.go' -not -path './old/*' -not -path './tmp/*' -exec golint {} \;` # current golint output
|
||||||
COUNT=`echo -e "$LINT" | wc -l` # number of golint problems in current branch
|
COUNT=`echo -e "$LINT" | wc -l` # number of golint problems in current branch
|
||||||
[ "$LINT" = "" ] && echo PASS && exit # everything is "perfect"
|
[ "$LINT" = "" ] && echo PASS && exit # everything is "perfect"
|
||||||
|
echo "$LINT" # display the issues
|
||||||
|
|
||||||
T=`mktemp --tmpdir -d tmp.XXX`
|
T=`mktemp --tmpdir -d tmp.XXX`
|
||||||
[ "$T" = "" ] && exit 1
|
[ "$T" = "" ] && exit 1
|
||||||
@@ -46,7 +47,7 @@ while read -r line; do
|
|||||||
done <<< "$NUMSTAT1" # three < is the secret to putting a variable into read
|
done <<< "$NUMSTAT1" # three < is the secret to putting a variable into read
|
||||||
|
|
||||||
git checkout "$PREVIOUS" &>/dev/null # previous commit
|
git checkout "$PREVIOUS" &>/dev/null # previous commit
|
||||||
LINT1=`golint`
|
LINT1=`find . -maxdepth 3 -iname '*.go' -not -path './old/*' -not -path './tmp/*' -exec golint {} \;`
|
||||||
COUNT1=`echo -e "$LINT1" | wc -l` # number of golint problems in older branch
|
COUNT1=`echo -e "$LINT1" | wc -l` # number of golint problems in older branch
|
||||||
|
|
||||||
# clean up
|
# clean up
|
||||||
|
|||||||
@@ -4,6 +4,9 @@ echo running test-govet.sh
|
|||||||
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" # dir!
|
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" # dir!
|
||||||
cd "${ROOT}"
|
cd "${ROOT}"
|
||||||
|
|
||||||
go vet && echo PASS || exit 1 # since it doesn't output an ok message on pass
|
for file in `find . -maxdepth 3 -type f -name '*.go' -not -path './old/*' -not -path './tmp/*'`; do
|
||||||
grep 'log.' *.go | grep '\\n"' && echo 'no \n needed in log.Printf()' && exit 1 || echo PASS # no \n needed in log.Printf()
|
go vet "$file" && echo PASS || exit 1 # since it doesn't output an ok message on pass
|
||||||
grep 'case _ = <-' *.go && echo 'case _ = <- can be simplified to: case <-' && exit 1 || echo PASS # this can be simplified
|
grep 'log.' "$file" | grep '\\n"' && echo 'no \n needed in log.Printf()' && exit 1 || echo PASS # no \n needed in log.Printf()
|
||||||
|
grep 'case _ = <-' "$file" && echo 'case _ = <- can be simplified to: case <-' && exit 1 || echo PASS # this can be simplified
|
||||||
|
grep -Ei "[\/]+[\/]+[ ]*+(FIXME[^:]|TODO[^:]|XXX[^:])" "$file" && echo 'Token is missing a colon' && exit 1 || echo PASS # tokens must end with a colon
|
||||||
|
done
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ done < "$FILE"
|
|||||||
cd "${ROOT}"
|
cd "${ROOT}"
|
||||||
|
|
||||||
find_files() {
|
find_files() {
|
||||||
git ls-files | grep '\.go$'
|
git ls-files | grep '\.go$' | grep -v '^examples/'
|
||||||
}
|
}
|
||||||
|
|
||||||
bad_files=$(
|
bad_files=$(
|
||||||
|
|||||||
@@ -15,14 +15,16 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
// Package util contains a collection of miscellaneous utility functions.
|
||||||
|
package util
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/godbus/dbus"
|
|
||||||
"path"
|
"path"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/godbus/dbus"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FirstToUpper returns the string with the first character capitalized.
|
// FirstToUpper returns the string with the first character capitalized.
|
||||||
@@ -270,11 +272,6 @@ func PathPrefixDelta(p, prefix string) int {
|
|||||||
return len(patharray) - len(prefixarray)
|
return len(patharray) - len(prefixarray)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PathIsDir returns true if there is a trailing slash.
|
|
||||||
func PathIsDir(p string) bool {
|
|
||||||
return p[len(p)-1:] == "/" // a dir has a trailing slash in this context
|
|
||||||
}
|
|
||||||
|
|
||||||
// PathSplitFullReversed returns the full list of "dependency" paths for a given
|
// PathSplitFullReversed returns the full list of "dependency" paths for a given
|
||||||
// path in reverse order.
|
// path in reverse order.
|
||||||
func PathSplitFullReversed(p string) []string {
|
func PathSplitFullReversed(p string) []string {
|
||||||
@@ -284,7 +281,7 @@ func PathSplitFullReversed(p string) []string {
|
|||||||
var x string
|
var x string
|
||||||
for i := 0; i < count; i++ {
|
for i := 0; i < count; i++ {
|
||||||
x = "/" + path.Join(split[0:i+1]...)
|
x = "/" + path.Join(split[0:i+1]...)
|
||||||
if i != 0 && !(i+1 == count && !PathIsDir(p)) {
|
if i != 0 && !(i+1 == count && !strings.HasSuffix(p, "/")) {
|
||||||
x += "/" // add trailing slash
|
x += "/" // add trailing slash
|
||||||
}
|
}
|
||||||
result = append(result, x)
|
result = append(result, x)
|
||||||
@@ -15,7 +15,7 @@
|
|||||||
// You should have received a copy of the GNU Affero General Public License
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
package main
|
package util
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
@@ -23,7 +23,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMiscT1(t *testing.T) {
|
func TestUtilT1(t *testing.T) {
|
||||||
|
|
||||||
if Dirname("/foo/bar/baz") != "/foo/bar/" {
|
if Dirname("/foo/bar/baz") != "/foo/bar/" {
|
||||||
t.Errorf("Result is incorrect.")
|
t.Errorf("Result is incorrect.")
|
||||||
@@ -62,7 +62,7 @@ func TestMiscT1(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMiscT2(t *testing.T) {
|
func TestUtilT2(t *testing.T) {
|
||||||
|
|
||||||
// TODO: compare the output with the actual list
|
// TODO: compare the output with the actual list
|
||||||
p0 := "/"
|
p0 := "/"
|
||||||
@@ -86,7 +86,7 @@ func TestMiscT2(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMiscT3(t *testing.T) {
|
func TestUtilT3(t *testing.T) {
|
||||||
|
|
||||||
if HasPathPrefix("/foo/bar/baz", "/foo/ba") != false {
|
if HasPathPrefix("/foo/bar/baz", "/foo/ba") != false {
|
||||||
t.Errorf("Result should be false.")
|
t.Errorf("Result should be false.")
|
||||||
@@ -117,7 +117,7 @@ func TestMiscT3(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMiscT4(t *testing.T) {
|
func TestUtilT4(t *testing.T) {
|
||||||
|
|
||||||
if PathPrefixDelta("/foo/bar/baz", "/foo/ba") != -1 {
|
if PathPrefixDelta("/foo/bar/baz", "/foo/ba") != -1 {
|
||||||
t.Errorf("Result should be -1.")
|
t.Errorf("Result should be -1.")
|
||||||
@@ -152,26 +152,7 @@ func TestMiscT4(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMiscT5(t *testing.T) {
|
func TestUtilT8(t *testing.T) {
|
||||||
|
|
||||||
if PathIsDir("/foo/bar/baz/") != true {
|
|
||||||
t.Errorf("Result should be false.")
|
|
||||||
}
|
|
||||||
|
|
||||||
if PathIsDir("/foo/bar/baz") != false {
|
|
||||||
t.Errorf("Result should be false.")
|
|
||||||
}
|
|
||||||
|
|
||||||
if PathIsDir("/foo/") != true {
|
|
||||||
t.Errorf("Result should be true.")
|
|
||||||
}
|
|
||||||
|
|
||||||
if PathIsDir("/") != true {
|
|
||||||
t.Errorf("Result should be true.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMiscT8(t *testing.T) {
|
|
||||||
|
|
||||||
r0 := []string{"/"}
|
r0 := []string{"/"}
|
||||||
if fullList0 := PathSplitFullReversed("/"); !reflect.DeepEqual(r0, fullList0) {
|
if fullList0 := PathSplitFullReversed("/"); !reflect.DeepEqual(r0, fullList0) {
|
||||||
@@ -190,7 +171,7 @@ func TestMiscT8(t *testing.T) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMiscT9(t *testing.T) {
|
func TestUtilT9(t *testing.T) {
|
||||||
fileListIn := []string{ // list taken from drbd-utils package
|
fileListIn := []string{ // list taken from drbd-utils package
|
||||||
"/etc/drbd.conf",
|
"/etc/drbd.conf",
|
||||||
"/etc/drbd.d/global_common.conf",
|
"/etc/drbd.d/global_common.conf",
|
||||||
@@ -334,7 +315,7 @@ func TestMiscT9(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMiscT10(t *testing.T) {
|
func TestUtilT10(t *testing.T) {
|
||||||
fileListIn := []string{ // fake package list
|
fileListIn := []string{ // fake package list
|
||||||
"/etc/drbd.conf",
|
"/etc/drbd.conf",
|
||||||
"/usr/share/man/man8/drbdsetup.8.gz",
|
"/usr/share/man/man8/drbdsetup.8.gz",
|
||||||
@@ -370,7 +351,7 @@ func TestMiscT10(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMiscT11(t *testing.T) {
|
func TestUtilT11(t *testing.T) {
|
||||||
in1 := []string{"/", "/usr/", "/usr/lib/", "/usr/share/"} // input
|
in1 := []string{"/", "/usr/", "/usr/lib/", "/usr/share/"} // input
|
||||||
ex1 := []string{"/usr/lib/", "/usr/share/"} // expected
|
ex1 := []string{"/usr/lib/", "/usr/share/"} // expected
|
||||||
sort.Strings(ex1)
|
sort.Strings(ex1)
|
||||||
@@ -743,7 +724,7 @@ func TestMiscT11(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMiscFlattenListWithSplit1(t *testing.T) {
|
func TestUtilFlattenListWithSplit1(t *testing.T) {
|
||||||
{
|
{
|
||||||
in := []string{} // input
|
in := []string{} // input
|
||||||
ex := []string{} // expected
|
ex := []string{} // expected
|
||||||
2
vendor/github.com/coreos/etcd
generated
vendored
1
vendor/github.com/grpc-ecosystem/grpc-gateway
generated
vendored
Submodule
1
vendor/google.golang.org/grpc
generated
vendored
Submodule
1
vendor/gopkg.in/fsnotify.v1
generated
vendored
Submodule
120
yamlgraph/gapi.go
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package yamlgraph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/gapi"
|
||||||
|
"github.com/purpleidea/mgmt/pgraph"
|
||||||
|
"github.com/purpleidea/mgmt/recwatch"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GAPI implements the main yamlgraph GAPI interface.
|
||||||
|
type GAPI struct {
|
||||||
|
File *string // yaml graph definition to use; nil if undefined
|
||||||
|
|
||||||
|
data gapi.Data
|
||||||
|
initialized bool
|
||||||
|
closeChan chan struct{}
|
||||||
|
wg sync.WaitGroup // sync group for tunnel go routines
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGAPI creates a new yamlgraph GAPI struct and calls Init().
|
||||||
|
func NewGAPI(data gapi.Data, file *string) (*GAPI, error) {
|
||||||
|
obj := &GAPI{
|
||||||
|
File: file,
|
||||||
|
}
|
||||||
|
return obj, obj.Init(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the yamlgraph GAPI struct.
|
||||||
|
func (obj *GAPI) Init(data gapi.Data) error {
|
||||||
|
if obj.initialized {
|
||||||
|
return fmt.Errorf("Already initialized!")
|
||||||
|
}
|
||||||
|
if obj.File == nil {
|
||||||
|
return fmt.Errorf("The File param must be specified!")
|
||||||
|
}
|
||||||
|
obj.data = data // store for later
|
||||||
|
obj.closeChan = make(chan struct{})
|
||||||
|
obj.initialized = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Graph returns a current Graph.
|
||||||
|
func (obj *GAPI) Graph() (*pgraph.Graph, error) {
|
||||||
|
if !obj.initialized {
|
||||||
|
return nil, fmt.Errorf("yamlgraph: GAPI is not initialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := ParseConfigFromFile(*obj.File)
|
||||||
|
if config == nil {
|
||||||
|
return nil, fmt.Errorf("yamlgraph: ParseConfigFromFile returned nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
g, err := config.NewGraphFromConfig(obj.data.Hostname, obj.data.EmbdEtcd, obj.data.Noop)
|
||||||
|
return g, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SwitchStream returns nil errors every time there could be a new graph.
|
||||||
|
func (obj *GAPI) SwitchStream() chan error {
|
||||||
|
if obj.data.NoWatch {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
ch := make(chan error)
|
||||||
|
obj.wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer obj.wg.Done()
|
||||||
|
defer close(ch) // this will run before the obj.wg.Done()
|
||||||
|
if !obj.initialized {
|
||||||
|
ch <- fmt.Errorf("yamlgraph: GAPI is not initialized")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
configChan := recwatch.ConfigWatch(*obj.File)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case err, ok := <-configChan: // returns nil events on ok!
|
||||||
|
if !ok { // the channel closed!
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("yamlgraph: Generating new graph...")
|
||||||
|
ch <- err // trigger a run
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case <-obj.closeChan:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return ch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts down the yamlgraph GAPI.
|
||||||
|
func (obj *GAPI) Close() error {
|
||||||
|
if !obj.initialized {
|
||||||
|
return fmt.Errorf("yamlgraph: GAPI is not initialized")
|
||||||
|
}
|
||||||
|
close(obj.closeChan)
|
||||||
|
obj.wg.Wait()
|
||||||
|
obj.initialized = false // closed = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
256
yamlgraph/gconfig.go
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2016+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package yamlgraph provides the facilities for loading a graph from a yaml file.
|
||||||
|
package yamlgraph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/etcd"
|
||||||
|
"github.com/purpleidea/mgmt/global"
|
||||||
|
"github.com/purpleidea/mgmt/pgraph"
|
||||||
|
"github.com/purpleidea/mgmt/resources"
|
||||||
|
"github.com/purpleidea/mgmt/util"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type collectorResConfig struct {
|
||||||
|
Kind string `yaml:"kind"`
|
||||||
|
Pattern string `yaml:"pattern"` // XXX: Not Implemented
|
||||||
|
}
|
||||||
|
|
||||||
|
// Vertex is the data structure of a vertex.
|
||||||
|
type Vertex struct {
|
||||||
|
Kind string `yaml:"kind"`
|
||||||
|
Name string `yaml:"name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Edge is the data structure of an edge.
|
||||||
|
type Edge struct {
|
||||||
|
Name string `yaml:"name"`
|
||||||
|
From Vertex `yaml:"from"`
|
||||||
|
To Vertex `yaml:"to"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resources is the data structure of the set of resources.
|
||||||
|
type Resources struct {
|
||||||
|
// in alphabetical order
|
||||||
|
Exec []*resources.ExecRes `yaml:"exec"`
|
||||||
|
File []*resources.FileRes `yaml:"file"`
|
||||||
|
Msg []*resources.MsgRes `yaml:"msg"`
|
||||||
|
Noop []*resources.NoopRes `yaml:"noop"`
|
||||||
|
Pkg []*resources.PkgRes `yaml:"pkg"`
|
||||||
|
Svc []*resources.SvcRes `yaml:"svc"`
|
||||||
|
Timer []*resources.TimerRes `yaml:"timer"`
|
||||||
|
Virt []*resources.VirtRes `yaml:"virt"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GraphConfig is the data structure that describes a single graph to run.
|
||||||
|
type GraphConfig struct {
|
||||||
|
Graph string `yaml:"graph"`
|
||||||
|
Resources Resources `yaml:"resources"`
|
||||||
|
Collector []collectorResConfig `yaml:"collect"`
|
||||||
|
Edges []Edge `yaml:"edges"`
|
||||||
|
Comment string `yaml:"comment"`
|
||||||
|
Remote string `yaml:"remote"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse parses a data stream into the graph structure.
|
||||||
|
func (c *GraphConfig) Parse(data []byte) error {
|
||||||
|
if err := yaml.Unmarshal(data, c); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if c.Graph == "" {
|
||||||
|
return errors.New("Graph config: invalid `graph`")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGraphFromConfig transforms a GraphConfig struct into a new graph.
|
||||||
|
// FIXME: remove any possibly left over, now obsolete graph diff code from here!
|
||||||
|
func (c *GraphConfig) NewGraphFromConfig(hostname string, embdEtcd *etcd.EmbdEtcd, noop bool) (*pgraph.Graph, error) {
|
||||||
|
// hostname is the uuid for the host
|
||||||
|
|
||||||
|
var graph *pgraph.Graph // new graph to return
|
||||||
|
graph = pgraph.NewGraph("Graph") // give graph a default name
|
||||||
|
|
||||||
|
var lookup = make(map[string]map[string]*pgraph.Vertex)
|
||||||
|
|
||||||
|
//log.Printf("%+v", config) // debug
|
||||||
|
|
||||||
|
// TODO: if defined (somehow)...
|
||||||
|
graph.SetName(c.Graph) // set graph name
|
||||||
|
|
||||||
|
var keep []*pgraph.Vertex // list of vertex which are the same in new graph
|
||||||
|
var resourceList []resources.Res // list of resources to export
|
||||||
|
// use reflection to avoid duplicating code... better options welcome!
|
||||||
|
value := reflect.Indirect(reflect.ValueOf(c.Resources))
|
||||||
|
vtype := value.Type()
|
||||||
|
for i := 0; i < vtype.NumField(); i++ { // number of fields in struct
|
||||||
|
name := vtype.Field(i).Name // string of field name
|
||||||
|
field := value.FieldByName(name)
|
||||||
|
iface := field.Interface() // interface type of value
|
||||||
|
slice := reflect.ValueOf(iface)
|
||||||
|
// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
|
||||||
|
kind := util.FirstToUpper(name)
|
||||||
|
if global.DEBUG {
|
||||||
|
log.Printf("Config: Processing: %v...", kind)
|
||||||
|
}
|
||||||
|
for j := 0; j < slice.Len(); j++ { // loop through resources of same kind
|
||||||
|
x := slice.Index(j).Interface()
|
||||||
|
res, ok := x.(resources.Res) // convert to Res type
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Config: Error: Can't convert: %v of type: %T to Res.", x, x)
|
||||||
|
}
|
||||||
|
//if noop { // now done in mgmtmain
|
||||||
|
// res.Meta().Noop = noop
|
||||||
|
//}
|
||||||
|
if _, exists := lookup[kind]; !exists {
|
||||||
|
lookup[kind] = make(map[string]*pgraph.Vertex)
|
||||||
|
}
|
||||||
|
// XXX: should we export based on a @@ prefix, or a metaparam
|
||||||
|
// like exported => true || exported => (host pattern)||(other pattern?)
|
||||||
|
if !strings.HasPrefix(res.GetName(), "@@") { // not exported resource
|
||||||
|
v := graph.GetVertexMatch(res)
|
||||||
|
if v == nil { // no match found
|
||||||
|
res.Init()
|
||||||
|
v = pgraph.NewVertex(res)
|
||||||
|
graph.AddVertex(v) // call standalone in case not part of an edge
|
||||||
|
}
|
||||||
|
lookup[kind][res.GetName()] = v // used for constructing edges
|
||||||
|
keep = append(keep, v) // append
|
||||||
|
|
||||||
|
} else if !noop { // do not export any resources if noop
|
||||||
|
// store for addition to etcd storage...
|
||||||
|
res.SetName(res.GetName()[2:]) //slice off @@
|
||||||
|
res.SetKind(kind) // cheap init
|
||||||
|
resourceList = append(resourceList, res)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// store in etcd
|
||||||
|
if err := etcd.EtcdSetResources(embdEtcd, hostname, resourceList); err != nil {
|
||||||
|
return nil, fmt.Errorf("Config: Could not export resources: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// lookup from etcd
|
||||||
|
var hostnameFilter []string // empty to get from everyone
|
||||||
|
kindFilter := []string{}
|
||||||
|
for _, t := range c.Collector {
|
||||||
|
// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
|
||||||
|
kind := util.FirstToUpper(t.Kind)
|
||||||
|
kindFilter = append(kindFilter, kind)
|
||||||
|
}
|
||||||
|
// do all the graph look ups in one single step, so that if the etcd
|
||||||
|
// database changes, we don't have a partial state of affairs...
|
||||||
|
if len(kindFilter) > 0 { // if kindFilter is empty, don't need to do lookups!
|
||||||
|
var err error
|
||||||
|
resourceList, err = etcd.EtcdGetResources(embdEtcd, hostnameFilter, kindFilter)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Config: Could not collect resources: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, res := range resourceList {
|
||||||
|
matched := false
|
||||||
|
// see if we find a collect pattern that matches
|
||||||
|
for _, t := range c.Collector {
|
||||||
|
// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
|
||||||
|
kind := util.FirstToUpper(t.Kind)
|
||||||
|
// use t.Kind and optionally t.Pattern to collect from etcd storage
|
||||||
|
log.Printf("Collect: %v; Pattern: %v", kind, t.Pattern)
|
||||||
|
|
||||||
|
// XXX: expand to more complex pattern matching here...
|
||||||
|
if res.Kind() != kind {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if matched {
|
||||||
|
// we've already matched this resource, should we match again?
|
||||||
|
log.Printf("Config: Warning: Matching %v[%v] again!", kind, res.GetName())
|
||||||
|
}
|
||||||
|
matched = true
|
||||||
|
|
||||||
|
// collect resources but add the noop metaparam
|
||||||
|
//if noop { // now done in mgmtmain
|
||||||
|
// res.Meta().Noop = noop
|
||||||
|
//}
|
||||||
|
|
||||||
|
if t.Pattern != "" { // XXX: simplistic for now
|
||||||
|
res.CollectPattern(t.Pattern) // res.Dirname = t.Pattern
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Collect: %v[%v]: collected!", kind, res.GetName())
|
||||||
|
|
||||||
|
// XXX: similar to other resource add code:
|
||||||
|
if _, exists := lookup[kind]; !exists {
|
||||||
|
lookup[kind] = make(map[string]*pgraph.Vertex)
|
||||||
|
}
|
||||||
|
v := graph.GetVertexMatch(res)
|
||||||
|
if v == nil { // no match found
|
||||||
|
res.Init() // initialize go channels or things won't work!!!
|
||||||
|
v = pgraph.NewVertex(res)
|
||||||
|
graph.AddVertex(v) // call standalone in case not part of an edge
|
||||||
|
}
|
||||||
|
lookup[kind][res.GetName()] = v // used for constructing edges
|
||||||
|
keep = append(keep, v) // append
|
||||||
|
|
||||||
|
//break // let's see if another resource even matches
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, e := range c.Edges {
|
||||||
|
if _, ok := lookup[util.FirstToUpper(e.From.Kind)]; !ok {
|
||||||
|
return nil, fmt.Errorf("Can't find 'from' resource!")
|
||||||
|
}
|
||||||
|
if _, ok := lookup[util.FirstToUpper(e.To.Kind)]; !ok {
|
||||||
|
return nil, fmt.Errorf("Can't find 'to' resource!")
|
||||||
|
}
|
||||||
|
if _, ok := lookup[util.FirstToUpper(e.From.Kind)][e.From.Name]; !ok {
|
||||||
|
return nil, fmt.Errorf("Can't find 'from' name!")
|
||||||
|
}
|
||||||
|
if _, ok := lookup[util.FirstToUpper(e.To.Kind)][e.To.Name]; !ok {
|
||||||
|
return nil, fmt.Errorf("Can't find 'to' name!")
|
||||||
|
}
|
||||||
|
graph.AddEdge(lookup[util.FirstToUpper(e.From.Kind)][e.From.Name], lookup[util.FirstToUpper(e.To.Kind)][e.To.Name], pgraph.NewEdge(e.Name))
|
||||||
|
}
|
||||||
|
|
||||||
|
return graph, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseConfigFromFile takes a filename and returns the graph config structure.
|
||||||
|
func ParseConfigFromFile(filename string) *GraphConfig {
|
||||||
|
data, err := ioutil.ReadFile(filename)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Config: Error: ParseConfigFromFile: File: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var config GraphConfig
|
||||||
|
if err := config.Parse(data); err != nil {
|
||||||
|
log.Printf("Config: Error: ParseConfigFromFile: Parse: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &config
|
||||||
|
}
|
||||||