Compare commits

29 Commits

| SHA1 |
|---|
| 9546949945 |
| 8ff048d055 |
| 95a1c6e7fb |
| 0b1a4a0f30 |
| 22b48e296a |
| c696ebf53c |
| a0686b7d2b |
| 8d94be8924 |
| e97ac5033f |
| 44771a0049 |
| 32aae8f57a |
| 8207e23cd9 |
| a469029698 |
| 203d866643 |
| 1488e5ec4d |
| af66138a17 |
| 5f060d60a7 |
| 73ccbb69ea |
| be60440b20 |
| 837efb78e6 |
| 4a62a290d8 |
| 018399cb1f |
| 646a576358 |
| d8e19cd79a |
| 757cb0cf23 |
| 7d92ab335a |
| 46c6d6f656 |
| 46260749c1 |
| 50664fe115 |
@@ -1,9 +1,10 @@
 language: go
 go:
-- 1.6
-- 1.7
-- 1.8
+- 1.6.x
+- 1.7.x
+- 1.8.x
 - tip
+go_import_path: github.com/purpleidea/mgmt
 sudo: true
 dist: trusty
 before_install:
@@ -15,7 +16,7 @@ matrix:
   fast_finish: true
   allow_failures:
   - go: tip
-  - go: 1.8
+  - go: 1.8.x
 notifications:
   irc:
     channels:
Makefile (4 changes)

@@ -41,6 +41,8 @@ ifneq ($(GOTAGS),)
 	BUILD_FLAGS = -tags '$(GOTAGS)'
 endif
 
+default: build
+
 #
 #	art
 #
@@ -110,7 +112,7 @@ ifneq ($(OLDGOLANG),)
 	@# avoid equals sign in old golang versions eg in: -X foo=bar
 	time go build -ldflags "-X main.program $(PROGRAM) -X main.version $(SVERSION)" -o $(PROGRAM) $(BUILD_FLAGS);
 else
-	time go build -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)" -o $(PROGRAM) $(BUILD_FLAGS);
+	time go build -i -ldflags "-X main.program=$(PROGRAM) -X main.version=$(SVERSION)" -o $(PROGRAM) $(BUILD_FLAGS);
 endif
 
 $(PROGRAM).static: main.go
@@ -77,6 +77,7 @@ We'd love to have your patches! Please send them by email, or as a pull request.
 | James Shubin | video | [Recording from High Load Strategy 2016](https://vimeo.com/191493409) |
 | James Shubin | video | [Recording from NLUUG 2016](https://www.youtube.com/watch?v=MmpwOQAb_SE&html5=1) |
 | James Shubin | blog | [Send/Recv in mgmt](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/) |
+| James Shubin | blog | [Metaparameters in mgmt](https://ttboj.wordpress.com/2017/03/01/metaparameters-in-mgmt/) |
 
 ##
 
@@ -24,6 +24,7 @@ For more information, you may like to read some blog posts from the author:
 * [Automatic clustering in mgmt](https://ttboj.wordpress.com/2016/06/20/automatic-clustering-in-mgmt/)
 * [Remote execution in mgmt](https://ttboj.wordpress.com/2016/10/07/remote-execution-in-mgmt/)
 * [Send/Recv in mgmt](https://ttboj.wordpress.com/2016/12/07/sendrecv-in-mgmt/)
+* [Metaparameters in mgmt](https://ttboj.wordpress.com/2017/03/01/metaparameters-in-mgmt/)
 
 There is also an [introductory video](http://meetings-archive.debian.net/pub/debian-meetings/2016/debconf16/Next_Generation_Config_Mgmt.webm) available.
 Older videos and other material [is available](https://github.com/purpleidea/mgmt/#on-the-web).
@@ -181,6 +182,7 @@ parameter with the [Noop](#Noop) resource.
 * [Exec](#Exec): Execute shell commands on the system.
 * [File](#File): Manage files and directories.
 * [Hostname](#Hostname): Manages the hostname on the system.
+* [KV](#KV): Set a key value pair in our shared world database.
 * [Msg](#Msg): Send log messages.
 * [Noop](#Noop): A simple resource that does nothing.
 * [Nspawn](#Nspawn): Manage systemd-machined nspawn containers.
@@ -267,6 +269,30 @@ The pretty hostname is a free-form UTF8 host name for presentation to the user.
 Hostname is the fallback value for all 3 fields above, if only `hostname` is
 specified, it will set all 3 fields to this value.
 
+### KV
+
+The KV resource sets a key and value pair in the global world database. This is
+quite useful for setting a flag after a number of resources have run. It will
+ignore database updates to the value that are greater in compare order than the
+requested key if the `SkipLessThan` parameter is set to true. If we receive a
+refresh, then the stored value will be reset to the requested value even if the
+stored value is greater.
+
+#### Key
+The string key used to store the key.
+
+#### Value
+The string value to set. This can also be set via Send/Recv.
+
+#### SkipLessThan
+If this parameter is set to `true`, then it will ignore updating the value as
+long as the database versions are greater than the requested value. The compare
+operation used is based on the `SkipCmpStyle` parameter.
+
+#### SkipCmpStyle
+By default this converts the string values to integers and compares them as you
+would expect.
+
 ### Msg
 
 The msg resource sends messages to the main log, or an external service such
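As an illustration only (not part of the diff above): the default `SkipCmpStyle` described for the KV resource converts both strings to integers before comparing. A minimal Go sketch of that idea follows; the helper name and signature are hypothetical, not the resource's actual code.

```go
package main

import (
	"fmt"
	"strconv"
)

// skipLessThan is a hypothetical helper: it reports whether a stored value
// should be kept (i.e. the write skipped) because it is already greater than
// the requested value, using the default "convert to integers" compare style.
func skipLessThan(stored, requested string) (bool, error) {
	s, err := strconv.Atoi(stored)
	if err != nil {
		return false, fmt.Errorf("stored value %q is not an integer: %v", stored, err)
	}
	r, err := strconv.Atoi(requested)
	if err != nil {
		return false, fmt.Errorf("requested value %q is not an integer: %v", requested, err)
	}
	return s > r, nil // skip the write if the stored value is already greater
}

func main() {
	skip, _ := skipLessThan("3", "1")
	fmt.Println(skip) // true: the database already holds a later stage
}
```

The examples/kv3.yaml and examples/kv4.yaml files added later in this compare exercise exactly this `skiplessthan` behaviour.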
@@ -457,6 +483,17 @@ the rate limiter as designated by the `Limit` value. If the `Limit` is not set
 to `+Infinity`, this must be a non-zero value. Please see the
 [rate](https://godoc.org/golang.org/x/time/rate) package for more information.
 
+#### Sema
+List of string ids. Sema is a P/V style counting semaphore which can be used to
+limit parallelism during the CheckApply phase of resource execution. Each
+resource can have `N` different semaphores which share a graph global namespace.
+Each semaphore has a maximum count associated with it. The default value of the
+size is 1 (one) if size is unspecified. Each string id is the unique id of the
+semaphore. If the id contains a trailing colon (:) followed by a positive
+integer, then that value is the max size for that semaphore. Valid semaphore
+id's include: `some_id`, `hello:42`, `not:smart:4` and `:13`. It is expected
+that the last bare example be only used by the engine to add a global semaphore.
+
 ### Graph definition file
 graph.yaml is the compiled graph definition file. The format is currently
 undocumented, but by looking through the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples)
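An aside, not part of the diff: the id convention above (a trailing `:N` sets the semaphore size, otherwise the size defaults to 1) can be sketched in a few lines of Go. The helper below is hypothetical and not mgmt's actual parser.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// semaSize is a hypothetical helper that extracts the size encoded in a
// semaphore id. "hello:42" has size 42; a bare id like "some_id" defaults to
// size 1; only the trailing colon-number counts, so "not:smart:4" has size 4.
func semaSize(id string) int {
	if i := strings.LastIndex(id, ":"); i != -1 {
		if n, err := strconv.Atoi(id[i+1:]); err == nil && n > 0 {
			return n
		}
	}
	return 1 // default size when unspecified
}

func main() {
	for _, id := range []string{"some_id", "hello:42", "not:smart:4", ":13"} {
		fmt.Printf("%s -> %d\n", id, semaSize(id))
	}
}
```

The examples/exec3-sema.yaml file added later in this compare uses ids such as `mylock:1` and `otherlock:42`.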
@@ -481,6 +518,15 @@ Globally force all resources into no-op mode. This also disables the export to
 etcd functionality, but does not disable resource collection, however all
 resources that are collected will have their individual noop settings set.
 
+#### `--sema <size>`
+Globally add a counting semaphore of this size to each resource in the graph.
+The semaphore will get given an id of `:size`. In other words if you specify a
+size of 42, you can expect a semaphore if named: `:42`. It is expected that
+consumers of the semaphore metaparameter always include a prefix to avoid a
+collision with this globally defined semaphore. The size value must be greater
+than zero at this time. The traditional non-parallel execution found in config
+management tools such as `Puppet` can be obtained with `--sema 1`.
+
 #### `--remote <graph.yaml>`
 Point to a graph file to run on the remote host specified within. This parameter
 can be used multiple times if you'd like to remotely run on multiple hosts in
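Not part of the diff: the lib/main.go change further down in this compare applies this flag by appending an id of the form `:<size>` to every resource's `Sema` list (a size of zero is rejected because a zero-count semaphore would block forever). A tiny Go sketch of that mapping:

```go
package main

import "fmt"

func main() {
	sema := 42 // the value passed via --sema
	resourceSemas := [][]string{
		{"mylock:1"}, // a resource with a semaphore of its own
		{},           // a resource with none
	}
	for i := range resourceSemas {
		if sema > 0 { // size == 0 would block, so it is not applied
			resourceSemas[i] = append(resourceSemas[i], fmt.Sprintf(":%d", sema))
		}
	}
	fmt.Println(resourceSemas) // [[mylock:1 :42] [:42]]
}
```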
@@ -31,6 +31,7 @@ Here is a list of the metrics we provide:
 - `mgmt_checkapply_total`: The number of CheckApply's that mgmt has run
 - `mgmt_failures_total`: The number of resources that have failed
 - `mgmt_failures_current`: The number of resources that have failed
+- `mgmt_graph_start_time_seconds`: Start time of the current graph since unix epoch in seconds
 
 For each metric, you will get some extra labels:
 
@@ -80,7 +80,7 @@ work, and finish by calling the `Init` method of the base resource.
 ```golang
 // Init initializes the Foo resource.
 func (obj *FooRes) Init() error {
-	obj.BaseRes.kind = "Foo" // must set capitalized resource kind
+	obj.BaseRes.kind = "foo" // must lower case resource kind
 	// run the resource specific initialization, and error if anything fails
 	if some_error {
 		return err // something went wrong!
etcd/str.go (new file, 115 lines)

// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package etcd

import (
	"fmt"
	"strings"

	"github.com/purpleidea/mgmt/util"

	etcd "github.com/coreos/etcd/clientv3"
	errwrap "github.com/pkg/errors"
)

// WatchStr returns a channel which spits out events on key activity.
// FIXME: It should close the channel when it's done, and spit out errors when
// something goes wrong.
func WatchStr(obj *EmbdEtcd, key string) chan error {
	// new key structure is /$NS/strings/$key/$hostname = $data
	path := fmt.Sprintf("/%s/strings/%s", NS, key)
	ch := make(chan error, 1)
	// FIXME: fix our API so that we get a close event on shutdown.
	callback := func(re *RE) error {
		// TODO: is this even needed? it used to happen on conn errors
		//log.Printf("Etcd: Watch: Path: %v", path) // event
		if re == nil || re.response.Canceled {
			return fmt.Errorf("watch is empty") // will cause a CtxError+retry
		}
		if len(ch) == 0 { // send event only if one isn't pending
			ch <- nil // event
		}
		return nil
	}
	_, _ = obj.AddWatcher(path, callback, true, false, etcd.WithPrefix()) // no need to check errors
	return ch
}

// GetStr collects all of the strings which match a namespace in etcd.
func GetStr(obj *EmbdEtcd, hostnameFilter []string, key string) (map[string]string, error) {
	// old key structure is /$NS/strings/$hostname/$key = $data
	// new key structure is /$NS/strings/$key/$hostname = $data
	// FIXME: if we have the $key as the last token (old key structure), we
	// can allow the key to contain the slash char, otherwise we need to
	// verify that one isn't present in the input string.
	path := fmt.Sprintf("/%s/strings/%s", NS, key)
	keyMap, err := obj.Get(path, etcd.WithPrefix(), etcd.WithSort(etcd.SortByKey, etcd.SortAscend))
	if err != nil {
		return nil, errwrap.Wrapf(err, "could not get strings in: %s", key)
	}
	result := make(map[string]string)
	for key, val := range keyMap {
		if !strings.HasPrefix(key, path) { // sanity check
			continue
		}

		str := strings.Split(key[len(path):], "/")
		if len(str) != 2 {
			return nil, fmt.Errorf("unexpected chunk count of %d", len(str))
		}
		_, hostname := str[0], str[1]

		if hostname == "" {
			return nil, fmt.Errorf("unexpected chunk length of %d", len(hostname))
		}

		// FIXME: ideally this would be a server side filter instead!
		if len(hostnameFilter) > 0 && !util.StrInList(hostname, hostnameFilter) {
			continue
		}
		//log.Printf("Etcd: GetStr(%s): (Hostname, Data): (%s, %s)", key, hostname, val)
		result[hostname] = val
	}
	return result, nil
}

// SetStr sets a key and hostname pair to a certain value. If the value is nil,
// then it deletes the key. Otherwise the value should point to a string.
// TODO: TTL or delete disconnect?
func SetStr(obj *EmbdEtcd, hostname, key string, data *string) error {
	// key structure is /$NS/strings/$key/$hostname = $data
	path := fmt.Sprintf("/%s/strings/%s/%s", NS, key, hostname)
	ifs := []etcd.Cmp{} // list matching the desired state
	ops := []etcd.Op{} // list of ops in this transaction (then)
	els := []etcd.Op{} // list of ops in this transaction (else)
	if data == nil { // perform a delete
		// TODO: use https://github.com/coreos/etcd/pull/7417 if merged
		//ifs = append(ifs, etcd.KeyExists(path))
		ifs = append(ifs, etcd.Compare(etcd.Version(path), ">", 0))
		ops = append(ops, etcd.OpDelete(path))
	} else {
		data := *data // get the real value
		ifs = append(ifs, etcd.Compare(etcd.Value(path), "=", data)) // desired state
		els = append(els, etcd.OpPut(path, data))
	}

	// it's important to do this in one transaction, and atomically, because
	// this way, we only generate one watch event, and only when it's needed
	_, err := obj.Txn(ifs, ops, els) // TODO: do we need to look at response?
	return errwrap.Wrapf(err, "could not set strings in: %s", key)
}
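For orientation only (not part of this compare): one way the three functions above might be used together, assuming an already initialized `*EmbdEtcd`. The surrounding function is hypothetical and error handling is abbreviated.

```go
// exampleStrUsage is a hypothetical sketch that exercises the new string API.
func exampleStrUsage(embdEtcd *EmbdEtcd, hostname string) error {
	value := "world"
	// store /$NS/strings/hello/$hostname = "world"
	if err := SetStr(embdEtcd, hostname, "hello", &value); err != nil {
		return err
	}

	// read back every host's value in the "hello" namespace
	m, err := GetStr(embdEtcd, []string{}, "hello")
	if err != nil {
		return err
	}
	_ = m // m maps hostname -> value

	// watch for changes; each nil error on the channel is an event
	ch := WatchStr(embdEtcd, "hello")
	go func() {
		for range ch {
			// something changed under the "hello" namespace
		}
	}()

	// passing nil as the data pointer deletes this hostname's key
	return SetStr(embdEtcd, hostname, "hello", nil)
}
```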
@@ -41,3 +41,24 @@ func (obj *World) ResCollect(hostnameFilter, kindFilter []string) ([]resources.R
 	// enforce that here if the underlying API supported it... Add this?
 	return GetResources(obj.EmbdEtcd, hostnameFilter, kindFilter)
 }
+
+// SetWatch returns a channel which spits out events on possible string changes.
+func (obj *World) StrWatch(namespace string) chan error {
+	return WatchStr(obj.EmbdEtcd, namespace)
+}
+
+// StrGet returns a map of hostnames to values in the given namespace.
+func (obj *World) StrGet(namespace string) (map[string]string, error) {
+	return GetStr(obj.EmbdEtcd, []string{}, namespace)
+}
+
+// StrSet sets the namespace value to a particular string under the identity of
+// its own hostname.
+func (obj *World) StrSet(namespace, value string) error {
+	return SetStr(obj.EmbdEtcd, obj.Hostname, namespace, &value)
+}
+
+// StrDel deletes the value in a particular namespace.
+func (obj *World) StrDel(namespace string) error {
+	return SetStr(obj.EmbdEtcd, obj.Hostname, namespace, nil)
+}
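An aside, not part of the diff: these wrappers are presumably what a resource such as the new KV resource consumes, via the `World` handle that `gapi.Data` and `resources.Data` carry later in this compare. A rough, hypothetical flow:

```go
// kvFlowSketch is illustrative only and is not the actual KV resource code:
// a watch feeds change events, while reads and writes go through the wrappers.
func kvFlowSketch(world *World, key, value string) error {
	events := world.StrWatch(key) // nil errors are "something changed" events
	go func() {
		for range events {
			// the real engine would trigger a new CheckApply cycle here
		}
	}()

	stored, err := world.StrGet(key) // hostname -> value, for every host
	if err != nil {
		return err
	}
	if v, exists := stored[world.Hostname]; !exists || v != value {
		return world.StrSet(key, value) // store under our own hostname
	}
	return nil
}
```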
examples/exec3-sema.yaml (new file, 67 lines)

---
graph: parallel
resources:
  exec:
  - name: pkg10
    meta:
      sema: ['mylock:1', 'otherlock:42']
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: svc10
    meta:
      sema: ['mylock:1']
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: exec10
    meta:
      sema: ['mylock:1']
    cmd: sleep 10s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
  - name: pkg15
    meta:
      sema: ['mylock:1', 'otherlock:42']
    cmd: sleep 15s
    shell: ''
    timeout: 0
    watchcmd: ''
    watchshell: ''
    ifcmd: ''
    ifshell: ''
    pollint: 0
    state: present
edges:
- name: e1
  from:
    kind: exec
    name: pkg10
  to:
    kind: exec
    name: svc10
- name: e2
  from:
    kind: exec
    name: svc10
  to:
    kind: exec
    name: exec10
examples/kv1.yaml (new file, 8 lines)

---
graph: mygraph
resources:
  kv:
  - name: kv1
    key: "hello"
    value: "world"
edges: []

examples/kv2.yaml (new file, 7 lines)

---
graph: mygraph
resources:
  kv:
  - name: kv1
    key: "iamdeleted"
edges: []

examples/kv3.yaml (new file, 9 lines)

---
graph: mygraph
resources:
  kv:
  - name: kv1
    key: "stage"
    value: "3"
    skiplessthan: true
edges: []

examples/kv4.yaml (new file, 31 lines)

---
graph: mygraph
resources:
  kv:
  - name: kv1
    key: "stage"
    value: "1"
    skiplessthan: true
  - name: kv2
    key: "stage"
    value: "2"
    skiplessthan: true
  - name: kv3
    key: "stage"
    value: "3"
    skiplessthan: true
edges:
- name: e1
  from:
    kind: kv
    name: kv1
  to:
    kind: kv
    name: kv2
- name: e2
  from:
    kind: kv
    name: kv2
  to:
    kind: kv
    name: kv3
gapi/gapi.go (11 changes)

@@ -23,19 +23,10 @@ import (
 	"github.com/purpleidea/mgmt/resources"
 )
 
-// World is an interface to the rest of the different graph state. It allows
-// the GAPI to store state and exchange information throughout the cluster. It
-// is the interface each machine uses to communicate with the rest of the world.
-type World interface { // TODO: is there a better name for this interface?
-	ResExport([]resources.Res) error
-	// FIXME: should this method take a "filter" data struct instead of many args?
-	ResCollect(hostnameFilter, kindFilter []string) ([]resources.Res, error)
-}
-
 // Data is the set of input values passed into the GAPI structs via Init.
 type Data struct {
 	Hostname string // uuid for the host, required for GAPI
-	World World
+	World resources.World
 	Noop bool
 	NoWatch bool
 	// NOTE: we can add more fields here if needed by GAPI endpoints
@@ -84,6 +84,7 @@ func run(c *cli.Context) error {
 
 	obj.NoWatch = c.Bool("no-watch")
 	obj.Noop = c.Bool("noop")
+	obj.Sema = c.Int("sema")
 	obj.Graphviz = c.String("graphviz")
 	obj.GraphvizFilter = c.String("graphviz-filter")
 	obj.ConvergedTimeout = c.Int("converged-timeout")
@@ -228,6 +229,11 @@ func CLI(program, version string, flags Flags) error {
 			Name:  "noop",
 			Usage: "globally force all resources into no-op mode",
 		},
+		cli.IntFlag{
+			Name:  "sema",
+			Value: -1,
+			Usage: "globally add a semaphore to all resources with this lock count",
+		},
 		cli.StringFlag{
 			Name:  "graphviz, g",
 			Value: "",
@@ -235,7 +241,7 @@ func CLI(program, version string, flags Flags) error {
 		},
 		cli.StringFlag{
 			Name:  "graphviz-filter, gf",
-			Value: "dot", // directed graph default
+			Value: "",
 			Usage: "graphviz filter to use",
 		},
 		cli.IntFlag{
lib/main.go (55 changes)

@@ -67,6 +67,7 @@ type Main struct {
 
 	NoWatch bool // do not update graph on watched graph definition file changes
 	Noop bool // globally force all resources into no-op mode
+	Sema int // add a semaphore with this lock count to each resource
 	Graphviz string // output file for graphviz data
 	GraphvizFilter string // graphviz filter to use
 	ConvergedTimeout int // exit after approximately this many seconds in a converged state; -1 to disable
@@ -348,17 +349,19 @@ func (obj *Main) Run() error {
 		converger.SetStateFn(convergerStateFn)
 	}
 
+	// implementation of the World API (alternates can be substituted in)
+	world := &etcd.World{
+		Hostname: hostname,
+		EmbdEtcd: EmbdEtcd,
+	}
+
 	var gapiChan chan error // stream events are nil errors
 	if obj.GAPI != nil {
 		data := gapi.Data{
 			Hostname: hostname,
-			// NOTE: alternate implementations can be substituted in
-			World: &etcd.World{
-				Hostname: hostname,
-				EmbdEtcd: EmbdEtcd,
-			},
-			Noop: obj.Noop,
-			NoWatch: obj.NoWatch,
+			World: world,
+			Noop: obj.Noop,
+			NoWatch: obj.NoWatch,
 		}
 		if err := obj.GAPI.Init(data); err != nil {
 			obj.Exit(fmt.Errorf("Main: GAPI: Init failed: %v", err))
@@ -437,17 +440,25 @@ func (obj *Main) Run() error {
 			newGraph.Flags = pgraph.Flags{Debug: obj.Flags.Debug}
 			// pass in the information we need
 			newGraph.AssociateData(&resources.Data{
+				Hostname: hostname,
 				Converger: converger,
 				Prometheus: prom,
+				World: world,
 				Prefix: pgraphPrefix,
 				Debug: obj.Flags.Debug,
 			})
 
-			// apply the global noop parameter if requested
-			if obj.Noop {
-				for _, m := range newGraph.GraphMetas() {
+			for _, m := range newGraph.GraphMetas() {
+				// apply the global noop parameter if requested
+				if obj.Noop {
 					m.Noop = obj.Noop
 				}
+
+				// append the semaphore to each resource
+				if obj.Sema > 0 { // NOTE: size == 0 would block
+					// a semaphore with an empty id is valid
+					m.Sema = append(m.Sema, fmt.Sprintf(":%d", obj.Sema))
+				}
 			}
 
 			// FIXME: make sure we "UnGroup()" any semi-destructive
@@ -472,13 +483,10 @@ func (obj *Main) Run() error {
 			// TODO: do we want to do a transitive reduction?
 			// FIXME: run a type checker that verifies all the send->recv relationships
 
-			log.Printf("Graph: %v", G) // show graph
-			if obj.GraphvizFilter != "" {
-				if err := G.ExecGraphviz(obj.GraphvizFilter, obj.Graphviz); err != nil {
-					log.Printf("Graphviz: %v", err)
-				} else {
-					log.Printf("Graphviz: Successfully generated graph!")
-				}
+			// Call this here because at this point the graph does not
+			// know anything about the prometheus instance.
+			if err := prom.UpdatePgraphStartTime(); err != nil {
+				log.Printf("Main: Prometheus.UpdatePgraphStartTime() errored: %v", err)
 			}
 			// G.Start(...) needs to be synchronous or wait,
 			// because if half of the nodes are started and
@@ -487,6 +495,19 @@ func (obj *Main) Run() error {
 			// even got going, thus causing nil pointer errors
 			G.Start(first) // sync
 			converger.Start() // after G.Start()
+
+			log.Printf("Graph: %v", G) // show graph
+			if obj.Graphviz != "" {
+				filter := obj.GraphvizFilter
+				if filter == "" {
+					filter = "dot" // directed graph default
+				}
+				if err := G.ExecGraphviz(filter, obj.Graphviz, hostname); err != nil {
+					log.Printf("Graphviz: %v", err)
+				} else {
+					log.Printf("Graphviz: Successfully generated graph!")
+				}
+			}
 			first = false
 		}
 	}()
@@ -15,13 +15,14 @@ YUM=`which yum 2>/dev/null`
 DNF=`which dnf 2>/dev/null`
 APT=`which apt-get 2>/dev/null`
 BREW=`which brew 2>/dev/null`
+PACMAN=`which pacman 2>/dev/null`
 
 # if DNF is available use it
 if [ -x "$DNF" ]; then
 	YUM=$DNF
 fi
 
-if [ -z "$YUM" -a -z "$APT" -a -z "$BREW" ]; then
+if [ -z "$YUM" -a -z "$APT" -a -z "$BREW" -a -z "$PACMAN" ]; then
 	echo "The package managers can't be found."
 	exit 1
 fi
@@ -41,6 +42,10 @@ if [ ! -z "$BREW" ]; then
 	$BREW install libvirt || true
 fi
 
+if [ ! -z "$PACMAN" ]; then
+	$sudo_command $PACMAN -S --noconfirm libvirt augeas libpcap
+fi
+
 if [ $travis -eq 0 ]; then
 	if [ ! -z "$YUM" ]; then
 		# some go dependencies are stored in mercurial
@@ -54,6 +59,9 @@ if [ $travis -eq 0 ]; then
 		$sudo_command $APT install -y golang-golang-x-tools || true
 		$sudo_command $APT install -y golang-go.tools || true
 	fi
+	if [ ! -z "$PACMAN" ]; then
+		$sudo_command $PACMAN -S --noconfirm go
+	fi
 fi
 
 # if golang is too old, we don't want to fail with an obscure error later
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"log"
 	"math"
+	"strings"
 	"sync"
 	"time"
 
@@ -164,8 +165,30 @@ func (g *Graph) Process(v *Vertex) error {
 	if g.Flags.Debug {
 		log.Printf("%s[%s]: Process()", obj.Kind(), obj.GetName())
 	}
+	// FIXME: should these SetState methods be here or after the sema code?
 	defer obj.SetState(resources.ResStateNil) // reset state when finished
 	obj.SetState(resources.ResStateProcess)
+
+	// semaphores!
+	// These shouldn't ever block an exit, since the graph should eventually
+	// converge causing their them to unlock. More interestingly, since they
+	// run in a DAG alphabetically, there is no way to permanently deadlock,
+	// assuming that resources individually don't ever block from finishing!
+	// The exception is that semaphores with a zero count will always block!
+	// TODO: Add a close mechanism to close/unblock zero count semaphores...
+	semas := obj.Meta().Sema
+	if g.Flags.Debug && len(semas) > 0 {
+		log.Printf("%s[%s]: Sema: P(%s)", obj.Kind(), obj.GetName(), strings.Join(semas, ", "))
+	}
+	if err := g.SemaLock(semas); err != nil { // lock
+		// NOTE: in practice, this might not ever be truly necessary...
+		return fmt.Errorf("shutdown of semaphores")
+	}
+	defer g.SemaUnlock(semas) // unlock
+	if g.Flags.Debug && len(semas) > 0 {
+		defer log.Printf("%s[%s]: Sema: V(%s)", obj.Kind(), obj.GetName(), strings.Join(semas, ", "))
+	}
+
 	var ok = true
 	var applied = false // did we run an apply?
 	// is it okay to run dependency wise right now?
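Not part of the diff: `SemaLock` and `SemaUnlock` themselves are not shown in this compare, so the following is only a rough sketch of the P/V behaviour the comments above describe, namely a graph-global map of counting semaphores acquired in sorted id order so that resources sharing several semaphores cannot deadlock each other. All names and signatures here are assumptions; a real implementation would also need a mutex around the shared map.

```go
package main

import "sort"

// semaphore is a counting semaphore built on a buffered channel.
type semaphore chan struct{}

func newSemaphore(size int) semaphore { return make(semaphore, size) }

func (s semaphore) P() { s <- struct{}{} } // acquire one unit
func (s semaphore) V() { <-s }             // release one unit

// semaLock acquires every listed semaphore, creating missing ones on demand
// with the given size. Sorting the ids gives a global acquisition order, which
// avoids deadlock when resources share overlapping semaphore sets.
func semaLock(sems map[string]semaphore, ids []string, size func(string) int) {
	sorted := append([]string{}, ids...)
	sort.Strings(sorted)
	for _, id := range sorted {
		if _, ok := sems[id]; !ok {
			sems[id] = newSemaphore(size(id))
		}
		sems[id].P()
	}
}

// semaUnlock releases in the reverse order of acquisition.
func semaUnlock(sems map[string]semaphore, ids []string) {
	sorted := append([]string{}, ids...)
	sort.Strings(sorted)
	for i := len(sorted) - 1; i >= 0; i-- {
		sems[sorted[i]].V()
	}
}

func main() {
	sems := map[string]semaphore{}
	size := func(id string) int { return 1 } // real code would parse a trailing ":N"
	ids := []string{"mylock:1", "otherlock:42"}
	semaLock(sems, ids, size) // P: acquire both, in sorted order
	// ... the resource's CheckApply() work would run here ...
	semaUnlock(sems, ids) // V: release both
}
```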
@@ -224,11 +247,9 @@ func (g *Graph) Process(v *Vertex) error {
 			// if this fails, don't UpdateTimestamp()
 			checkOK, err = obj.CheckApply(!noop)
 
-			if obj.Prometheus() != nil {
-				if promErr := obj.Prometheus().UpdateCheckApplyTotal(obj.Kind(), !noop, !checkOK, err != nil); promErr != nil {
-					// TODO: how to error correctly
-					log.Printf("%s[%s]: Prometheus.UpdateCheckApplyTotal() errored: %v", v.Kind(), v.GetName(), err)
-				}
+			if promErr := obj.Prometheus().UpdateCheckApplyTotal(obj.Kind(), !noop, !checkOK, err != nil); promErr != nil {
+				// TODO: how to error correctly
+				log.Printf("%s[%s]: Prometheus.UpdateCheckApplyTotal() errored: %v", v.Kind(), v.GetName(), err)
 			}
 			// TODO: Can the `Poll` converged timeout tracking be a
 			// more general method for all converged timeouts? this
@@ -307,6 +328,7 @@ func (obj *SentinelErr) Error() string {
 }
 
 // innerWorker is the CheckApply runner that reads from processChan.
+// TODO: would it be better if this was a method on BaseRes that took in *Graph?
 func (g *Graph) innerWorker(v *Vertex) {
 	obj := v.Res
 	running := false
@@ -344,6 +366,7 @@ Loop:
 					log.Printf("%s[%s]: Skipped event!", v.Kind(), v.GetName())
 				}
 				ev.ACK() // ready for next message
+				v.Res.QuiesceGroup().Done()
 				continue
 			}
 
@@ -355,6 +378,7 @@ Loop:
 				}
 				playback = true
 				ev.ACK() // ready for next message
+				v.Res.QuiesceGroup().Done()
 				continue
 			}
 
@@ -363,6 +387,7 @@ Loop:
 					e := fmt.Errorf("%s[%s]: Permanently limited (rate != Inf, burst: 0)", v.Kind(), v.GetName())
 					v.SendEvent(event.EventExit, &SentinelErr{e})
 					ev.ACK() // ready for next message
+					v.Res.QuiesceGroup().Done()
 					continue
 				}
 
@@ -382,6 +407,7 @@ Loop:
 					timer.Reset(d)
 					waiting = true // waiting for retry timer
 					ev.ACK()
+					v.Res.QuiesceGroup().Done()
 					continue
 				} // otherwise, we run directly!
 			}
@@ -398,6 +424,7 @@ Loop:
 				if retry == 0 {
 					// wrap the error in the sentinel
 					v.SendEvent(event.EventExit, &SentinelErr{e})
+					v.Res.QuiesceGroup().Done()
 					return
 				}
 				if retry > 0 { // don't decrement the -1
@@ -407,6 +434,8 @@ Loop:
 				// start the timer...
 				timer.Reset(delay)
 				waiting = true // waiting for retry timer
+				// don't v.Res.QuiesceGroup().Done() b/c
+				// the timer is running and it can exit!
 				return
 			}
 			retry = v.Meta().Retry // reset on success
@@ -436,15 +465,23 @@ Loop:
 				done = make(chan struct{}) // reset
 				// re-send this event, to trigger a CheckApply()
 				if playback {
-					playback = false
 					// this lock avoids us sending to
 					// channel after we've closed it!
 					// TODO: can this experience indefinite postponement ?
 					// see: https://github.com/golang/go/issues/11506
-					go obj.Event() // replay a new event
+					// pause or exit is in process if not quiescing!
+					if !v.Res.IsQuiescing() {
+						playback = false
+						v.Res.QuiesceGroup().Add(1) // lock around it, b/c still running...
+						go func() {
+							obj.Event() // replay a new event
+							v.Res.QuiesceGroup().Done()
+						}()
+					}
 				}
 				running = false
 				pcuid.SetConverged(true) // "unblock" Process
+				v.Res.QuiesceGroup().Done()
 
 			case <-wcuid.ConvergedTimer():
 				wcuid.SetConverged(true) // converged!
@@ -615,9 +652,16 @@ func (g *Graph) Start(first bool) { // start or continue
 	log.Printf("State: %v -> %v", g.setState(graphStateStarting), g.getState())
 	defer log.Printf("State: %v -> %v", g.setState(graphStateStarted), g.getState())
 	t, _ := g.TopologicalSort()
-	// TODO: only calculate indegree if `first` is true to save resources
 	indegree := g.InDegree() // compute all of the indegree's
-	for _, v := range Reverse(t) {
+	reversed := Reverse(t)
+	for _, v := range reversed { // run the Setup() for everyone first
+		if !v.Res.IsWorking() { // if Worker() is not running...
+			v.Res.Setup() // initialize some vars in the resource
+		}
+	}
+
+	// run through the topological reverse, and start or unpause each vertex
+	for _, v := range reversed {
 		// selective poke: here we reduce the number of initial pokes
 		// to the minimum required to activate every vertex in the
 		// graph, either by direct action, or by getting poked by a
@@ -632,10 +676,17 @@ func (g *Graph) Start(first bool) { // start or continue
 		// and not just selectively the subset with no indegree.
 
 		// let the startup code know to poke or not
-		v.Res.Starter((!first) || indegree[v] == 0)
+		// this triggers a CheckApply AFTER Watch is Running()
+		// We *don't* need to also do this to new nodes or nodes that
+		// are about to get unpaused, because they'll get poked by one
+		// of the indegree == 0 vertices, and an important aspect of the
+		// Process() function is that even if the state is correct, it
+		// will pass through the Poke so that it flows through the DAG.
+		v.Res.Starter(indegree[v] == 0)
 
+		var unpause = true
 		if !v.Res.IsWorking() { // if Worker() is not running...
-			v.Res.Setup()
+			unpause = false // doesn't need unpausing on first start
 			g.wg.Add(1)
 			// must pass in value to avoid races...
 			// see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/
@@ -660,7 +711,7 @@ func (g *Graph) Start(first bool) { // start or continue
 			// if the resource Init() fails, we don't hang!
 		}
 
-		if !first { // unpause!
+		if unpause { // unpause (if needed)
 			v.Res.SendEvent(event.EventStart, nil) // sync!
 		}
 	}
pgraph/autogroup_test.go (new file, 486 lines)

// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package pgraph

import (
	"testing"
)

// all of the following test cases are laid out with the following semantics:
// * vertices which start with the same single letter are considered "like"
// * "like" elements should be merged
// * vertices can have any integer after their single letter "family" type
// * grouped vertices should have a name with a comma separated list of names
// * edges follow the same conventions about grouping

// empty graph
func TestPgraphGrouping1(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	g2 := NewGraph("g2") // expected result
	runGraphCmp(t, g1, g2)
}

// single vertex
func TestPgraphGrouping2(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{ // grouping to limit variable scope
		a1 := NewVertex(NewNoopResTest("a1"))
		g1.AddVertex(a1)
	}
	g2 := NewGraph("g2") // expected result
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		g2.AddVertex(a1)
	}
	runGraphCmp(t, g1, g2)
}

// two vertices
func TestPgraphGrouping3(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b1 := NewVertex(NewNoopResTest("b1"))
		g1.AddVertex(a1, b1)
	}
	g2 := NewGraph("g2") // expected result
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b1 := NewVertex(NewNoopResTest("b1"))
		g2.AddVertex(a1, b1)
	}
	runGraphCmp(t, g1, g2)
}

// two vertices merge
func TestPgraphGrouping4(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		g1.AddVertex(a1, a2)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2"))
		g2.AddVertex(a)
	}
	runGraphCmp(t, g1, g2)
}

// three vertices merge
func TestPgraphGrouping5(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		a3 := NewVertex(NewNoopResTest("a3"))
		g1.AddVertex(a1, a2, a3)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2,a3"))
		g2.AddVertex(a)
	}
	runGraphCmp(t, g1, g2)
}

// three vertices, two merge
func TestPgraphGrouping6(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		g1.AddVertex(a1, a2, b1)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		g2.AddVertex(a, b1)
	}
	runGraphCmp(t, g1, g2)
}

// four vertices, three merge
func TestPgraphGrouping7(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		a3 := NewVertex(NewNoopResTest("a3"))
		b1 := NewVertex(NewNoopResTest("b1"))
		g1.AddVertex(a1, a2, a3, b1)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2,a3"))
		b1 := NewVertex(NewNoopResTest("b1"))
		g2.AddVertex(a, b1)
	}
	runGraphCmp(t, g1, g2)
}

// four vertices, two&two merge
func TestPgraphGrouping8(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		b2 := NewVertex(NewNoopResTest("b2"))
		g1.AddVertex(a1, a2, b1, b2)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2"))
		b := NewVertex(NewNoopResTest("b1,b2"))
		g2.AddVertex(a, b)
	}
	runGraphCmp(t, g1, g2)
}

// five vertices, two&three merge
func TestPgraphGrouping9(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		b2 := NewVertex(NewNoopResTest("b2"))
		b3 := NewVertex(NewNoopResTest("b3"))
		g1.AddVertex(a1, a2, b1, b2, b3)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2"))
		b := NewVertex(NewNoopResTest("b1,b2,b3"))
		g2.AddVertex(a, b)
	}
	runGraphCmp(t, g1, g2)
}

// three unique vertices
func TestPgraphGrouping10(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b1 := NewVertex(NewNoopResTest("b1"))
		c1 := NewVertex(NewNoopResTest("c1"))
		g1.AddVertex(a1, b1, c1)
	}
	g2 := NewGraph("g2") // expected result
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b1 := NewVertex(NewNoopResTest("b1"))
		c1 := NewVertex(NewNoopResTest("c1"))
		g2.AddVertex(a1, b1, c1)
	}
	runGraphCmp(t, g1, g2)
}

// three unique vertices, two merge
func TestPgraphGrouping11(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b1 := NewVertex(NewNoopResTest("b1"))
		b2 := NewVertex(NewNoopResTest("b2"))
		c1 := NewVertex(NewNoopResTest("c1"))
		g1.AddVertex(a1, b1, b2, c1)
	}
	g2 := NewGraph("g2") // expected result
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b := NewVertex(NewNoopResTest("b1,b2"))
		c1 := NewVertex(NewNoopResTest("c1"))
		g2.AddVertex(a1, b, c1)
	}
	runGraphCmp(t, g1, g2)
}

// simple merge 1
// a1   a2         a1,a2
//   \ /     >>>     |     (arrows point downwards)
//    b               b
func TestPgraphGrouping12(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		e1 := NewEdge("e1")
		e2 := NewEdge("e2")
		g1.AddEdge(a1, b1, e1)
		g1.AddEdge(a2, b1, e2)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		e := NewEdge("e1,e2")
		g2.AddEdge(a, b1, e)
	}
	runGraphCmp(t, g1, g2)
}

// simple merge 2
//    b               b
//   / \     >>>      |     (arrows point downwards)
// a1   a2          a1,a2
func TestPgraphGrouping13(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		e1 := NewEdge("e1")
		e2 := NewEdge("e2")
		g1.AddEdge(b1, a1, e1)
		g1.AddEdge(b1, a2, e2)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		e := NewEdge("e1,e2")
		g2.AddEdge(b1, a, e)
	}
	runGraphCmp(t, g1, g2)
}

// triple merge
// a1 a2 a3         a1,a2,a3
//  \ | /     >>>       |      (arrows point downwards)
//    b                 b
func TestPgraphGrouping14(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		a3 := NewVertex(NewNoopResTest("a3"))
		b1 := NewVertex(NewNoopResTest("b1"))
		e1 := NewEdge("e1")
		e2 := NewEdge("e2")
		e3 := NewEdge("e3")
		g1.AddEdge(a1, b1, e1)
		g1.AddEdge(a2, b1, e2)
		g1.AddEdge(a3, b1, e3)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2,a3"))
		b1 := NewVertex(NewNoopResTest("b1"))
		e := NewEdge("e1,e2,e3")
		g2.AddEdge(a, b1, e)
	}
	runGraphCmp(t, g1, g2)
}

// chain merge
//    a1             a1
//   /  \             |
// b1    b2  >>>    b1,b2   (arrows point downwards)
//   \  /             |
//    c1             c1
func TestPgraphGrouping15(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b1 := NewVertex(NewNoopResTest("b1"))
		b2 := NewVertex(NewNoopResTest("b2"))
		c1 := NewVertex(NewNoopResTest("c1"))
		e1 := NewEdge("e1")
		e2 := NewEdge("e2")
		e3 := NewEdge("e3")
		e4 := NewEdge("e4")
		g1.AddEdge(a1, b1, e1)
		g1.AddEdge(a1, b2, e2)
		g1.AddEdge(b1, c1, e3)
		g1.AddEdge(b2, c1, e4)
	}
	g2 := NewGraph("g2") // expected result
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b := NewVertex(NewNoopResTest("b1,b2"))
		c1 := NewVertex(NewNoopResTest("c1"))
		e1 := NewEdge("e1,e2")
		e2 := NewEdge("e3,e4")
		g2.AddEdge(a1, b, e1)
		g2.AddEdge(b, c1, e2)
	}
	runGraphCmp(t, g1, g2)
}

// re-attach 1 (outer)
// technically the second possibility is valid too, depending on which order we
// merge edges in, and if we don't filter out any unnecessary edges afterwards!
// a1    a2        a1,a2       a1,a2
//  |   /            |           |  \
// b1  /     >>>    b1     OR   b1  /   (arrows point downwards)
//  | /              |           | /
// c1               c1          c1
func TestPgraphGrouping16(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		c1 := NewVertex(NewNoopResTest("c1"))
		e1 := NewEdge("e1")
		e2 := NewEdge("e2")
		e3 := NewEdge("e3")
		g1.AddEdge(a1, b1, e1)
		g1.AddEdge(b1, c1, e2)
		g1.AddEdge(a2, c1, e3)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		c1 := NewVertex(NewNoopResTest("c1"))
		e1 := NewEdge("e1,e3")
		e2 := NewEdge("e2,e3") // e3 gets "merged through" to BOTH edges!
		g2.AddEdge(a, b1, e1)
		g2.AddEdge(b1, c1, e2)
	}
	runGraphCmp(t, g1, g2)
}

// re-attach 2 (inner)
// a1    b2        a1
//  |   /           |
// b1  /     >>>  b1,b2   (arrows point downwards)
//  | /             |
// c1              c1
func TestPgraphGrouping17(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b1 := NewVertex(NewNoopResTest("b1"))
		b2 := NewVertex(NewNoopResTest("b2"))
		c1 := NewVertex(NewNoopResTest("c1"))
		e1 := NewEdge("e1")
		e2 := NewEdge("e2")
		e3 := NewEdge("e3")
		g1.AddEdge(a1, b1, e1)
		g1.AddEdge(b1, c1, e2)
		g1.AddEdge(b2, c1, e3)
	}
	g2 := NewGraph("g2") // expected result
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		b := NewVertex(NewNoopResTest("b1,b2"))
		c1 := NewVertex(NewNoopResTest("c1"))
		e1 := NewEdge("e1")
		e2 := NewEdge("e2,e3")
		g2.AddEdge(a1, b, e1)
		g2.AddEdge(b, c1, e2)
	}
	runGraphCmp(t, g1, g2)
}

// re-attach 3 (double)
// similar to "re-attach 1", technically there is a second possibility for this
// a2   a1    b2         a1,a2
//   \   |   /             |
//    \  b1 /     >>>    b1,b2   (arrows point downwards)
//     \ | /               |
//      c1                c1
func TestPgraphGrouping18(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		b1 := NewVertex(NewNoopResTest("b1"))
		b2 := NewVertex(NewNoopResTest("b2"))
		c1 := NewVertex(NewNoopResTest("c1"))
		e1 := NewEdge("e1")
		e2 := NewEdge("e2")
		e3 := NewEdge("e3")
		e4 := NewEdge("e4")
		g1.AddEdge(a1, b1, e1)
		g1.AddEdge(b1, c1, e2)
		g1.AddEdge(a2, c1, e3)
		g1.AddEdge(b2, c1, e4)
	}
	g2 := NewGraph("g2") // expected result
	{
		a := NewVertex(NewNoopResTest("a1,a2"))
		b := NewVertex(NewNoopResTest("b1,b2"))
		c1 := NewVertex(NewNoopResTest("c1"))
		e1 := NewEdge("e1,e3")
		e2 := NewEdge("e2,e3,e4") // e3 gets "merged through" to BOTH edges!
		g2.AddEdge(a, b, e1)
		g2.AddEdge(b, c1, e2)
	}
	runGraphCmp(t, g1, g2)
}

// connected merge 0, (no change!)
// a1            a1
//   \     >>>     \     (arrows point downwards)
//    a2            a2
func TestPgraphGroupingConnected0(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		e1 := NewEdge("e1")
		g1.AddEdge(a1, a2, e1)
	}
	g2 := NewGraph("g2") // expected result ?
	{
		a1 := NewVertex(NewNoopResTest("a1"))
		a2 := NewVertex(NewNoopResTest("a2"))
		e1 := NewEdge("e1")
		g2.AddEdge(a1, a2, e1)
	}
	runGraphCmp(t, g1, g2)
}

// connected merge 1, (no change!)
// a1              a1
//   \               \
//    b      >>>      b      (arrows point downwards)
//     \               \
//      a2              a2
func TestPgraphGroupingConnected1(t *testing.T) {
	g1 := NewGraph("g1") // original graph
	{
		a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
b := NewVertex(NewNoopResTest("b"))
|
||||||
|
a2 := NewVertex(NewNoopResTest("a2"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
e2 := NewEdge("e2")
|
||||||
|
g1.AddEdge(a1, b, e1)
|
||||||
|
g1.AddEdge(b, a2, e2)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result ?
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTest("a1"))
|
||||||
|
b := NewVertex(NewNoopResTest("b"))
|
||||||
|
a2 := NewVertex(NewNoopResTest("a2"))
|
||||||
|
e1 := NewEdge("e1")
|
||||||
|
e2 := NewEdge("e2")
|
||||||
|
g2.AddEdge(a1, b, e1)
|
||||||
|
g2.AddEdge(b, a2, e2)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package pgraph
+package pgraph // TODO: this should be a subpackage
 
 import (
 	"fmt"
@@ -46,14 +46,14 @@ func (g *Graph) Graphviz() (out string) {
 	//out += "\tnode [shape=box];\n"
 	str := ""
 	for i := range g.Adjacency { // reverse paths
-		out += fmt.Sprintf("\t%s [label=\"%s[%s]\"];\n", i.GetName(), i.Kind(), i.GetName())
+		out += fmt.Sprintf("\t\"%s\" [label=\"%s[%s]\"];\n", i.GetName(), i.Kind(), i.GetName())
 		for j := range g.Adjacency[i] {
 			k := g.Adjacency[i][j]
 			// use str for clearer output ordering
 			if k.Notify {
-				str += fmt.Sprintf("\t%s -> %s [label=%s,style=bold];\n", i.GetName(), j.GetName(), k.Name)
+				str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\",style=bold];\n", i.GetName(), j.GetName(), k.Name)
 			} else {
-				str += fmt.Sprintf("\t%s -> %s [label=%s];\n", i.GetName(), j.GetName(), k.Name)
+				str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\"];\n", i.GetName(), j.GetName(), k.Name)
 			}
 		}
 	}
@@ -64,7 +64,7 @@ func (g *Graph) Graphviz() (out string) {
 
 // ExecGraphviz writes out the graphviz data and runs the correct graphviz
 // filter command.
-func (g *Graph) ExecGraphviz(program, filename string) error {
+func (g *Graph) ExecGraphviz(program, filename, hostname string) error {
 
 	switch program {
 	case "dot", "neato", "twopi", "circo", "fdp":
@@ -76,6 +76,10 @@ func (g *Graph) ExecGraphviz(program, filename string) error {
 		return fmt.Errorf("no filename given")
 	}
 
+	if hostname != "" {
+		filename = fmt.Sprintf("%s@%s", filename, hostname)
+	}
+
 	// run as a normal user if possible when run with sudo
 	uid, err1 := strconv.Atoi(os.Getenv("SUDO_UID"))
 	gid, err2 := strconv.Atoi(os.Getenv("SUDO_GID"))
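For context, a hedged usage sketch of the new ExecGraphviz signature (the caller, path and variable names below are illustrative, not taken from this patch): passing a non-empty hostname makes the graph land in "filename@hostname", so per-host dumps from a cluster no longer overwrite each other.

// illustrative only: g is a *Graph built elsewhere; an empty hostname keeps
// the old single-file behaviour
func dumpGraph(g *Graph, hostname string) error {
	return g.ExecGraphviz("dot", "/tmp/mgmt-graph.dot", hostname)
}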
@@ -24,7 +24,9 @@ import(
 	"sync"
 
 	"github.com/purpleidea/mgmt/event"
+	"github.com/purpleidea/mgmt/prometheus"
 	"github.com/purpleidea/mgmt/resources"
+	"github.com/purpleidea/mgmt/util/semaphore"
 
 	errwrap "github.com/pkg/errors"
 )
@@ -58,6 +60,9 @@ type Graph struct {
 	state graphState
 	mutex *sync.Mutex // used when modifying graph State variable
 	wg    *sync.WaitGroup
+	semas map[string]*semaphore.Semaphore
+
+	prometheus *prometheus.Prometheus // the prometheus instance
 }
 
 // Vertex is the primary vertex struct in this library.
@@ -83,6 +88,7 @@ func NewGraph(name string) *Graph {
 		// ptr b/c: Mutex/WaitGroup must not be copied after first use
 		mutex: &sync.Mutex{},
 		wg:    &sync.WaitGroup{},
+		semas: make(map[string]*semaphore.Semaphore),
 	}
 }
 
@@ -119,6 +125,9 @@ func (g *Graph) Copy() *Graph {
 		state: g.state,
 		mutex: g.mutex,
 		wg:    g.wg,
+		semas: g.semas,
+
+		prometheus: g.prometheus,
 	}
 	for k, v := range g.Adjacency {
 		newGraph.Adjacency[k] = v // copy
@@ -645,8 +654,11 @@ func (g *Graph) GraphMetas() []*resources.MetaParams {
 
 // AssociateData associates some data with the object in the graph in question.
 func (g *Graph) AssociateData(data *resources.Data) {
+	// prometheus needs to be associated to this graph as well
+	g.prometheus = data.Prometheus
+
 	for k := range g.Adjacency {
-		k.Res.AssociateData(data)
+		*k.Res.Data() = *data
 	}
 }
 
@@ -718,7 +718,7 @@ Loop:
 				continue Loop
 			}
 		}
-		return fmt.Errorf("fraph g1, has no match in g2 for: %v", v1.GetName())
+		return fmt.Errorf("graph g1, has no match in g2 for: %v", v1.GetName())
 	}
 	// vertices (and groups) match :)
 
@@ -764,6 +764,18 @@ Loop:
 		}
 	}
 
+	// check meta parameters
+	for v1 := range g1.Adjacency { // for each vertex in g1
+		for v2 := range g2.Adjacency { // does it match in g2 ?
+			s1, s2 := v1.Meta().Sema, v2.Meta().Sema
+			sort.Strings(s1)
+			sort.Strings(s2)
+			if !reflect.DeepEqual(s1, s2) {
+				return fmt.Errorf("vertex %s and vertex %s have different semaphores", v1.GetName(), v2.GetName())
+			}
+		}
+	}
+
 	return nil // success!
 }
 
@@ -805,7 +817,11 @@ func (ag *testGrouper) edgeMerge(e1, e2 *Edge) *Edge {
 func (g *Graph) fullPrint() (str string) {
 	str += "\n"
 	for v := range g.Adjacency {
-		str += fmt.Sprintf("* v: %v\n", v.GetName())
+		if semas := v.Meta().Sema; len(semas) > 0 {
+			str += fmt.Sprintf("* v: %v; sema: %v\n", v.GetName(), semas)
+		} else {
+			str += fmt.Sprintf("* v: %v\n", v.GetName())
+		}
 		// TODO: add explicit grouping data?
 	}
 	for v1 := range g.Adjacency {
@@ -831,470 +847,6 @@ func runGraphCmp(t *testing.T, g1, g2 *Graph) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// all of the following test cases are laid out with the following semantics:
|
|
||||||
// * vertices which start with the same single letter are considered "like"
|
|
||||||
// * "like" elements should be merged
|
|
||||||
// * vertices can have any integer after their single letter "family" type
|
|
||||||
// * grouped vertices should have a name with a comma separated list of names
|
|
||||||
// * edges follow the same conventions about grouping
|
|
||||||
|
|
||||||
// empty graph
|
|
||||||
func TestPgraphGrouping1(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// single vertex
|
|
||||||
func TestPgraphGrouping2(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{ // grouping to limit variable scope
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
g1.AddVertex(a1)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
g2.AddVertex(a1)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// two vertices
|
|
||||||
func TestPgraphGrouping3(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
g1.AddVertex(a1, b1)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
g2.AddVertex(a1, b1)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// two vertices merge
|
|
||||||
func TestPgraphGrouping4(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
g1.AddVertex(a1, a2)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
g2.AddVertex(a)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// three vertices merge
|
|
||||||
func TestPgraphGrouping5(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
a3 := NewVertex(NewNoopResTest("a3"))
|
|
||||||
g1.AddVertex(a1, a2, a3)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2,a3"))
|
|
||||||
g2.AddVertex(a)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// three vertices, two merge
|
|
||||||
func TestPgraphGrouping6(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
g1.AddVertex(a1, a2, b1)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
g2.AddVertex(a, b1)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// four vertices, three merge
|
|
||||||
func TestPgraphGrouping7(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
a3 := NewVertex(NewNoopResTest("a3"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
g1.AddVertex(a1, a2, a3, b1)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2,a3"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
g2.AddVertex(a, b1)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// four vertices, two&two merge
|
|
||||||
func TestPgraphGrouping8(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
b2 := NewVertex(NewNoopResTest("b2"))
|
|
||||||
g1.AddVertex(a1, a2, b1, b2)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2"))
|
|
||||||
g2.AddVertex(a, b)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// five vertices, two&three merge
|
|
||||||
func TestPgraphGrouping9(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
b2 := NewVertex(NewNoopResTest("b2"))
|
|
||||||
b3 := NewVertex(NewNoopResTest("b3"))
|
|
||||||
g1.AddVertex(a1, a2, b1, b2, b3)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2,b3"))
|
|
||||||
g2.AddVertex(a, b)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// three unique vertices
|
|
||||||
func TestPgraphGrouping10(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
g1.AddVertex(a1, b1, c1)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
g2.AddVertex(a1, b1, c1)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// three unique vertices, two merge
|
|
||||||
func TestPgraphGrouping11(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
b2 := NewVertex(NewNoopResTest("b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
g1.AddVertex(a1, b1, b2, c1)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
g2.AddVertex(a1, b, c1)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// simple merge 1
|
|
||||||
// a1 a2 a1,a2
|
|
||||||
// \ / >>> | (arrows point downwards)
|
|
||||||
// b b
|
|
||||||
func TestPgraphGrouping12(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(a2, b1, e2)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
e := NewEdge("e1,e2")
|
|
||||||
g2.AddEdge(a, b1, e)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// simple merge 2
|
|
||||||
// b b
|
|
||||||
// / \ >>> | (arrows point downwards)
|
|
||||||
// a1 a2 a1,a2
|
|
||||||
func TestPgraphGrouping13(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
g1.AddEdge(b1, a1, e1)
|
|
||||||
g1.AddEdge(b1, a2, e2)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
e := NewEdge("e1,e2")
|
|
||||||
g2.AddEdge(b1, a, e)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// triple merge
|
|
||||||
// a1 a2 a3 a1,a2,a3
|
|
||||||
// \ | / >>> | (arrows point downwards)
|
|
||||||
// b b
|
|
||||||
func TestPgraphGrouping14(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
a3 := NewVertex(NewNoopResTest("a3"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
e3 := NewEdge("e3")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(a2, b1, e2)
|
|
||||||
g1.AddEdge(a3, b1, e3)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2,a3"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
e := NewEdge("e1,e2,e3")
|
|
||||||
g2.AddEdge(a, b1, e)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// chain merge
|
|
||||||
// a1 a1
|
|
||||||
// / \ |
|
|
||||||
// b1 b2 >>> b1,b2 (arrows point downwards)
|
|
||||||
// \ / |
|
|
||||||
// c1 c1
|
|
||||||
func TestPgraphGrouping15(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
b2 := NewVertex(NewNoopResTest("b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
e3 := NewEdge("e3")
|
|
||||||
e4 := NewEdge("e4")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(a1, b2, e2)
|
|
||||||
g1.AddEdge(b1, c1, e3)
|
|
||||||
g1.AddEdge(b2, c1, e4)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1,e2")
|
|
||||||
e2 := NewEdge("e3,e4")
|
|
||||||
g2.AddEdge(a1, b, e1)
|
|
||||||
g2.AddEdge(b, c1, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// re-attach 1 (outer)
|
|
||||||
// technically the second possibility is valid too, depending on which order we
|
|
||||||
// merge edges in, and if we don't filter out any unnecessary edges afterwards!
|
|
||||||
// a1 a2 a1,a2 a1,a2
|
|
||||||
// | / | | \
|
|
||||||
// b1 / >>> b1 OR b1 / (arrows point downwards)
|
|
||||||
// | / | | /
|
|
||||||
// c1 c1 c1
|
|
||||||
func TestPgraphGrouping16(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
e3 := NewEdge("e3")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(b1, c1, e2)
|
|
||||||
g1.AddEdge(a2, c1, e3)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1,e3")
|
|
||||||
e2 := NewEdge("e2,e3") // e3 gets "merged through" to BOTH edges!
|
|
||||||
g2.AddEdge(a, b1, e1)
|
|
||||||
g2.AddEdge(b1, c1, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// re-attach 2 (inner)
|
|
||||||
// a1 b2 a1
|
|
||||||
// | / |
|
|
||||||
// b1 / >>> b1,b2 (arrows point downwards)
|
|
||||||
// | / |
|
|
||||||
// c1 c1
|
|
||||||
func TestPgraphGrouping17(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
b2 := NewVertex(NewNoopResTest("b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
e3 := NewEdge("e3")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(b1, c1, e2)
|
|
||||||
g1.AddEdge(b2, c1, e3)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2,e3")
|
|
||||||
g2.AddEdge(a1, b, e1)
|
|
||||||
g2.AddEdge(b, c1, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// re-attach 3 (double)
|
|
||||||
// similar to "re-attach 1", technically there is a second possibility for this
|
|
||||||
// a2 a1 b2 a1,a2
|
|
||||||
// \ | / |
|
|
||||||
// \ b1 / >>> b1,b2 (arrows point downwards)
|
|
||||||
// \ | / |
|
|
||||||
// c1 c1
|
|
||||||
func TestPgraphGrouping18(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
b1 := NewVertex(NewNoopResTest("b1"))
|
|
||||||
b2 := NewVertex(NewNoopResTest("b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
e3 := NewEdge("e3")
|
|
||||||
e4 := NewEdge("e4")
|
|
||||||
g1.AddEdge(a1, b1, e1)
|
|
||||||
g1.AddEdge(b1, c1, e2)
|
|
||||||
g1.AddEdge(a2, c1, e3)
|
|
||||||
g1.AddEdge(b2, c1, e4)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result
|
|
||||||
{
|
|
||||||
a := NewVertex(NewNoopResTest("a1,a2"))
|
|
||||||
b := NewVertex(NewNoopResTest("b1,b2"))
|
|
||||||
c1 := NewVertex(NewNoopResTest("c1"))
|
|
||||||
e1 := NewEdge("e1,e3")
|
|
||||||
e2 := NewEdge("e2,e3,e4") // e3 gets "merged through" to BOTH edges!
|
|
||||||
g2.AddEdge(a, b, e1)
|
|
||||||
g2.AddEdge(b, c1, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// connected merge 0, (no change!)
|
|
||||||
// a1 a1
|
|
||||||
// \ >>> \ (arrows point downwards)
|
|
||||||
// a2 a2
|
|
||||||
func TestPgraphGroupingConnected0(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
g1.AddEdge(a1, a2, e1)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result ?
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
g2.AddEdge(a1, a2, e1)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// connected merge 1, (no change!)
|
|
||||||
// a1 a1
|
|
||||||
// \ \
|
|
||||||
// b >>> b (arrows point downwards)
|
|
||||||
// \ \
|
|
||||||
// a2 a2
|
|
||||||
func TestPgraphGroupingConnected1(t *testing.T) {
|
|
||||||
g1 := NewGraph("g1") // original graph
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
g1.AddEdge(a1, b, e1)
|
|
||||||
g1.AddEdge(b, a2, e2)
|
|
||||||
}
|
|
||||||
g2 := NewGraph("g2") // expected result ?
|
|
||||||
{
|
|
||||||
a1 := NewVertex(NewNoopResTest("a1"))
|
|
||||||
b := NewVertex(NewNoopResTest("b"))
|
|
||||||
a2 := NewVertex(NewNoopResTest("a2"))
|
|
||||||
e1 := NewEdge("e1")
|
|
||||||
e2 := NewEdge("e2")
|
|
||||||
g2.AddEdge(a1, b, e1)
|
|
||||||
g2.AddEdge(b, a2, e2)
|
|
||||||
}
|
|
||||||
runGraphCmp(t, g1, g2)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDurationAssumptions(t *testing.T) {
|
func TestDurationAssumptions(t *testing.T) {
|
||||||
var d time.Duration
|
var d time.Duration
|
||||||
if (d == 0) != true {
|
if (d == 0) != true {
|
||||||
pgraph/semaphore.go (new file, 78 lines)
@@ -0,0 +1,78 @@
// Mgmt
// Copyright (C) 2013-2017+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package pgraph

import (
	"fmt"
	"sort"
	"strconv"
	"strings"

	"github.com/purpleidea/mgmt/util/semaphore"

	multierr "github.com/hashicorp/go-multierror"
)

// SemaSep is the trailing separator to split the semaphore id from the size.
const SemaSep = ":"

// SemaLock acquires the list of semaphores in the graph.
func (g *Graph) SemaLock(semas []string) error {
	var reterr error
	sort.Strings(semas) // very important to avoid deadlock in the dag!
	for _, id := range semas {

		size := 1 // default semaphore size
		// valid id's include "some_id", "hello:42" and ":13"
		if index := strings.LastIndex(id, SemaSep); index > -1 && (len(id)-index+len(SemaSep)) >= 1 {
			// NOTE: we only allow size > 0 here!
			if i, err := strconv.Atoi(id[index+len(SemaSep):]); err == nil && i > 0 {
				size = i
			}
		}

		sema, ok := g.semas[id] // lookup
		if !ok {
			g.semas[id] = semaphore.NewSemaphore(size)
			sema = g.semas[id]
		}

		if err := sema.P(1); err != nil { // lock!
			reterr = multierr.Append(reterr, err) // list of errors
		}
	}
	return reterr
}

// SemaUnlock releases the list of semaphores in the graph.
func (g *Graph) SemaUnlock(semas []string) error {
	var reterr error
	sort.Strings(semas) // unlock in the same order to remove partial locks
	for _, id := range semas {
		sema, ok := g.semas[id] // lookup
		if !ok {
			// programming error!
			panic(fmt.Sprintf("graph: sema: %s does not exist", id))
		}

		if err := sema.V(1); err != nil { // unlock!
			reterr = multierr.Append(reterr, err) // list of errors
		}
	}
	return reterr
}
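A short, hedged usage sketch of SemaLock/SemaUnlock (the semaphore ids are invented for illustration): an id ending in ":N" caps concurrency at N, any other id defaults to size 1, and sorting before locking is what prevents two holders from deadlocking each other.

// illustrative only: g is a *Graph; "deploy:2" allows two concurrent holders,
// "backup" allows one; unlocking must use the same list
func withSemas(g *Graph, work func() error) error {
	semas := []string{"deploy:2", "backup"}
	if err := g.SemaLock(semas); err != nil { // blocks until all are acquired
		return err
	}
	defer g.SemaUnlock(semas) // released in the same sorted order
	return work()
}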
pgraph/semaphore_test.go (new file, 93 lines)
@@ -0,0 +1,93 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package pgraph
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/purpleidea/mgmt/resources"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewNoopResTestSema(name string, semas []string) *NoopResTest {
|
||||||
|
obj := &NoopResTest{
|
||||||
|
NoopRes: resources.NoopRes{
|
||||||
|
BaseRes: resources.BaseRes{
|
||||||
|
Name: name,
|
||||||
|
MetaParams: resources.MetaParams{
|
||||||
|
AutoGroup: true, // always autogroup
|
||||||
|
Sema: semas,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPgraphSemaphoreGrouping1(t *testing.T) {
|
||||||
|
g1 := NewGraph("g1") // original graph
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:1"}))
|
||||||
|
a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
|
||||||
|
a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
|
||||||
|
g1.AddVertex(a1)
|
||||||
|
g1.AddVertex(a2)
|
||||||
|
g1.AddVertex(a3)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result
|
||||||
|
{
|
||||||
|
a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:1", "s:2", "s:3"}))
|
||||||
|
g2.AddVertex(a123)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPgraphSemaphoreGrouping2(t *testing.T) {
|
||||||
|
g1 := NewGraph("g1") // original graph
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:10", "s:11"}))
|
||||||
|
a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
|
||||||
|
a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
|
||||||
|
g1.AddVertex(a1)
|
||||||
|
g1.AddVertex(a2)
|
||||||
|
g1.AddVertex(a3)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result
|
||||||
|
{
|
||||||
|
a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:10", "s:11", "s:2", "s:3"}))
|
||||||
|
g2.AddVertex(a123)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPgraphSemaphoreGrouping3(t *testing.T) {
|
||||||
|
g1 := NewGraph("g1") // original graph
|
||||||
|
{
|
||||||
|
a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:1", "s:2"}))
|
||||||
|
a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
|
||||||
|
a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
|
||||||
|
g1.AddVertex(a1)
|
||||||
|
g1.AddVertex(a2)
|
||||||
|
g1.AddVertex(a3)
|
||||||
|
}
|
||||||
|
g2 := NewGraph("g2") // expected result
|
||||||
|
{
|
||||||
|
a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:1", "s:2", "s:3"}))
|
||||||
|
g2.AddVertex(a123)
|
||||||
|
}
|
||||||
|
runGraphCmp(t, g1, g2)
|
||||||
|
}
|
||||||
@@ -36,7 +36,8 @@ const DefaultPrometheusListen = "127.0.0.1:9233"
 type Prometheus struct {
 	Listen string // the listen specification for the net/http server
 
 	checkApplyTotal *prometheus.CounterVec // total of CheckApplies that have been triggered
+	pgraphStartTimeSeconds prometheus.Gauge // process start time in seconds since unix epoch
 
 }
 
@@ -59,6 +60,14 @@ func (obj *Prometheus) Init() error {
 	)
 	prometheus.MustRegister(obj.checkApplyTotal)
 
+	obj.pgraphStartTimeSeconds = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "mgmt_graph_start_time_seconds",
+			Help: "Start time of the current graph since unix epoch in seconds.",
+		},
+	)
+	prometheus.MustRegister(obj.pgraphStartTimeSeconds)
+
 	return nil
 }
 
@@ -80,8 +89,21 @@ func (obj *Prometheus) Stop() error {
 // UpdateCheckApplyTotal refreshes the failing gauge by parsing the internal
 // state map.
 func (obj *Prometheus) UpdateCheckApplyTotal(kind string, apply, eventful, errorful bool) error {
+	if obj == nil {
+		return nil // happens when mgmt is launched without --prometheus
+	}
 	labels := prometheus.Labels{"kind": kind, "apply": strconv.FormatBool(apply), "eventful": strconv.FormatBool(eventful), "errorful": strconv.FormatBool(errorful)}
 	metric := obj.checkApplyTotal.With(labels)
 	metric.Inc()
 	return nil
 }
+
+// UpdatePgraphStartTime updates the mgmt_graph_start_time_seconds metric
+// to the current timestamp.
+func (obj *Prometheus) UpdatePgraphStartTime() error {
+	if obj == nil {
+		return nil // happens when mgmt is launched without --prometheus
+	}
+	obj.pgraphStartTimeSeconds.SetToCurrentTime()
+	return nil
+}
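A hedged sketch of how the new gauge is presumably meant to be driven (the call site is illustrative; this diff only adds the method and its registration): the engine can bump the metric whenever a freshly swapped-in graph starts running, and the nil receiver check above makes the call safe when --prometheus is off.

// illustrative only: prom may be nil when prometheus support is disabled
func markGraphStarted(prom *Prometheus) {
	if err := prom.UpdatePgraphStartTime(); err != nil {
		log.Printf("prometheus: could not update graph start time: %v", err)
	}
}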
@@ -63,6 +63,7 @@ import (
 
 	cv "github.com/purpleidea/mgmt/converger"
 	"github.com/purpleidea/mgmt/util"
+	"github.com/purpleidea/mgmt/util/semaphore"
 	"github.com/purpleidea/mgmt/yamlgraph"
 
 	multierr "github.com/hashicorp/go-multierror"
@@ -698,16 +699,16 @@ type Remotes struct {
 	converger   cv.Converger
 	convergerCb func(func(map[string]bool) error) (func(), error)
 
 	wg        sync.WaitGroup    // keep track of each running SSH connection
 	lock      sync.Mutex        // mutex for access to sshmap
 	sshmap    map[string]*SSH   // map to each SSH struct with the remote as the key
 	exiting   bool              // flag to let us know if we're exiting
 	exitChan  chan struct{}     // closes when we should exit
-	semaphore Semaphore         // counting semaphore to limit concurrent connections
+	semaphore *semaphore.Semaphore // counting semaphore to limit concurrent connections
 	hostnames []string          // list of hostnames we've seen so far
 	cuid      cv.UID            // convergerUID for the remote itself
 	cuids     map[string]cv.UID // map to each SSH struct with the remote as the key
 	callbackCancelFunc func()   // stored callback function cancel function
 
 	flags Flags // constant runtime values
 }
@@ -730,7 +731,7 @@ func NewRemotes(clientURLs, remoteURLs []string, noop bool, remotes []string, fi
 		convergerCb: convergerCb,
 		sshmap:      make(map[string]*SSH),
 		exitChan:    make(chan struct{}),
-		semaphore:   NewSemaphore(int(cConns)),
+		semaphore:   semaphore.NewSemaphore(int(cConns)),
 		hostnames:   make([]string, len(remotes)),
 		cuids:       make(map[string]cv.UID),
 		flags:       flags,
@@ -1078,29 +1079,6 @@ func cleanURL(s string) string {
 	return u.Host
 }
 
-// Semaphore is a counting semaphore.
-type Semaphore chan struct{}
-
-// NewSemaphore creates a new semaphore.
-func NewSemaphore(size int) Semaphore {
-	return make(Semaphore, size)
-}
-
-// P acquires n resources.
-func (s Semaphore) P(n int) {
-	e := struct{}{}
-	for i := 0; i < n; i++ {
-		s <- e // acquire one
-	}
-}
-
-// V releases n resources.
-func (s Semaphore) V(n int) {
-	for i := 0; i < n; i++ {
-		<-s // release one
-	}
-}
-
 // combinedWriter mimics what the ssh.CombinedOutput command does.
 type combinedWriter struct {
 	b bytes.Buffer
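The swap from the local channel type to util/semaphore changes the call pattern slightly; a hedged before/after sketch (sizes and names are illustrative): the removed Semaphore blocked silently on P and V, while the shared package reports errors that callers are expected to check, as SemaLock/SemaUnlock do above.

// old, removed above:  s := NewSemaphore(4); s.P(1); defer s.V(1)
// new, via util/semaphore (illustrative caller):
func withSlot(s *semaphore.Semaphore, work func() error) error {
	if err := s.P(1); err != nil { // acquire one connection slot
		return err
	}
	defer s.V(1) // release it; error dropped here for brevity
	return work()
}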
@@ -47,7 +47,7 @@ func init() {
 type AugeasRes struct {
 	BaseRes `yaml:",inline"`
 
-	// File is the path to the file targetted by this resource.
+	// File is the path to the file targeted by this resource.
 	File string `yaml:"file"`
 
 	// Lens is the lens used by this resource. If specified, mgmt
@@ -93,7 +93,7 @@ func (obj *AugeasRes) Validate() error {
 
 // Init initiates the resource.
 func (obj *AugeasRes) Init() error {
-	obj.BaseRes.kind = "Augeas"
+	obj.BaseRes.kind = "augeas"
 	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
@@ -74,7 +74,7 @@ func (obj *ExecRes) Validate() error {
 
 // Init runs some startup code for this resource.
 func (obj *ExecRes) Init() error {
-	obj.BaseRes.kind = "Exec"
+	obj.BaseRes.kind = "exec"
 	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
@@ -147,7 +147,7 @@ func (obj *FileRes) Init() error {
 	obj.path = obj.GetPath() // compute once
 	obj.isDir = strings.HasSuffix(obj.path, "/") // dirs have trailing slashes
 
-	obj.BaseRes.kind = "File"
+	obj.BaseRes.kind = "file"
 	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
@@ -87,7 +87,7 @@ func (obj *HostnameRes) Validate() error {
 
 // Init runs some startup code for this resource.
 func (obj *HostnameRes) Init() error {
-	obj.BaseRes.kind = "Hostname"
+	obj.BaseRes.kind = "hostname"
 	if obj.PrettyHostname == "" {
 		obj.PrettyHostname = obj.Hostname
 	}
resources/kv.go (new file, 304 lines)
@@ -0,0 +1,304 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package resources
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/gob"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
errwrap "github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
gob.Register(&KVRes{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// KVResSkipCmpStyle represents the different styles of comparison when using SkipLessThan.
|
||||||
|
type KVResSkipCmpStyle int
|
||||||
|
|
||||||
|
// These are the different allowed comparison styles. Most folks will want SkipCmpStyleInt.
|
||||||
|
const (
|
||||||
|
SkipCmpStyleInt KVResSkipCmpStyle = iota
|
||||||
|
SkipCmpStyleString
|
||||||
|
)
|
||||||
|
|
||||||
|
// KVRes is a resource which writes a key/value pair into cluster wide storage.
|
||||||
|
// It will ensure that the key is set to the requested value. The one exception
|
||||||
|
// is that if you use the SkipLessThan parameter, then it will only replace the
|
||||||
|
// stored value with the requested value if it is greater than that stored one.
|
||||||
|
// This allows the KV resource to be used in fast acting, finite state machines
|
||||||
|
// which have monotonically increasing state values that represent progression.
|
||||||
|
// The one exception is that when this resource receives a refresh signal, then
|
||||||
|
// it will set the value to be the exact one if they are not identical already.
|
||||||
|
type KVRes struct {
|
||||||
|
BaseRes `yaml:",inline"`
|
||||||
|
Key string `yaml:"key"` // key to set
|
||||||
|
Value *string `yaml:"value"` // value to set (nil to delete)
|
||||||
|
SkipLessThan bool `yaml:"skiplessthan"` // skip updates as long as stored value is greater
|
||||||
|
SkipCmpStyle KVResSkipCmpStyle `yaml:"skipcmpstyle"` // how to do the less than cmp
|
||||||
|
// TODO: does it make sense to have different backends here? (eg: local)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default returns some sensible defaults for this resource.
|
||||||
|
func (obj *KVRes) Default() Res {
|
||||||
|
return &KVRes{
|
||||||
|
BaseRes: BaseRes{
|
||||||
|
MetaParams: DefaultMetaParams, // force a default
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate if the params passed in are valid data.
|
||||||
|
// FIXME: This will catch most issues unless data is passed in after Init with
|
||||||
|
// the Send/Recv mechanism. Should the engine re-call Validate after Send/Recv?
|
||||||
|
func (obj *KVRes) Validate() error {
|
||||||
|
if obj.Key == "" {
|
||||||
|
return fmt.Errorf("key must not be empty")
|
||||||
|
}
|
||||||
|
if obj.SkipLessThan {
|
||||||
|
if obj.SkipCmpStyle != SkipCmpStyleInt && obj.SkipCmpStyle != SkipCmpStyleString {
|
||||||
|
return fmt.Errorf("the SkipCmpStyle of %v is invalid", obj.SkipCmpStyle)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v := obj.Value; obj.SkipCmpStyle == SkipCmpStyleInt && v != nil {
|
||||||
|
if _, err := strconv.Atoi(*v); err != nil {
|
||||||
|
return fmt.Errorf("the set value of %v can't convert to int", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return obj.BaseRes.Validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the resource.
|
||||||
|
func (obj *KVRes) Init() error {
|
||||||
|
obj.BaseRes.kind = "kv"
|
||||||
|
return obj.BaseRes.Init() // call base init, b/c we're overriding
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch is the primary listener for this resource and it outputs events.
|
||||||
|
func (obj *KVRes) Watch() error {
|
||||||
|
|
||||||
|
// notify engine that we're running
|
||||||
|
if err := obj.Running(); err != nil {
|
||||||
|
return err // bubble up a NACK...
|
||||||
|
}
|
||||||
|
|
||||||
|
ch := obj.Data().World.StrWatch(obj.Key) // get possible events!
|
||||||
|
|
||||||
|
var send = false // send event?
|
||||||
|
var exit *error
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
// NOTE: this part is very similar to the file resource code
|
||||||
|
case err, ok := <-ch:
|
||||||
|
if !ok { // channel shutdown
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return errwrap.Wrapf(err, "unknown %s[%s] watcher error", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
if obj.Data().Debug {
|
||||||
|
log.Printf("%s[%s]: Event!", obj.Kind(), obj.GetName())
|
||||||
|
}
|
||||||
|
send = true
|
||||||
|
obj.StateOK(false) // dirty
|
||||||
|
|
||||||
|
case event := <-obj.Events():
|
||||||
|
// we avoid sending events on unpause
|
||||||
|
if exit, send = obj.ReadEvent(event); exit != nil {
|
||||||
|
return *exit // exit
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// do all our event sending all together to avoid duplicate msgs
|
||||||
|
if send {
|
||||||
|
send = false
|
||||||
|
obj.Event()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// lessThanCheck checks for less than validity.
|
||||||
|
func (obj *KVRes) lessThanCheck(value string) (checkOK bool, err error) {
|
||||||
|
|
||||||
|
v := *obj.Value
|
||||||
|
if value == v { // redundant check for safety
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var refresh = obj.Refresh() // do we have a pending reload to apply?
|
||||||
|
if !obj.SkipLessThan || refresh { // update lessthan on refresh
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch obj.SkipCmpStyle {
|
||||||
|
case SkipCmpStyleInt:
|
||||||
|
intValue, err := strconv.Atoi(value)
|
||||||
|
if err != nil {
|
||||||
|
// NOTE: We don't error here since we're going to write
|
||||||
|
// over the value anyways. It could be from an old run!
|
||||||
|
return false, nil // value is bad (old/corrupt), fix it
|
||||||
|
}
|
||||||
|
if vint, err := strconv.Atoi(v); err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "can't convert %v to int", v)
|
||||||
|
} else if vint < intValue {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case SkipCmpStyleString:
|
||||||
|
if v < value { // weird way to cmp, but valid
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("unmatches SkipCmpStyle style %v", obj.SkipCmpStyle)
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckApply method for Password resource. Does nothing, returns happy!
|
||||||
|
func (obj *KVRes) CheckApply(apply bool) (checkOK bool, err error) {
|
||||||
|
log.Printf("%s[%s]: CheckApply(%t)", obj.Kind(), obj.GetName(), apply)
|
||||||
|
|
||||||
|
if val, exists := obj.Recv["Value"]; exists && val.Changed {
|
||||||
|
// if we received on Value, and it changed, wooo, nothing to do.
|
||||||
|
log.Printf("CheckApply: `Value` was updated!")
|
||||||
|
}
|
||||||
|
|
||||||
|
hostname := obj.Data().Hostname // me
|
||||||
|
keyMap, err := obj.Data().World.StrGet(obj.Key)
|
||||||
|
if err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "check error during StrGet")
|
||||||
|
}
|
||||||
|
|
||||||
|
if value, ok := keyMap[hostname]; ok && obj.Value != nil {
|
||||||
|
if value == *obj.Value {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if c, err := obj.lessThanCheck(value); err != nil {
|
||||||
|
return false, err
|
||||||
|
} else if c {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if !ok && obj.Value == nil {
|
||||||
|
return true, nil // nothing to delete, we're good!
|
||||||
|
|
||||||
|
} else if ok && obj.Value == nil { // delete
|
||||||
|
err := obj.Data().World.StrDel(obj.Key)
|
||||||
|
return false, errwrap.Wrapf(err, "apply error during StrDel")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !apply {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := obj.Data().World.StrSet(obj.Key, *obj.Value); err != nil {
|
||||||
|
return false, errwrap.Wrapf(err, "apply error during StrSet")
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// KVUID is the UID struct for KVRes.
|
||||||
|
type KVUID struct {
|
||||||
|
BaseUID
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoEdges returns the AutoEdge interface. In this case no autoedges are used.
|
||||||
|
func (obj *KVRes) AutoEdges() AutoEdge {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UIDs includes all params to make a unique identification of this object.
|
||||||
|
// Most resources only return one, although some resources can return multiple.
|
||||||
|
func (obj *KVRes) UIDs() []ResUID {
|
||||||
|
x := &KVUID{
|
||||||
|
BaseUID: BaseUID{name: obj.GetName(), kind: obj.Kind()},
|
||||||
|
name: obj.Name,
|
||||||
|
}
|
||||||
|
return []ResUID{x}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupCmp returns whether two resources can be grouped together or not.
|
||||||
|
func (obj *KVRes) GroupCmp(r Res) bool {
|
||||||
|
_, ok := r.(*KVRes)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return false // TODO: this is doable!
|
||||||
|
// TODO: it could be useful to group our writes and watches!
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare two resources and return if they are equivalent.
|
||||||
|
func (obj *KVRes) Compare(res Res) bool {
|
||||||
|
switch res.(type) {
|
||||||
|
// we can only compare KVRes to others of the same resource
|
||||||
|
case *KVRes:
|
||||||
|
res := res.(*KVRes)
|
||||||
|
if !obj.BaseRes.Compare(res) { // call base Compare
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if obj.Key != res.Key {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if (obj.Value == nil) != (res.Value == nil) { // xor
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.Value != nil && res.Value != nil {
|
||||||
|
if *obj.Value != *res.Value { // compare the strings
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if obj.SkipLessThan != res.SkipLessThan {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if obj.SkipCmpStyle != res.SkipCmpStyle {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalYAML is the custom unmarshal handler for this struct.
|
||||||
|
// It is primarily useful for setting the defaults.
|
||||||
|
func (obj *KVRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
type rawRes KVRes // indirection to avoid infinite recursion
|
||||||
|
|
||||||
|
def := obj.Default() // get the default
|
||||||
|
res, ok := def.(*KVRes) // put in the right format
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("could not convert to KVRes")
|
||||||
|
}
|
||||||
|
raw := rawRes(*res) // convert; the defaults go here
|
||||||
|
|
||||||
|
if err := unmarshal(&raw); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*obj = KVRes(raw) // restore from indirection with type conversion!
|
||||||
|
return nil
|
||||||
|
}
|
||||||
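The KVRes doc comment above describes the SkipLessThan behaviour; here is a small, hedged Go sketch of the intended monotonic-progress pattern (the key name and value are invented, and the BaseRes/name plumbing is elided): with SkipLessThan set and an integer compare style, a smaller value never clobbers a larger stored one, so cluster-wide state can only move forward.

// illustrative only: publish step "3" of a finite state machine under a shared key
three := "3"
res := &KVRes{
	Key:          "deploy-progress",
	Value:        &three,
	SkipLessThan: true,
	SkipCmpStyle: SkipCmpStyleInt,
}
_ = res // would normally be added to the graph like any other resource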
@@ -75,7 +75,7 @@ func (obj *MsgRes) Validate() error {
 
 // Init runs some startup code for this resource.
 func (obj *MsgRes) Init() error {
-	obj.BaseRes.kind = "Msg"
+	obj.BaseRes.kind = "msg"
 	return obj.BaseRes.Init() // call base init, b/c we're overrriding
 }
 
@@ -49,7 +49,7 @@ func (obj *NoopRes) Validate() error {
 
 // Init runs some startup code for this resource.
 func (obj *NoopRes) Init() error {
-	obj.BaseRes.kind = "Noop"
+	obj.BaseRes.kind = "noop"
 	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
@@ -92,7 +92,7 @@ func (obj *NspawnRes) Init() error {
 	if err := obj.svc.Init(); err != nil {
 		return err
 	}
-	obj.BaseRes.kind = "Nspawn"
+	obj.BaseRes.kind = "nspawn"
 	return obj.BaseRes.Init()
 }
 
@@ -73,7 +73,7 @@ func (obj *PasswordRes) Validate() error {
 // Init generates a new password for this resource if one was not provided. It
 // will save this into a local file. It will load it back in from previous runs.
 func (obj *PasswordRes) Init() error {
-	obj.BaseRes.kind = "Password" // must be set before using VarDir
+	obj.BaseRes.kind = "password" // must be set before using VarDir
 
 	dir, err := obj.VarDir("")
 	if err != nil {
@@ -66,7 +66,7 @@ func (obj *PkgRes) Validate() error {
 
 // Init runs some startup code for this resource.
 func (obj *PkgRes) Init() error {
-	obj.BaseRes.kind = "Pkg"
+	obj.BaseRes.kind = "pkg"
 	if err := obj.BaseRes.Init(); err != nil { // call base init, b/c we're overriding
 		return err
 	}
@@ -19,14 +19,12 @@
 package resources
 
 import (
-	"bytes"
-	"encoding/base64"
-	"encoding/gob"
 	"fmt"
 	"log"
 	"math"
 	"os"
 	"path"
+	"sort"
 	"sync"
 	"time"
 
@@ -34,6 +32,7 @@ import (
 	"github.com/purpleidea/mgmt/converger"
 	"github.com/purpleidea/mgmt/event"
 	"github.com/purpleidea/mgmt/prometheus"
+	"github.com/purpleidea/mgmt/util"
 
 	errwrap "github.com/pkg/errors"
 	"golang.org/x/time/rate"
@@ -54,12 +53,27 @@ const (
 
 const refreshPathToken = "refresh"
 
+// World is an interface to the rest of the different graph state. It allows
+// the GAPI to store state and exchange information throughout the cluster. It
+// is the interface each machine uses to communicate with the rest of the world.
+type World interface { // TODO: is there a better name for this interface?
+	ResExport([]Res) error
+	// FIXME: should this method take a "filter" data struct instead of many args?
+	ResCollect(hostnameFilter, kindFilter []string) ([]Res, error)
+
+	StrWatch(namespace string) chan error
+	StrGet(namespace string) (map[string]string, error)
+	StrSet(namespace, value string) error
+	StrDel(namespace string) error
+}
+
 // Data is the set of input values passed into the pgraph for the resources.
 type Data struct {
-	//Hostname string // uuid for the host
+	Hostname string // uuid for the host
 	//Noop bool
 	Converger  converger.Converger
 	Prometheus *prometheus.Prometheus
+	World      World
 	Prefix     string // the prefix to be used for the pgraph namespace
 	Debug      bool
 	// NOTE: we can add more fields here if needed for the resources.
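The World interface is how a resource reaches cluster-wide state, and the Data struct now carries a World handle into every resource. A rough sketch of how a resource might use it through Data(); the method name, namespace string and goroutine wiring below are illustrative assumptions, only the World method signatures come from the interface above:

// publishAndWatch is a hypothetical method, shown only as a usage sketch.
func (obj *KVRes) publishAndWatch(value string) error {
	world := obj.Data().World  // handle passed in via the Data struct
	ns := "/example/namespace" // hypothetical namespace

	if err := world.StrSet(ns, value); err != nil {
		return err
	}
	ch := world.StrWatch(ns) // chan error that fires when the namespace changes
	go func() {
		for err := range ch {
			if err != nil {
				return // watch ended or errored
			}
			kv, err := world.StrGet(ns) // current values under the namespace
			if err == nil {
				_ = kv // map[string]string; use it to react to peers
			}
		}
	}()
	return nil
}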
@@ -101,6 +115,7 @@ type MetaParams struct {
 	Poll  uint32     `yaml:"poll"`  // metaparam, number of seconds between poll intervals, 0 to watch
 	Limit rate.Limit `yaml:"limit"` // metaparam, number of events per second to allow through
 	Burst int        `yaml:"burst"` // metaparam, number of events to allow in a burst
+	Sema  []string   `yaml:"sema"`  // metaparam, list of semaphore ids (id | id:count)
 }
 
 // UnmarshalYAML is the custom unmarshal handler for the MetaParams struct. It
@@ -127,6 +142,7 @@ var DefaultMetaParams = MetaParams{
 	Poll:  0,        // defaults to watching for events
 	Limit: rate.Inf, // defaults to no limit
 	Burst: 0,        // no burst needed on an infinite rate // TODO: is this a good default?
+	//Sema: []string{},
 }
 
 // The Base interface is everything that is common to all resources.
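The new Sema metaparam accepts entries of the form id or id:count; a bare id presumably defaults to a count of one. A small hypothetical helper (not mgmt code) showing one way such an id could be split into a name and a size:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSemaID is a hypothetical helper, shown only to illustrate the
// "id | id:count" format from the Sema field comment; mgmt's real parsing
// may differ.
func parseSemaID(id string) (string, int) {
	name, size := id, 1 // a bare id presumably means a count of one
	if i := strings.LastIndex(id, ":"); i > -1 {
		if n, err := strconv.Atoi(id[i+1:]); err == nil && n > 0 {
			name, size = id[:i], n
		}
	}
	return name, size
}

func main() {
	fmt.Println(parseSemaID("db"))   // db 1
	fmt.Println(parseSemaID("db:3")) // db 3
}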
@@ -138,8 +154,10 @@ type Base interface {
 	Kind() string
 	Meta() *MetaParams
 	Events() chan *event.Event
-	AssociateData(*Data)
+	Data() *Data
 	IsWorking() bool
+	IsQuiescing() bool
+	QuiesceGroup() *sync.WaitGroup
 	WaitGroup() *sync.WaitGroup
 	Setup()
 	Reset()
@@ -196,6 +214,7 @@ type BaseRes struct {
 	Recv map[string]*Send // mapping of key to receive on from value
 
 	kind   string
+	data   Data
 	state  ResState
 	prefix string // base prefix for this resource
 
@@ -218,10 +237,12 @@ type BaseRes struct {
 	isStarted bool // did the started chan already close?
 	starter   bool // does this have indegree == 0 ? XXX: usually?
 
+	quiescing    bool // are we quiescing (pause or exit)
+	quiesceGroup *sync.WaitGroup
 	waitGroup *sync.WaitGroup
 	working   bool // is the Worker() loop running ?
 	debug     bool
 	isStateOK bool // whether the state is okay based on events or not
 
 	isGrouped bool  // am i contained within a group?
 	grouped   []Res // list of any grouped resources
@@ -314,19 +335,18 @@ func (obj *BaseRes) Init() error {
 		return fmt.Errorf("resource did not set kind")
 	}
 
-	obj.cuid = obj.converger.Register()
-	obj.wcuid = obj.converger.Register() // get a cuid for the worker!
-	obj.pcuid = obj.converger.Register() // get a cuid for the process
-
-	obj.eventsLock = &sync.Mutex{}
-	obj.eventsDone = false
-	obj.eventsChan = make(chan *event.Event) // unbuffered chan to avoid stale events
+	obj.cuid = obj.Converger().Register()
+	obj.wcuid = obj.Converger().Register() // get a cuid for the worker!
+	obj.pcuid = obj.Converger().Register() // get a cuid for the process
 
 	obj.processLock = &sync.Mutex{} // lock around processChan closing and sending
 	obj.processDone = false         // did we close processChan ?
 	obj.processChan = make(chan *event.Event)
 	obj.processSync = &sync.WaitGroup{}
 
+	obj.quiescing = false // no quiesce operation is happening at the moment
+	obj.quiesceGroup = &sync.WaitGroup{}
+
 	obj.waitGroup = &sync.WaitGroup{} // Init and Close must be 1-1 matched!
 	obj.waitGroup.Add(1)
 	obj.working = true // Worker method should now be running...
@@ -396,12 +416,9 @@ func (obj *BaseRes) Events() chan *event.Event {
 	return obj.eventsChan
 }
 
-// AssociateData associates some data with the object in question.
-func (obj *BaseRes) AssociateData(data *Data) {
-	obj.converger = data.Converger
-	obj.prometheus = data.Prometheus
-	obj.prefix = data.Prefix
-	obj.debug = data.Debug
+// Data returns an associable handle to some data passed in to the resource.
+func (obj *BaseRes) Data() *Data {
+	return &obj.data
 }
 
 // IsWorking tells us if the Worker() function is running. Not thread safe.
@@ -409,6 +426,14 @@ func (obj *BaseRes) IsWorking() bool {
 	return obj.working
 }
 
+// IsQuiescing returns if there is a quiesce operation in progress. Pause and
+// exit both meet this criteria, and this tells some systems to wind down, such
+// as the event replay mechanism.
+func (obj *BaseRes) IsQuiescing() bool { return obj.quiescing }
+
+// QuiesceGroup returns the sync group associated with the quiesce operations.
+func (obj *BaseRes) QuiesceGroup() *sync.WaitGroup { return obj.quiesceGroup }
+
 // WaitGroup returns a sync.WaitGroup which is open when the resource is done.
 // This is more useful than a closed channel signal, since it can be re-used
 // safely without having to recreate it and worry about stale channel handles.
@@ -419,7 +444,10 @@ func (obj *BaseRes) WaitGroup() *sync.WaitGroup { return obj.waitGroup }
 func (obj *BaseRes) Setup() {
 	obj.started = make(chan struct{}) // closes when started
 	obj.stopped = make(chan struct{}) // closes when stopped
-	return
+
+	obj.eventsLock = &sync.Mutex{}
+	obj.eventsDone = false
+	obj.eventsChan = make(chan *event.Event) // unbuffered chan to avoid stale events
 }
 
 // Reset from Setup.
@@ -430,7 +458,7 @@ func (obj *BaseRes) Reset() {
 // Converger returns the converger object used by the system. It can be used to
 // register new convergers if needed.
 func (obj *BaseRes) Converger() converger.Converger {
-	return obj.converger
+	return obj.data.Converger
 }
 
 // ConvergerUIDs returns the ConvergerUIDs for the resource. This is called by
@@ -493,6 +521,12 @@ func (obj *BaseRes) GroupRes(res Res) error {
 		return fmt.Errorf("the %v resource is already grouped", res)
 	}
 
+	// merging two resources into one should yield the sum of their semas
+	if semas := res.Meta().Sema; len(semas) > 0 {
+		obj.Meta().Sema = append(obj.Meta().Sema, semas...)
+		obj.Meta().Sema = util.StrRemoveDuplicatesInList(obj.Meta().Sema)
+	}
+
 	obj.grouped = append(obj.grouped, res)
 	res.SetGrouped(true) // i am contained _in_ a group
 	return nil
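Grouping merges semaphores by concatenating the two Sema lists and stripping duplicates with util.StrRemoveDuplicatesInList (added later in this changeset). A tiny worked example with made-up semaphore ids:

package main

import (
	"fmt"

	"github.com/purpleidea/mgmt/util"
)

func main() {
	a := []string{"db:3", "net"}  // semas on the resource doing the grouping
	b := []string{"db:3", "disk"} // semas on the resource being absorbed

	merged := append(a, b...)                       // db:3, net, db:3, disk
	merged = util.StrRemoveDuplicatesInList(merged) // duplicates removed, order kept
	fmt.Println(merged)                             // prints: [db:3 net disk]
}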
@@ -550,6 +584,24 @@ func (obj *BaseRes) Compare(res Res) bool {
 	if obj.Meta().Burst != res.Meta().Burst {
 		return false
 	}
+
+	// are the two slices the same?
+	cmpSlices := func(a, b []string) bool {
+		if len(a) != len(b) {
+			return false
+		}
+		sort.Strings(a)
+		sort.Strings(b)
+		for i := range a {
+			if a[i] != b[i] {
+				return false
+			}
+		}
+		return true
+	}
+	if !cmpSlices(obj.Meta().Sema, res.Meta().Sema) {
+		return false
+	}
 	return true
 }
 
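cmpSlices treats the two Sema lists as unordered sets of equal length; note that it calls sort.Strings on the slices themselves, so the comparison reorders the underlying Meta().Sema values as a side effect. For reference, a generic order-insensitive check that sorts copies instead (a sketch assuming the standard sort package is imported, not mgmt code):

// strSlicesEqualUnordered reports whether a and b contain the same strings,
// ignoring order, without mutating either input slice.
func strSlicesEqualUnordered(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	x := append([]string{}, a...) // sort copies, not the callers' slices
	y := append([]string{}, b...)
	sort.Strings(x)
	sort.Strings(y)
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}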
@@ -633,35 +685,3 @@ func (obj *BaseRes) Poll() error {
 func (obj *BaseRes) Prometheus() *prometheus.Prometheus {
 	return obj.prometheus
 }
-
-// ResToB64 encodes a resource to a base64 encoded string (after serialization)
-func ResToB64(res Res) (string, error) {
-	b := bytes.Buffer{}
-	e := gob.NewEncoder(&b)
-	err := e.Encode(&res) // pass with &
-	if err != nil {
-		return "", fmt.Errorf("Gob failed to encode: %v", err)
-	}
-	return base64.StdEncoding.EncodeToString(b.Bytes()), nil
-}
-
-// B64ToRes decodes a resource from a base64 encoded string (after deserialization)
-func B64ToRes(str string) (Res, error) {
-	var output interface{}
-	bb, err := base64.StdEncoding.DecodeString(str)
-	if err != nil {
-		return nil, fmt.Errorf("Base64 failed to decode: %v", err)
-	}
-	b := bytes.NewBuffer(bb)
-	d := gob.NewDecoder(b)
-	err = d.Decode(&output) // pass with &
-	if err != nil {
-		return nil, fmt.Errorf("Gob failed to decode: %v", err)
-	}
-	res, ok := output.(Res)
-	if !ok {
-		return nil, fmt.Errorf("Output %v is not a Res", res)
-
-	}
-	return res, nil
-}
@@ -36,6 +36,7 @@ func (obj *BaseRes) Event() error {
 		obj.processLock.Unlock()
 		return fmt.Errorf("processChan is already closed")
 	}
+	obj.quiesceGroup.Add(1) // add to processChan queue count
 	obj.processChan <- &event.Event{Kind: event.EventNil, Resp: resp} // trigger process
 	obj.processLock.Unlock()
 	return resp.Wait()
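quiesceGroup.Add(1) is the producer half of the quiesce handshake: each event queued into processChan raises the counter, the worker that drains the channel is expected to mark it done, and the pause/exit paths in ReadEvent below wait for the counter to reach zero before proceeding. A stripped-down sketch of that general pattern, illustrative only and not mgmt's actual Worker loop:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var quiesce sync.WaitGroup
	work := make(chan int)

	// consumer: processes queued work and marks each item done
	go func() {
		for n := range work {
			fmt.Println("processed", n)
			quiesce.Done()
		}
	}()

	// producer: count the item before it is queued, like Event() does
	for i := 0; i < 3; i++ {
		quiesce.Add(1)
		work <- i
	}

	quiesce.Wait() // a pause/exit path blocks here until the queue drains
	close(work)
	fmt.Println("quiesced")
}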
@@ -69,42 +70,69 @@ func (obj *BaseRes) SendEvent(ev event.Kind, err error) error {
 // ReadEvent processes events when a select gets one, and handles the pause
 // code too! The return values specify if we should exit and poke respectively.
 func (obj *BaseRes) ReadEvent(ev *event.Event) (exit *error, send bool) {
-	ev.ACK()
+	//ev.ACK()
 	err := ev.Error()
 
 	switch ev.Kind {
 	case event.EventStart:
+		ev.ACK()
 		return nil, true
 
 	case event.EventPoke:
+		ev.ACK()
 		return nil, true
 
 	case event.EventBackPoke:
+		ev.ACK()
 		return nil, true // forward poking in response to a back poke!
 
 	case event.EventExit:
+		obj.quiescing = true
+		obj.quiesceGroup.Wait()
+		obj.quiescing = false // for symmetry
+		ev.ACK()
 		// FIXME: what do we do if we have a pending refresh (poke) and an exit?
 		return &err, false
 
 	case event.EventPause:
-		// wait for next event to continue
-		select {
-		case e, ok := <-obj.Events():
-			if !ok { // shutdown
-				err := error(nil)
-				return &err, false
-			}
-			e.ACK()
-			err := e.Error()
-			if e.Kind == event.EventExit {
-				return &err, false
-			} else if e.Kind == event.EventStart { // eventContinue
-				return nil, false // don't poke on unpause!
-			}
-			// if we get a poke event here, it's a bug!
-			err = fmt.Errorf("%s[%s]: unknown event: %v, while paused", obj.Kind(), obj.GetName(), e)
-			panic(err) // TODO: return a special sentinel instead?
-			//return &err, false
+		obj.quiescing = true // set the quiesce flag to avoid event replays
+		obj.quiesceGroup.Wait()
+		obj.quiescing = false // reset
+		ev.ACK()
+
+		// wait for next event to continue, but discard any backpoking!
+		for {
+			// Consider a graph (V2->V3). If while paused, we add a
+			// new resource (V1->V2), when we unpause, V3 will run,
+			// and then V2 followed by V1 (reverse topo sort) which
+			// can cause V2 to BackPoke to V1 (since V1 needs to go
+			// first) which can panic if V1 is not running yet! The
+			// solution is to ignore the BackPoke because once that
+			// V1 vertex gets running, it will then send off a poke
+			// to V2 that it did without the need for the BackPoke!
+			select {
+			case e, ok := <-obj.Events():
+				if !ok { // shutdown
+					err := error(nil)
+					return &err, false
+				}
+				//obj.quiescing = true
+				//obj.quiesceGroup.Wait() // unnecessary, but symmetrically correct
+				//obj.quiescing = false
+				e.ACK()
+				err := e.Error()
+				if e.Kind == event.EventExit {
+					return &err, false
+				} else if e.Kind == event.EventStart { // eventContinue
+					return nil, false // don't poke on unpause!
+				} else if e.Kind == event.EventBackPoke {
+					continue // silently discard this event while paused
+				}
+				// if we get a poke event here, it's a bug!
+				err = fmt.Errorf("%s[%s]: unknown event: %v, while paused", obj.Kind(), obj.GetName(), e)
+				panic(err) // TODO: return a special sentinel instead?
+				//return &err, false
+			}
 		}
 	}
 	err = fmt.Errorf("unknown event: %v", ev)
@@ -117,7 +145,7 @@ func (obj *BaseRes) ReadEvent(ev *event.Event) (exit *error, send bool) {
 func (obj *BaseRes) Running() error {
 	// TODO: If a non-polling resource wants to use the converger, then it
 	// should probably tell Running (via an arg) to not do this. Currently
-	// it is a very unlikey race that could cause an early converge if the
+	// it's a very unlikely race that could cause an early converge if the
 	// converge timeout is very short ( ~ 1s) and the Watch method doesn't
 	// immediately SetConverged(false) to stop possible early termination.
 	if obj.Meta().Poll == 0 { // if not polling, unblock this...
@@ -65,7 +65,7 @@ func (obj *SvcRes) Validate() error {
 
 // Init runs some startup code for this resource.
 func (obj *SvcRes) Init() error {
-	obj.BaseRes.kind = "Svc"
+	obj.BaseRes.kind = "svc"
 	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
@@ -174,6 +174,8 @@ func (obj *SvcRes) Watch() error {
 					log.Printf("Svc[%s]->Stopped", svc)
 				case "reloading":
 					log.Printf("Svc[%s]->Reloading", svc)
+				case "failed":
+					log.Printf("Svc[%s]->Failed", svc)
 				default:
 					log.Fatalf("Unknown svc state: %s", event[svc].ActiveState)
 				}
@@ -58,7 +58,7 @@ func (obj *TimerRes) Validate() error {
 
 // Init runs some startup code for this resource.
 func (obj *TimerRes) Init() error {
-	obj.BaseRes.kind = "Timer"
+	obj.BaseRes.kind = "timer"
 	return obj.BaseRes.Init() // call base init, b/c we're overrriding
 }
 
59  resources/util.go  Normal file
@@ -0,0 +1,59 @@
+// Mgmt
+// Copyright (C) 2013-2017+ James Shubin and the project contributors
+// Written by James Shubin <james@shubin.ca> and the project contributors
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package resources
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+	"fmt"
+
+	errwrap "github.com/pkg/errors"
+)
+
+// ResToB64 encodes a resource to a base64 encoded string (after serialization).
+func ResToB64(res Res) (string, error) {
+	b := bytes.Buffer{}
+	e := gob.NewEncoder(&b)
+	err := e.Encode(&res) // pass with &
+	if err != nil {
+		return "", errwrap.Wrapf(err, "gob failed to encode")
+	}
+	return base64.StdEncoding.EncodeToString(b.Bytes()), nil
+}
+
+// B64ToRes decodes a resource from a base64 encoded string (after deserialization).
+func B64ToRes(str string) (Res, error) {
+	var output interface{}
+	bb, err := base64.StdEncoding.DecodeString(str)
+	if err != nil {
+		return nil, errwrap.Wrapf(err, "base64 failed to decode")
+	}
+	b := bytes.NewBuffer(bb)
+	d := gob.NewDecoder(b)
+	err = d.Decode(&output) // pass with &
+	if err != nil {
+		return nil, errwrap.Wrapf(err, "gob failed to decode")
+	}
+	res, ok := output.(Res)
+	if !ok {
+		return nil, fmt.Errorf("Output %v is not a Res", res)
+
+	}
+	return res, nil
+}
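ResToB64 and B64ToRes round-trip a Res through gob and base64. Decoding back through the Res interface only works if the concrete resource type was registered with gob beforehand, which the resources package presumably does in its own init code; the NoopRes below is just a convenient stand-in, and the whole snippet is a usage sketch rather than mgmt code:

package main

import (
	"fmt"
	"log"

	"github.com/purpleidea/mgmt/resources"
)

func main() {
	var res resources.Res = &resources.NoopRes{} // any registered resource kind works

	s, err := resources.ResToB64(res) // serialize, then base64 encode
	if err != nil {
		log.Fatal(err)
	}
	out, err := resources.B64ToRes(s) // decode back into a Res interface value
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round-tripped a %T\n", out) // round-tripped a *resources.NoopRes
}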
@@ -191,7 +191,7 @@ func (obj *VirtRes) Init() error {
 		}
 	}
 	obj.wg = &sync.WaitGroup{}
-	obj.BaseRes.kind = "Virt"
+	obj.BaseRes.kind = "virt"
 	return obj.BaseRes.Init() // call base init, b/c we're overriding
 }
 
2  tag.sh
@@ -7,5 +7,5 @@ echo "Version $t is now tagged!"
 echo "Pushing $t to origin..."
 echo "Press ^C within 3s to abort."
 sleep 3s
-git tag $t
+echo "release: tag $t" | git tag --file=- --sign $t
 git push origin $t
@@ -8,19 +8,19 @@ timeout --kill-after=20s 15s ./mgmt run --tmp-prefix --yaml=file-move.yaml 2>&1
 pid=$!
 sleep 5s	# let it converge
 
-initial=$(grep -c 'File\[file1\]: contentCheckApply(true)' /tmp/mgmt/file-move.log)
+initial=$(grep -c 'file\[file1\]: contentCheckApply(true)' /tmp/mgmt/file-move.log)
 
 mv /tmp/mgmt/f1 /tmp/mgmt/f2
 
 sleep 3
 
-after_move_count=$(grep -c 'File\[file1\]: contentCheckApply(true)' /tmp/mgmt/file-move.log)
+after_move_count=$(grep -c 'file\[file1\]: contentCheckApply(true)' /tmp/mgmt/file-move.log)
 
 sleep 3
 
 echo f2 > /tmp/mgmt/f2
 
-after_moved_file_count=$(grep -c 'File\[file1\]: contentCheckApply(true)' /tmp/mgmt/file-move.log)
+after_moved_file_count=$(grep -c 'file\[file1\]: contentCheckApply(true)' /tmp/mgmt/file-move.log)
 
 
 if [[ ${after_move_count} -le ${initial} ]]
7  test/shell/graph-fanin-1.sh  Executable file
@@ -0,0 +1,7 @@
+#!/bin/bash -e
+
+# should take slightly more than 25s, but fail if we take 35s)
+$timeout --kill-after=35s 30s ./mgmt run --yaml graph-fanin-1.yaml --converged-timeout=5 --no-watch --tmp-prefix --no-pgp &
+pid=$!
+wait $pid	# get exit status
+exit $?
@@ -1,6 +1,6 @@
 ---
 graph: mygraph
-comment: simple exec fan in example to demonstrate optimization)
+comment: simple exec fan in example to demonstrate optimization
 resources:
   exec:
   - name: exec1
@@ -1,5 +1,7 @@
 #!/bin/bash -e
 
+exit 0	# FIXME: disabled until intermittent failures can be resolved
+
 # run a graph, with prometheus support
 timeout --kill-after=30s 25s ./mgmt run --tmp-prefix --no-pgp --prometheus --yaml prometheus-3.yaml &
 pid=$!
@@ -9,10 +11,13 @@ sleep 10s	# let it converge
 curl 127.0.0.1:9233/metrics
 
 # Three CheckApply for a File ; with events
-curl 127.0.0.1:9233/metrics | grep '^mgmt_checkapply_total{apply="true",errorful="false",eventful="true",kind="File"} 3$'
+curl 127.0.0.1:9233/metrics | grep '^mgmt_checkapply_total{apply="true",errorful="false",eventful="true",kind="file"} 3$'
 
 # One CheckApply for a File ; in noop mode.
-curl 127.0.0.1:9233/metrics | grep '^mgmt_checkapply_total{apply="false",errorful="false",eventful="true",kind="File"} 1$'
+curl 127.0.0.1:9233/metrics | grep '^mgmt_checkapply_total{apply="false",errorful="false",eventful="true",kind="file"} 1$'
+
+# Check mgmt_graph_start_time_seconds
+curl 127.0.0.1:9233/metrics | grep "^mgmt_graph_start_time_seconds [1-9]\+"
 
 killall -SIGINT mgmt	# send ^C to exit mgmt
 wait $pid	# get exit status
8  test/shell/sema-1.sh  Executable file
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+
+# should take at least 55s, but fail if we block this
+# TODO: it would be nice to make sure this test doesn't exit too early!
+$timeout --kill-after=120s 110s ./mgmt run --yaml sema-1.yaml --sema 2 --converged-timeout=5 --no-watch --no-pgp --tmp-prefix &
+pid=$!
+wait $pid	# get exit status
+exit $?
128  test/shell/sema-1.yaml  Normal file
@@ -0,0 +1,128 @@
+---
+graph: mygraph
+comment: simple exec fan in to fan out example with semaphore
+resources:
+  exec:
+  - name: exec1
+    cmd: sleep 10s
+    shell: ''
+    timeout: 0
+    watchcmd: ''
+    watchshell: ''
+    ifcmd: ''
+    ifshell: ''
+    pollint: 0
+    state: present
+  - name: exec2
+    cmd: sleep 10s
+    shell: ''
+    timeout: 0
+    watchcmd: ''
+    watchshell: ''
+    ifcmd: ''
+    ifshell: ''
+    pollint: 0
+    state: present
+  - name: exec3
+    cmd: sleep 10s
+    shell: ''
+    timeout: 0
+    watchcmd: ''
+    watchshell: ''
+    ifcmd: ''
+    ifshell: ''
+    pollint: 0
+    state: present
+  - name: exec4
+    cmd: sleep 10s
+    shell: ''
+    timeout: 0
+    watchcmd: ''
+    watchshell: ''
+    ifcmd: ''
+    ifshell: ''
+    pollint: 0
+    state: present
+  - name: exec5
+    cmd: sleep 10s
+    shell: ''
+    timeout: 0
+    watchcmd: ''
+    watchshell: ''
+    ifcmd: ''
+    ifshell: ''
+    pollint: 0
+    state: present
+  - name: exec6
+    cmd: sleep 10s
+    shell: ''
+    timeout: 0
+    watchcmd: ''
+    watchshell: ''
+    ifcmd: ''
+    ifshell: ''
+    pollint: 0
+    state: present
+  - name: exec7
+    cmd: sleep 10s
+    shell: ''
+    timeout: 0
+    watchcmd: ''
+    watchshell: ''
+    ifcmd: ''
+    ifshell: ''
+    pollint: 0
+    state: present
+  - name: exec8
+    cmd: sleep 10s
+    shell: ''
+    timeout: 0
+    watchcmd: ''
+    watchshell: ''
+    ifcmd: ''
+    ifshell: ''
+    pollint: 0
+    state: present
+edges:
+- name: e1
+  from:
+    kind: exec
+    name: exec1
+  to:
+    kind: exec
+    name: exec4
+- name: e2
+  from:
+    kind: exec
+    name: exec2
+  to:
+    kind: exec
+    name: exec4
+- name: e3
+  from:
+    kind: exec
+    name: exec3
+  to:
+    kind: exec
+    name: exec4
+- name: e4
+  from:
+    kind: exec
+    name: exec4
+  to:
+    kind: exec
+    name: exec5
+- name: e5
+  from:
+    kind: exec
+    name: exec4
+  to:
+    kind: exec
+    name: exec6
+- name: e6
+  from:
+    kind: exec
+    name: exec4
+  to:
+    kind: exec
+    name: exec7
@@ -1,7 +0,0 @@
-#!/bin/bash -e
-
-# should take slightly more than 25s, but fail if we take 35s)
-$timeout --kill-after=35s 30s ./mgmt run --yaml t4.yaml --converged-timeout=5 --no-watch --tmp-prefix &
-pid=$!
-wait $pid	# get exit status
-exit $?
@@ -28,7 +28,7 @@ COUNT=`echo -e "$LINT" | wc -l`	# number of golint problems in current branch
 [ "$LINT" = "" ] && echo PASS && exit	# everything is "perfect"
 echo "$LINT"	# display the issues
 
-T=`mktemp --tmpdir -d tmp.XXX`
+T=`mktemp --tmpdir -d tmp.X'X'X`	# add quotes to avoid matching three X's
 [ "$T" = "" ] && fail_test "Could not create tmpdir"
 cd $T || fail_test "Could not change into tmpdir $T"
 git clone --recursive "${ROOT}" 2>/dev/null	# make a copy
@@ -22,7 +22,8 @@ function simplify-gocase() {
 }
 
 function token-coloncheck() {
-	if grep -Ei "[\/]+[\/]+[ ]*+(FIXME[^:]|TODO[^:]|XXX[^:])" "$1"; then
+	# add quotes to avoid matching three X's
+	if grep -Ei "[\/]+[\/]+[ ]*+(FIXME[^:]|TODO[^:]|X"'X'"X[^:])" "$1"; then
 		return 1	# tokens must end with a colon
 	fi
 	return 0
@@ -7,7 +7,7 @@ set -o pipefail
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )"	# dir!
 cd "$DIR" >/dev/null	# work from main mgmt directory
 make build
-T=`mktemp --tmpdir -d tmp.XXX`
+T=`mktemp --tmpdir -d tmp.X'X'X`	# add quotes to avoid matching three X's
 cp -a ./mgmt "$T"/mgmt.1
 make clean
 make build
|||||||
77
util/semaphore/semaphore.go
Normal file
77
util/semaphore/semaphore.go
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
// Mgmt
|
||||||
|
// Copyright (C) 2013-2017+ James Shubin and the project contributors
|
||||||
|
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// Package semaphore contains an implementation of a counting semaphore.
|
||||||
|
package semaphore
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Semaphore is a counting semaphore. It must be initialized before use.
|
||||||
|
type Semaphore struct {
|
||||||
|
C chan struct{}
|
||||||
|
closed chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSemaphore creates a new semaphore.
|
||||||
|
func NewSemaphore(size int) *Semaphore {
|
||||||
|
obj := &Semaphore{}
|
||||||
|
obj.Init(size)
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes the semaphore.
|
||||||
|
func (obj *Semaphore) Init(size int) {
|
||||||
|
obj.C = make(chan struct{}, size)
|
||||||
|
obj.closed = make(chan struct{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts down the semaphore and releases all the locks.
|
||||||
|
func (obj *Semaphore) Close() {
|
||||||
|
// TODO: we could return an error if any semaphores were killed, but
|
||||||
|
// it's not particularly useful to know that for this application...
|
||||||
|
close(obj.closed)
|
||||||
|
}
|
||||||
|
|
||||||
|
// P acquires n resources.
|
||||||
|
func (obj *Semaphore) P(n int) error {
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
select {
|
||||||
|
case obj.C <- struct{}{}: // acquire one
|
||||||
|
case <-obj.closed: // exit signal
|
||||||
|
return fmt.Errorf("closed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// V releases n resources.
|
||||||
|
func (obj *Semaphore) V(n int) error {
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
select {
|
||||||
|
case <-obj.C: // release one
|
||||||
|
// TODO: is the closed signal needed if unlocks should always pass?
|
||||||
|
case <-obj.closed: // exit signal
|
||||||
|
return fmt.Errorf("closed")
|
||||||
|
// TODO: is it true you shouldn't call a release before a lock?
|
||||||
|
default: // trying to release something that isn't locked
|
||||||
|
panic("semaphore: V > P")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
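The new package is a plain counting semaphore built on a buffered channel: P blocks once all size tokens are held, V returns tokens (and panics if it would release more than were acquired), and Close unblocks any waiters with an error. A small usage sketch; the worker count, sleep and print are arbitrary choices for illustration:

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/purpleidea/mgmt/util/semaphore"
)

func main() {
	sema := semaphore.NewSemaphore(2) // at most two workers run concurrently
	wg := &sync.WaitGroup{}

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if err := sema.P(1); err != nil { // acquire; errors if Close()d
				return
			}
			defer sema.V(1) // release
			fmt.Println("worker", id, "holds the semaphore")
			time.Sleep(100 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}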
@@ -57,7 +57,7 @@ func Uint64KeyFromStrInMap(needle string, haystack map[uint64]string) (uint64, b
 }
 
 // StrRemoveDuplicatesInList removes any duplicate values in the list.
-// This is a possibly sub-optimal, O(n^2)? implementation.
+// This implementation is possibly sub-optimal (O(n^2)?) but preserves ordering.
 func StrRemoveDuplicatesInList(list []string) []string {
 	unique := []string{}
 	for _, x := range list {
@@ -26,10 +26,8 @@ import (
 	"reflect"
 	"strings"
 
-	"github.com/purpleidea/mgmt/gapi"
 	"github.com/purpleidea/mgmt/pgraph"
 	"github.com/purpleidea/mgmt/resources"
-	"github.com/purpleidea/mgmt/util"
 
 	"gopkg.in/yaml.v2"
 )
@@ -60,6 +58,7 @@ type Resources struct {
 	Exec     []*resources.ExecRes     `yaml:"exec"`
 	File     []*resources.FileRes     `yaml:"file"`
 	Hostname []*resources.HostnameRes `yaml:"hostname"`
+	KV       []*resources.KVRes       `yaml:"kv"`
 	Msg      []*resources.MsgRes      `yaml:"msg"`
 	Noop     []*resources.NoopRes     `yaml:"noop"`
 	Nspawn   []*resources.NspawnRes   `yaml:"nspawn"`
@@ -93,7 +92,7 @@ func (c *GraphConfig) Parse(data []byte) error {
 
 // NewGraphFromConfig transforms a GraphConfig struct into a new graph.
 // FIXME: remove any possibly left over, now obsolete graph diff code from here!
-func (c *GraphConfig) NewGraphFromConfig(hostname string, world gapi.World, noop bool) (*pgraph.Graph, error) {
+func (c *GraphConfig) NewGraphFromConfig(hostname string, world resources.World, noop bool) (*pgraph.Graph, error) {
 	// hostname is the uuid for the host
 
 	var graph *pgraph.Graph // new graph to return
@@ -116,8 +115,7 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world gapi.World, noop
 		field := value.FieldByName(name)
 		iface := field.Interface() // interface type of value
 		slice := reflect.ValueOf(iface)
-		// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
-		kind := util.FirstToUpper(name)
+		kind := strings.ToLower(name)
 		for j := 0; j < slice.Len(); j++ { // loop through resources of same kind
 			x := slice.Index(j).Interface()
 			res, ok := x.(resources.Res) // convert to Res type
@@ -158,8 +156,7 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world gapi.World, noop
 	var hostnameFilter []string // empty to get from everyone
 	kindFilter := []string{}
 	for _, t := range c.Collector {
-		// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
-		kind := util.FirstToUpper(t.Kind)
+		kind := strings.ToLower(t.Kind)
 		kindFilter = append(kindFilter, kind)
 	}
 	// do all the graph look ups in one single step, so that if the backend
@@ -175,8 +172,7 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world gapi.World, noop
 		matched := false
 		// see if we find a collect pattern that matches
 		for _, t := range c.Collector {
-			// XXX: should we just drop these everywhere and have the kind strings be all lowercase?
-			kind := util.FirstToUpper(t.Kind)
+			kind := strings.ToLower(t.Kind)
 			// use t.Kind and optionally t.Pattern to collect from storage
 			log.Printf("Collect: %v; Pattern: %v", kind, t.Pattern)
 
@@ -219,20 +215,20 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world gapi.World, noop
 	}
 
 	for _, e := range c.Edges {
-		if _, ok := lookup[util.FirstToUpper(e.From.Kind)]; !ok {
+		if _, ok := lookup[strings.ToLower(e.From.Kind)]; !ok {
 			return nil, fmt.Errorf("can't find 'from' resource")
 		}
-		if _, ok := lookup[util.FirstToUpper(e.To.Kind)]; !ok {
+		if _, ok := lookup[strings.ToLower(e.To.Kind)]; !ok {
 			return nil, fmt.Errorf("can't find 'to' resource")
 		}
-		if _, ok := lookup[util.FirstToUpper(e.From.Kind)][e.From.Name]; !ok {
+		if _, ok := lookup[strings.ToLower(e.From.Kind)][e.From.Name]; !ok {
 			return nil, fmt.Errorf("can't find 'from' name")
 		}
-		if _, ok := lookup[util.FirstToUpper(e.To.Kind)][e.To.Name]; !ok {
+		if _, ok := lookup[strings.ToLower(e.To.Kind)][e.To.Name]; !ok {
 			return nil, fmt.Errorf("can't find 'to' name")
 		}
-		from := lookup[util.FirstToUpper(e.From.Kind)][e.From.Name]
-		to := lookup[util.FirstToUpper(e.To.Kind)][e.To.Name]
+		from := lookup[strings.ToLower(e.From.Kind)][e.From.Name]
+		to := lookup[strings.ToLower(e.To.Kind)][e.To.Name]
 		edge := pgraph.NewEdge(e.Name)
 		edge.Notify = e.Notify
 		graph.AddEdge(from, to, edge)