Compare commits
177 Commits

| SHA1 |
|---|
| d88874845c |
| 5e38c1c8fe |
| ae7ebeedd1 |
| 652b657809 |
| 62a6e0da1d |
| 0d0d48d9f6 |
| ab5957f1e9 |
| 463ba23003 |
| ccad6e7e1a |
| aa165b5e17 |
| f06e87377c |
| 4c3bf9fc7a |
| 253ed78cc6 |
| 4860d833c7 |
| 450d5c1a59 |
| 88fcda2c99 |
| 00db953c9f |
| a0df4829a8 |
| b0e1f12c22 |
| ee56155ec4 |
| 16d7c6a933 |
| f7a06c1da9 |
| 4c8086977a |
| b1f088e5fa |
| 1247c789aa |
| 749038c76d |
| 0a052494c4 |
| 90fa83a5cf |
| 4eaff892c1 |
| f368f75209 |
| 04048b13ed |
| 5acc33c751 |
| b449be89a7 |
| dac019290d |
| bdc424e39d |
| 10193a2796 |
| 2c9a12e941 |
| 8ba6c40f0c |
| bbfeb49cdf |
| f61e1cb36d |
| 4a3e2c3611 |
| 81faec508c |
| 9966ca2e85 |
| 35c26f9ee5 |
| b5e29771ab |
| f5f09d3640 |
| 5a531b7948 |
| f716a3a73b |
| ce8c8c8eea |
| fc48fda7e5 |
| 78936c5ce8 |
| 5d0efce278 |
| 0c17a0b4f2 |
| 3f396a7c52 |
| 8697f8f91f |
| 06c67685f1 |
| dc2e7de9e5 |
| db1dbe7a27 |
| d6bbb94be5 |
| e3b4c0aee3 |
| a1fbe152bb |
| 9d28ff9b23 |
| 43f0ddd25d |
| 7a28b00d75 |
| 32e29862f2 |
| 6c5c38f5a7 |
| 2da7854b24 |
| 6d0c5ab2d5 |
| 9398deeabc |
| bf63d2e844 |
| b808592fb3 |
| e2296a631b |
| e20555d4bc |
| b89e2dcd3c |
| 165d11b2ca |
| d4046c0acf |
| 88498695ac |
| 354a1c23b0 |
| 34550246f4 |
| db1cc846dc |
| 74484bcbdf |
| d5ecf8ce16 |
| b1ffb1d4a4 |
| 451e1122a7 |
| 10dcf32f3c |
| 7f1477b26d |
| 33b68c09d3 |
| 7ec48ca845 |
| 5c92cef983 |
| 75eba466c6 |
| ad30737119 |
| 8e0bde3071 |
| 7d641427d2 |
| 3b62beed26 |
| 2d3cf68261 |
| 7d6080d13f |
| e3eefeb3fe |
| f10dddadd6 |
| d166112917 |
| 8ed5c1bedf |
| 4489076fac |
| bdc33cd421 |
| 889dae2955 |
| 9ff21b68e4 |
| a69a7009f8 |
| d413fac4cb |
| 246ecd8607 |
| 22105af720 |
| 880c4d2f48 |
| 443f489152 |
| 39fdfdfd8c |
| 96dccca475 |
| 948a3c6d08 |
| dc13d5d26b |
| aae714db6b |
| a7c9673bcf |
| 3d06775ddc |
| 48beea3884 |
| 958d3f6094 |
| 08f24fb272 |
| 07d57e1a64 |
| cd7711bdfe |
| 433ffa05a5 |
| 046b21b907 |
| c32183eb70 |
| 73b11045f2 |
| 57ce3fa587 |
| a26620da38 |
| 86b8099eb9 |
| c8e9a100a6 |
| a287f028d1 |
| cf50fb3568 |
| 4c8193876f |
| 158bc1eb2a |
| 3f42e5f702 |
| 75633817a7 |
| 83b00fce3e |
| 38befb53ad |
| d0b5c4de68 |
| 1b68845b00 |
| a7bc72540d |
| 27ac7481f9 |
| 9bc36be513 |
| e62e35bc88 |
| bd80ced9b2 |
| bb2f2e5e54 |
| b1eb6711b7 |
| da0ffa5e56 |
| 68ef312233 |
| 9fefadca24 |
| e14b14b88c |
| d5bfb7257e |
| 8282f3b59c |
| dbf0c84f0b |
| a5977b993a |
| 27df3ae876 |
| a49d07cf01 |
| 28f343ac50 |
| 4297a39d03 |
| bd996e441c |
| 086a89fad6 |
| 70ac38e66c |
| d990d2ad86 |
| 56db31ca43 |
| b902e2d30b |
| d2bab32b0e |
| b2d726051b |
| 8e25667f87 |
| 9b5c4c50e7 |
| d2ce70a673 |
| 9db0fc4ee4 |
| 9ed830bb81 |
| 4e42d9ed03 |
| 4c93bc3599 |
| 7c817802a8 |
| de90b592fb |
| b9d0cc2e28 |
.gitignore (vendored): 3 changes

@@ -5,6 +5,7 @@
.envrc
old/
tmp/
*WIP
*_stringer.go
bindata/*.go
mgmt
@@ -14,3 +15,5 @@ build/mgmt-*
mgmt.iml
rpmbuild/
releases/
# vim swap files
.*.sw[op]

.travis.yml: 33 changes

@@ -1,17 +1,18 @@
language: go
os:
- linux
go:
- 1.9.x
- 1.10.x
- tip
go_import_path: github.com/purpleidea/mgmt
sudo: true
dist: trusty
dist: xenial
# travis requires that you update manually, and provides this key to trigger it
apt:
update: true
before_install:
# print some debug information to help catch the constant travis regressions
- if [ -e /etc/apt/sources.list.d/ ]; then sudo ls -l /etc/apt/sources.list.d/; fi
# workaround broken travis NO_PUBKEY errors
- if [ -e /etc/apt/sources.list.d/rabbitmq_rabbitmq-server.list ]; then sudo rm -f /etc/apt/sources.list.d/rabbitmq_rabbitmq-server.list; fi
- if [ -e /etc/apt/sources.list.d/github_git-lfs.list ]; then sudo rm -f /etc/apt/sources.list.d/github_git-lfs.list; fi
# as per a number of comments online, this might mitigate some flaky fails...
- if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 0C49F3730359A14518585931BC711F9BA15703C6; fi
# apt update tends to be flaky in travis, retry up to 3 times on failure
@@ -20,17 +21,27 @@ before_install:
- git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
- git fetch --unshallow
install: 'make deps'
script: 'make test'
matrix:
fast_finish: false
allow_failures:
- go: 1.10.x
- go: tip
- os: osx
- go: 1.11.x
- go: tip
- os: osx
# include only one build for osx for a quicker build as the nr. of these runners are sparse
include:
- os: osx
go: 1.9.x
- name: "basic tests"
go: 1.10.x
env: TEST_BLOCK=basic
- name: "shell tests"
go: 1.10.x
env: TEST_BLOCK=shell
- name: "race tests"
go: 1.10.x
env: TEST_BLOCK=race
- go: 1.11.x
- go: tip
- os: osx
script: 'TEST_BLOCK="$TEST_BLOCK" make test'

# the "secure" channel value is the result of running: ./misc/travis-encrypt.sh
# with a value of: irc.freenode.net#mgmtconfig to eliminate noise from forks...

Makefile: 34 changes

@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

SHELL = /usr/bin/env bash
.PHONY: all art cleanart version program lang path deps run race bindata generate build build-debug crossbuild clean test gofmt yamlfmt format docs rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms copr tag release
.PHONY: all art cleanart version program lang path deps run race bindata generate build build-debug crossbuild clean test gofmt yamlfmt format docs rpmbuild mkdirs rpm srpm spec tar upload upload-sources upload-srpms upload-rpms copr tag release funcgen
.SILENT: clean bindata

# a large amount of output from this `find`, can cause `make` to be much slower!
@@ -117,16 +117,15 @@ race:

# generate go files from non-go source
bindata: ## generate go files from non-go sources
@echo "Generating: bindata..."
$(MAKE) --quiet -C bindata
$(MAKE) --quiet -C lang/funcs

generate:
go generate

lang: ## generates the lexer/parser for the language frontend
@# recursively run make in child dir named lang
@echo "Generating: lang..."
$(MAKE) --quiet -C lang
@$(MAKE) --quiet -C lang

# build a `mgmt` binary for current host os/arch
$(PROGRAM): build/mgmt-${GOHOSTOS}-${GOHOSTARCH} ## build an mgmt binary for current host os/arch
@@ -147,7 +146,7 @@ build-debug: $(PROGRAM)
# extract os and arch from target pattern
GOOS=$(firstword $(subst -, ,$*))
GOARCH=$(lastword $(subst -, ,$*))
build/mgmt-%: $(GO_FILES) | bindata lang
build/mgmt-%: $(GO_FILES) | bindata lang funcgen
@echo "Building: $(PROGRAM), os/arch: $*, version: $(SVERSION)..."
@# reassigning GOOS and GOARCH to make build command copy/pastable
@# go 1.10 requires specifying the package for ldflags
@@ -163,7 +162,10 @@ crossbuild: ${crossbuild_targets}

clean: ## clean things up
$(MAKE) --quiet -C bindata clean
$(MAKE) --quiet -C lang/funcs clean
$(MAKE) --quiet -C lang clean
rm -f lang/funcs/core/generated_funcs.go || true
rm -f lang/funcs/core/generated_funcs_test.go || true
[ ! -e $(PROGRAM) ] || rm $(PROGRAM)
rm -f *_stringer.go # generated by `go generate`
rm -f *_mock.go # generated by `go generate`
@@ -358,28 +360,28 @@ releases/$(VERSION)/.mkdir:
mkdir -p releases/$(VERSION)/{deb,rpm,pacman}/ && touch releases/$(VERSION)/.mkdir

releases/$(VERSION)/rpm/changelog: $(PROGRAM) releases/$(VERSION)/.mkdir
@echo "Generating rpm changelog..."
@echo "Generating: rpm changelog..."
./misc/make-rpm-changelog.sh $(VERSION)

$(RPM_PKG): releases/$(VERSION)/rpm/changelog
@echo "Building rpm package..."
@echo "Building: rpm package..."
./misc/fpm-pack.sh rpm $(VERSION) libvirt-devel augeas-devel

releases/$(VERSION)/deb/changelog: $(PROGRAM) releases/$(VERSION)/.mkdir
@echo "Generating deb changelog..."
@echo "Generating: deb changelog..."
./misc/make-deb-changelog.sh $(VERSION)

$(DEB_PKG): releases/$(VERSION)/deb/changelog
@echo "Building deb package..."
@echo "Building: deb package..."
./misc/fpm-pack.sh deb $(VERSION) libvirt-dev libaugeas-dev

$(PACMAN_PKG): $(PROGRAM) releases/$(VERSION)/.mkdir
@echo "Building pacman package..."
@echo "Building: pacman package..."
./misc/fpm-pack.sh pacman $(VERSION) libvirt augeas

$(SHA256SUMS): $(RPM_PKG) $(DEB_PKG) $(PACMAN_PKG)
@# remove the directory separator in the SHA256SUMS file
@echo "Generating sha256 sum..."
@echo "Generating: sha256 sum..."
sha256sum $(RPM_PKG) $(DEB_PKG) $(PACMAN_PKG) | awk -F '/| ' '{print $$1" "$$6}' > $(SHA256SUMS)

$(SHA256SUMS_ASC): $(SHA256SUMS)
@@ -406,4 +408,14 @@ help: ## show this help screen
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
@echo ''

funcgen: lang/funcs/core/generated_funcs_test.go lang/funcs/core/generated_funcs.go

lang/funcs/core/generated_funcs_test.go: lang/funcs/funcgen/*.go lang/funcs/core/funcgen.yaml lang/funcs/funcgen/templates/generated_funcs_test.go.tpl
@echo "Generating: funcs test..."
@go run lang/funcs/funcgen/*.go -templates lang/funcs/funcgen/templates/generated_funcs_test.go.tpl 2>/dev/null

lang/funcs/core/generated_funcs.go: lang/funcs/funcgen/*.go lang/funcs/core/funcgen.yaml lang/funcs/funcgen/templates/generated_funcs.go.tpl
@echo "Generating: funcs..."
@go run lang/funcs/funcgen/*.go -templates lang/funcs/funcgen/templates/generated_funcs.go.tpl 2>/dev/null

# vim: ts=8

bindata/Makefile

@@ -16,9 +16,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# The bindata target generates go files from any source defined below. To use
# the files, import the "bindata" package and use:
# the files, import the generated "bindata" package and use:
# `bytes, err := bindata.Asset("FILEPATH")`
# where FILEPATH is the path of the original input file relative to `bindata/`.
# To get a list of files stored in this "bindata" package, you can use:
# `paths := bindata.AssetNames()` and `paths, err := bindata.AssetDir(name)`
# to get a list of files with a directory prefix.

.PHONY: build clean
default: build
@@ -27,6 +30,7 @@ build: bindata.go

# add more input files as dependencies at the end here...
bindata.go: ../COPYING
@echo "Generating: bindata..."
# go-bindata --pkg bindata -o <OUTPUT> <INPUT>
go-bindata --pkg bindata -o ./$@ $^
# gofmt the output file
@@ -34,5 +38,5 @@ bindata.go: ../COPYING
@ROOT=$$(dirname "$${BASH_SOURCE}")/.. && $$ROOT/misc/header.sh '$@'

clean:
# remove generated bindata/*.go
@ROOT=$$(dirname "$${BASH_SOURCE}")/.. && rm -f *.go
# remove generated bindata.go
@ROOT=$$(dirname "$${BASH_SOURCE}")/.. && rm -f bindata.go

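The Makefile comment above already describes the whole consumer-side API. As a quick, hypothetical illustration only (the import path is assumed from the repository layout, and only the `Asset`/`AssetNames` calls named in that comment are used), a program reading the embedded files back out could look like this:

```golang
package main

import (
	"fmt"
	"log"

	"github.com/purpleidea/mgmt/bindata" // assumed path of the generated package
)

func main() {
	// list everything that go-bindata compiled in, and read each asset back
	for _, p := range bindata.AssetNames() {
		b, err := bindata.Asset(p) // p is the path recorded at generation time
		if err != nil {
			log.Fatalf("could not read asset %s: %v", p, err)
		}
		fmt.Printf("asset %s is %d bytes\n", p, len(b))
	}
}
```
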
converger/converger.go

@@ -29,135 +29,248 @@ import (
multierr "github.com/hashicorp/go-multierror"
)

// TODO: we could make a new function that masks out the state of certain
// UID's, but at the moment the new Timer code has obsoleted the need...
// New builds a new converger coordinator.
func New(timeout int64) *Coordinator {
return &Coordinator{
timeout: timeout,

// Converger is the general interface for implementing a convergence watcher.
type Converger interface { // TODO: need a better name
Register() UID
IsConverged(UID) bool // is the UID converged ?
SetConverged(UID, bool) error // set the converged state of the UID
Unregister(UID)
Start()
Pause()
Loop(bool)
ConvergedTimer(UID) <-chan time.Time
Status() map[uint64]bool
Timeout() int // returns the timeout that this was created with
AddStateFn(string, func(bool) error) error // adds a stateFn with a name
RemoveStateFn(string) error // remove a stateFn with a given name
}
mutex: &sync.RWMutex{},

// UID is the interface resources can use to notify with if converged. You'll
// need to use part of the Converger interface to Register initially too.
type UID interface {
ID() uint64 // get Id
Name() string // get a friendly name
SetName(string)
IsValid() bool // has Id been initialized ?
InvalidateID() // set Id to nil
IsConverged() bool
SetConverged(bool) error
Unregister()
ConvergedTimer() <-chan time.Time
StartTimer() (func() error, error) // cancellable is the same as StopTimer()
ResetTimer() error // resets counter to zero
StopTimer() error
}
//lastid: 0,
status: make(map[*UID]struct{}),

// converger is an implementation of the Converger interface.
type converger struct {
timeout int // must be zero (instant) or greater seconds to run
converged bool // did we converge (state changes of this run Fn)
channel chan struct{} // signal here to run an isConverged check
control chan bool // control channel for start/pause
mutex *sync.RWMutex // used for controlling access to status and lastid
lastid uint64
status map[uint64]bool
stateFns map[string]func(bool) error // run on converged state changes with state bool
smutex *sync.RWMutex // used for controlling access to stateFns
}
//converged: false, // initial state

// cuid is an implementation of the UID interface.
type cuid struct {
converger Converger
id uint64
name string // user defined, friendly name
mutex *sync.Mutex
timer chan struct{}
running bool // is the above timer running?
wg *sync.WaitGroup
}
pokeChan: make(chan struct{}, 1), // must be buffered

readyChan: make(chan struct{}), // ready signal

//paused: false, // starts off as started
pauseSignal: make(chan struct{}),
//resumeSignal: make(chan struct{}), // happens on pause
//pausedAck: util.NewEasyAck(), // happens on pause

// NewConverger builds a new converger struct.
func NewConverger(timeout int) Converger {
return &converger{
timeout: timeout,
channel: make(chan struct{}),
control: make(chan bool),
mutex: &sync.RWMutex{},
lastid: 0,
status: make(map[uint64]bool),
stateFns: make(map[string]func(bool) error),
smutex: &sync.RWMutex{},
}
}

// Register assigns a UID to the caller.
func (obj *converger) Register() UID {
obj.mutex.Lock()
defer obj.mutex.Unlock()
obj.lastid++
obj.status[obj.lastid] = false // initialize as not converged
return &cuid{
converger: obj,
id: obj.lastid,
name: fmt.Sprintf("%d", obj.lastid), // some default
mutex: &sync.Mutex{},
timer: nil,
running: false,
closeChan: make(chan struct{}),
wg: &sync.WaitGroup{},
}
}

// IsConverged gets the converged status of a uid.
func (obj *converger) IsConverged(uid UID) bool {
if !uid.IsValid() {
panic(fmt.Sprintf("the ID of UID(%s) is nil", uid.Name()))
}
obj.mutex.RLock()
isConverged, found := obj.status[uid.ID()] // lookup
obj.mutex.RUnlock()
if !found {
panic("the ID of UID is unregistered")
}
return isConverged
// Coordinator is the central converger engine.
type Coordinator struct {
// timeout must be zero (instant) or greater seconds to run. If it's -1
// then this is disabled, and we never run stateFns.
timeout int64

// mutex is used for controlling access to status and lastid.
mutex *sync.RWMutex

// lastid contains the last uid we used for registration.
//lastid uint64
// status contains a reference to each active UID.
status map[*UID]struct{}

// converged stores the last convergence state. When this changes, we
// run the stateFns.
converged bool

// pokeChan receives a message every time we might need to re-calculate.
pokeChan chan struct{}

// readyChan closes to notify any interested parties that the main loop
// is running.
readyChan chan struct{}

// paused represents if this coordinator is paused or not.
paused bool
// pauseSignal closes to request a pause of this coordinator.
pauseSignal chan struct{}
// resumeSignal closes to request a resume of this coordinator.
resumeSignal chan struct{}
// pausedAck is used to send an ack message saying that we've paused.
pausedAck *util.EasyAck

// stateFns run on converged state changes.
stateFns map[string]func(bool) error
// smutex is used for controlling access to the stateFns map.
smutex *sync.RWMutex

// closeChan closes when we've been requested to shutdown.
closeChan chan struct{}
// wg waits for everything to finish.
wg *sync.WaitGroup
}

// SetConverged updates the converger with the converged state of the UID.
func (obj *converger) SetConverged(uid UID, isConverged bool) error {
if !uid.IsValid() {
return fmt.Errorf("the ID of UID(%s) is nil", uid.Name())
}
// Register creates a new UID which can be used to report converged state. You
// must Unregister each UID before Shutdown will be able to finish running.
func (obj *Coordinator) Register() *UID {
obj.wg.Add(1) // additional tracking for each UID
obj.mutex.Lock()
if _, found := obj.status[uid.ID()]; !found {
panic("the ID of UID is unregistered")
defer obj.mutex.Unlock()
//obj.lastid++
uid := &UID{
timeout: obj.timeout, // copy the timeout here
//id: obj.lastid,
//name: fmt.Sprintf("%d", obj.lastid), // some default

poke: obj.poke,

// timer
mutex: &sync.Mutex{},
timer: nil,
running: false,
wg: &sync.WaitGroup{},
}
obj.status[uid.ID()] = isConverged // set
obj.mutex.Unlock() // unlock *before* poke or deadlock!
if isConverged != obj.converged { // only poke if it would be helpful
// run in a go routine so that we never block... just queue up!
// this allows us to send events, even if we haven't started...
go func() { obj.channel <- struct{}{} }()
uid.unregister = func() { obj.Unregister(uid) } // add unregister func
obj.status[uid] = struct{}{} // TODO: add converged state here?
return uid
}

// Unregister removes the UID from the converger coordinator. If you supply an
// invalid or unregistered uid to this function, it will panic. An unregistered
// UID is no longer part of the convergence checking.
func (obj *Coordinator) Unregister(uid *UID) {
defer obj.wg.Done() // additional tracking for each UID
obj.mutex.Lock()
defer obj.mutex.Unlock()

if _, exists := obj.status[uid]; !exists {
panic("uid is not registered")
}
uid.StopTimer() // ignore any errors
delete(obj.status, uid)
}

// Run starts the main loop for the converger coordinator. It is commonly run
// from a go routine. It blocks until the Shutdown method is run to close it.
// NOTE: when we have very short timeouts, if we start before all the resources
// have joined the map, then it might appear as if we converged before we did!
func (obj *Coordinator) Run(startPaused bool) {
obj.wg.Add(1)
wg := &sync.WaitGroup{} // needed for the startPaused
defer wg.Wait() // don't leave any leftover go routines running
if startPaused {
wg.Add(1)
go func() {
defer wg.Done()
obj.Pause() // ignore any errors
close(obj.readyChan)
}()
} else {
close(obj.readyChan) // we must wait till the wg.Add(1) has happened...
}
defer obj.wg.Done()
for {
// pause if one was requested...
select {
case <-obj.pauseSignal: // channel closes
obj.pausedAck.Ack() // send ack
// we are paused now, and waiting for resume or exit...
select {
case <-obj.resumeSignal: // channel closes
// resumed!

case <-obj.closeChan: // we can always escape
return
}

case _, ok := <-obj.pokeChan: // we got an event (re-calculate)
if !ok {
return
}

if err := obj.test(); err != nil {
// FIXME: what to do on error ?
}

case <-obj.closeChan: // we can always escape
return
}
}
}

// Ready blocks until the Run loop has started up. This is useful so that we
// don't run Shutdown before we've even started up properly.
func (obj *Coordinator) Ready() {
select {
case <-obj.readyChan:
}
}

// Shutdown sends a signal to the Run loop that it should exit. This blocks
// until it does.
func (obj *Coordinator) Shutdown() {
close(obj.closeChan)
obj.wg.Wait()
close(obj.pokeChan) // free memory?
}

// Pause pauses the coordinator. It should not be called on an already paused
// coordinator. It will block until the coordinator pauses with an
// acknowledgment, or until an exit is requested. If the latter happens it will
// error. It is NOT thread-safe with the Resume() method so only call either one
// at a time.
func (obj *Coordinator) Pause() error {
if obj.paused {
return fmt.Errorf("already paused")
}

obj.pausedAck = util.NewEasyAck()
obj.resumeSignal = make(chan struct{}) // build the resume signal
close(obj.pauseSignal)

// wait for ack (or exit signal)
select {
case <-obj.pausedAck.Wait(): // we got it!
// we're paused
case <-obj.closeChan:
return fmt.Errorf("closing")
}
obj.paused = true

return nil
}

// isConverged returns true if *every* registered uid has converged.
func (obj *converger) isConverged() bool {
obj.mutex.RLock() // take a read lock
defer obj.mutex.RUnlock()
for _, v := range obj.status {
// Resume unpauses the coordinator. It can be safely called on a brand-new
// coordinator that has just started running without incident. It is NOT
// thread-safe with the Pause() method, so only call either one at a time.
func (obj *Coordinator) Resume() {
// TODO: do we need a mutex around Resume?
if !obj.paused { // no need to unpause brand-new resources
return
}

obj.pauseSignal = make(chan struct{}) // rebuild for next pause
close(obj.resumeSignal)
obj.poke() // unblock and notice the resume if necessary

obj.paused = false

// no need to wait for it to resume
//return // implied
}

// poke sends a message to the coordinator telling it that it should re-evaluate
// whether we're converged or not. This does not block. Do not run this in a
// goroutine. It must not be called after Shutdown has been called.
func (obj *Coordinator) poke() {
// redundant
//if len(obj.pokeChan) > 0 {
// return
//}

select {
case obj.pokeChan <- struct{}{}:
default: // if chan is now full because more than one poke happened...
}
}

// IsConverged returns true if *every* registered uid has converged. If there
// are no registered UID's, then this will return true.
func (obj *Coordinator) IsConverged() bool {
for _, v := range obj.Status() {
if !v { // everyone must be converged for this to be true
return false
}
@@ -165,145 +278,40 @@ func (obj *converger) isConverged() bool {
return true
}

// Unregister dissociates the ConvergedUID from the converged checking.
func (obj *converger) Unregister(uid UID) {
if !uid.IsValid() {
panic(fmt.Sprintf("the ID of UID(%s) is nil", uid.Name()))
// test evaluates whether we're converged or not and runs the state change. It
// is NOT thread-safe.
func (obj *Coordinator) test() error {
// TODO: add these checks elsewhere to prevent anything from running?
if obj.timeout < 0 {
return nil // nothing to do (only run if timeout is valid)
}
obj.mutex.Lock()
uid.StopTimer() // ignore any errors
delete(obj.status, uid.ID())
obj.mutex.Unlock()
uid.InvalidateID()
}

// Start causes a Converger object to start or resume running.
func (obj *converger) Start() {
obj.control <- true
}
converged := obj.IsConverged()
defer func() {
obj.converged = converged // set this only at the end...
}()

// Pause causes a Converger object to stop running temporarily.
func (obj *converger) Pause() { // FIXME: add a sync ACK on pause before return
obj.control <- false
}

// Loop is the main loop for a Converger object. It usually runs in a goroutine.
// TODO: we could eventually have each resource tell us as soon as it converges,
// and then keep track of the time delays here, to avoid callers needing select.
// NOTE: when we have very short timeouts, if we start before all the resources
// have joined the map, then it might appear as if we converged before we did!
func (obj *converger) Loop(startPaused bool) {
if obj.control == nil {
panic("converger not initialized correctly")
}
if startPaused { // start paused without racing
select {
case e := <-obj.control:
if !e {
panic("converger expected true")
}
if !converged {
if !obj.converged { // were we previously also not converged?
return nil // nothing to do
}
}
for {
select {
case e := <-obj.control: // expecting "false" which means pause!
if e {
panic("converger expected false")
}
// now i'm paused...
select {
case e := <-obj.control:
if !e {
panic("converger expected true")
}
// restart
// kick once to refresh the check...
go func() { obj.channel <- struct{}{} }()
continue
}

case <-obj.channel:
if !obj.isConverged() {
if obj.converged { // we're doing a state change
// call the arbitrary functions (takes a read lock!)
if err := obj.runStateFns(false); err != nil {
// FIXME: what to do on error ?
}
}
obj.converged = false
continue
}

// we have converged!
if obj.timeout >= 0 { // only run if timeout is valid
if !obj.converged { // we're doing a state change
// call the arbitrary functions (takes a read lock!)
if err := obj.runStateFns(true); err != nil {
// FIXME: what to do on error ?
}
}
}
obj.converged = true
// loop and wait again...
}
// we're doing a state change
// call the arbitrary functions (takes a read lock!)
return obj.runStateFns(false)
}

// we have converged!
if obj.converged { // were we previously also converged?
return nil // nothing to do
}

// call the arbitrary functions (takes a read lock!)
return obj.runStateFns(true)
}

// ConvergedTimer adds a timeout to a select call and blocks until then.
// TODO: this means we could eventually have per resource converged timeouts
func (obj *converger) ConvergedTimer(uid UID) <-chan time.Time {
// be clever: if i'm already converged, this timeout should block which
// avoids unnecessary new signals being sent! this avoids fast loops if
// we have a low timeout, or in particular a timeout == 0
if uid.IsConverged() {
// blocks the case statement in select forever!
return util.TimeAfterOrBlock(-1)
}
return util.TimeAfterOrBlock(obj.timeout)
}

// Status returns a map of the converged status of each UID.
func (obj *converger) Status() map[uint64]bool {
status := make(map[uint64]bool)
obj.mutex.RLock() // take a read lock
defer obj.mutex.RUnlock()
for k, v := range obj.status { // make a copy to avoid the mutex
status[k] = v
}
return status
}

// Timeout returns the timeout in seconds that converger was created with. This
// is useful to avoid passing in the timeout value separately when you're
// already passing in the Converger struct.
func (obj *converger) Timeout() int {
return obj.timeout
}

// AddStateFn adds a state function to be run on change of converged state.
func (obj *converger) AddStateFn(name string, stateFn func(bool) error) error {
obj.smutex.Lock()
defer obj.smutex.Unlock()
if _, exists := obj.stateFns[name]; exists {
return fmt.Errorf("a stateFn with that name already exists")
}
obj.stateFns[name] = stateFn
return nil
}

// RemoveStateFn adds a state function to be run on change of converged state.
func (obj *converger) RemoveStateFn(name string) error {
obj.smutex.Lock()
defer obj.smutex.Unlock()
if _, exists := obj.stateFns[name]; !exists {
return fmt.Errorf("a stateFn with that name doesn't exist")
}
delete(obj.stateFns, name)
return nil
}

// runStateFns runs the listed of stored state functions.
func (obj *converger) runStateFns(converged bool) error {
// runStateFns runs the list of stored state functions.
func (obj *Coordinator) runStateFns(converged bool) error {
obj.smutex.RLock()
defer obj.smutex.RUnlock()
var keys []string
@@ -322,70 +330,119 @@ func (obj *converger) runStateFns(converged bool) error {
return err
}

// ID returns the unique id of this UID object.
func (obj *cuid) ID() uint64 {
return obj.id
// AddStateFn adds a state function to be run on change of converged state.
func (obj *Coordinator) AddStateFn(name string, stateFn func(bool) error) error {
obj.smutex.Lock()
defer obj.smutex.Unlock()
if _, exists := obj.stateFns[name]; exists {
return fmt.Errorf("a stateFn with that name already exists")
}
obj.stateFns[name] = stateFn
return nil
}

// Name returns a user defined name for the specific cuid.
func (obj *cuid) Name() string {
return obj.name
// RemoveStateFn removes a state function from running on change of converged
// state.
func (obj *Coordinator) RemoveStateFn(name string) error {
obj.smutex.Lock()
defer obj.smutex.Unlock()
if _, exists := obj.stateFns[name]; !exists {
return fmt.Errorf("a stateFn with that name doesn't exist")
}
delete(obj.stateFns, name)
return nil
}

// SetName sets a user defined name for the specific cuid.
func (obj *cuid) SetName(name string) {
obj.name = name
// Status returns a map of the converged status of each UID.
func (obj *Coordinator) Status() map[*UID]bool {
status := make(map[*UID]bool)
obj.mutex.RLock() // take a read lock
defer obj.mutex.RUnlock()
for k := range obj.status {
status[k] = k.IsConverged()
}
return status
}

// IsValid tells us if the id is valid or has already been destroyed.
func (obj *cuid) IsValid() bool {
return obj.id != 0 // an id of 0 is invalid
// Timeout returns the timeout in seconds that converger was created with. This
// is useful to avoid passing in the timeout value separately when you're
// already passing in the Coordinator struct.
func (obj *Coordinator) Timeout() int64 {
return obj.timeout
}

// InvalidateID marks the id as no longer valid.
func (obj *cuid) InvalidateID() {
obj.id = 0 // an id of 0 is invalid
// UID represents one of the probes for the converger coordinator. It is created
// by calling the Register method of the Coordinator struct. It should be freed
// after use with Unregister.
type UID struct {
// timeout is a copy of the main timeout. It could eventually be used
// for per-UID timeouts too.
timeout int64
// isConverged stores the convergence state of this particular UID.
isConverged bool

// poke stores a reference to the main poke function.
poke func()
// unregister stores a reference to the unregister function.
unregister func()

// timer
mutex *sync.Mutex
timer chan struct{}
running bool // is the timer running?
wg *sync.WaitGroup
}

// IsConverged is a helper function to the regular IsConverged method.
func (obj *cuid) IsConverged() bool {
return obj.converger.IsConverged(obj)
// Unregister removes this UID from the converger coordinator. An unregistered
// UID is no longer part of the convergence checking.
func (obj *UID) Unregister() {
obj.unregister()
}

// SetConverged is a helper function to the regular SetConverged notification.
func (obj *cuid) SetConverged(isConverged bool) error {
return obj.converger.SetConverged(obj, isConverged)
// IsConverged reports whether this UID is converged or not.
func (obj *UID) IsConverged() bool {
return obj.isConverged
}

// Unregister is a helper function to unregister myself.
func (obj *cuid) Unregister() {
obj.converger.Unregister(obj)
// SetConverged sets the convergence state of this UID. This is used by the
// running timer if one is started. The timer will overwrite any value set by
// this method.
func (obj *UID) SetConverged(isConverged bool) {
obj.isConverged = isConverged
obj.poke() // notify of change
}

// ConvergedTimer is a helper around the regular ConvergedTimer method.
func (obj *cuid) ConvergedTimer() <-chan time.Time {
return obj.converger.ConvergedTimer(obj)
// ConvergedTimer adds a timeout to a select call and blocks until then.
// TODO: this means we could eventually have per resource converged timeouts
func (obj *UID) ConvergedTimer() <-chan time.Time {
// be clever: if i'm already converged, this timeout should block which
// avoids unnecessary new signals being sent! this avoids fast loops if
// we have a low timeout, or in particular a timeout == 0
if obj.IsConverged() {
// blocks the case statement in select forever!
return util.TimeAfterOrBlock(-1)
}
return util.TimeAfterOrBlock(int(obj.timeout))
}

// StartTimer runs an invisible timer that automatically converges on timeout.
func (obj *cuid) StartTimer() (func() error, error) {
// StartTimer runs a timer that sets us as converged on timeout. It also returns
// a handle to the StopTimer function which should be run before exit.
func (obj *UID) StartTimer() (func() error, error) {
obj.mutex.Lock()
if !obj.running {
obj.timer = make(chan struct{})
obj.running = true
} else {
obj.mutex.Unlock()
defer obj.mutex.Unlock()
if obj.running {
return obj.StopTimer, fmt.Errorf("timer already started")
}
obj.mutex.Unlock()
obj.timer = make(chan struct{})
obj.running = true
obj.wg.Add(1)
go func() {
defer obj.wg.Done()
for {
select {
case _, ok := <-obj.timer: // reset signal channel
if !ok { // channel is closed
return // false to exit
if !ok {
return
}
obj.SetConverged(false)

@@ -393,8 +450,8 @@ func (obj *cuid) StartTimer() (func() error, error) {
obj.SetConverged(true) // converged!
select {
case _, ok := <-obj.timer: // reset signal channel
if !ok { // channel is closed
return // false to exit
if !ok {
return
}
}
}
@@ -403,8 +460,8 @@ func (obj *cuid) StartTimer() (func() error, error) {
return obj.StopTimer, nil
}

// ResetTimer resets the counter to zero if using a StartTimer internally.
func (obj *cuid) ResetTimer() error {
// ResetTimer resets the timer to zero.
func (obj *UID) ResetTimer() error {
obj.mutex.Lock()
defer obj.mutex.Unlock()
if obj.running {
@@ -414,8 +471,8 @@ func (obj *cuid) ResetTimer() error {
return fmt.Errorf("timer hasn't been started")
}

// StopTimer stops the running timer permanently until a StartTimer is run.
func (obj *cuid) StopTimer() error {
// StopTimer stops the running timer.
func (obj *UID) StopTimer() error {
obj.mutex.Lock()
defer obj.mutex.Unlock()
if !obj.running {

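Taken together, the diff above replaces the old Converger/cuid interfaces with a concrete Coordinator and UID pair. A minimal, hypothetical caller, sketched only from the signatures visible in this diff (New, AddStateFn, Run, Ready, Register, StartTimer, SetConverged, Unregister, Shutdown) and not taken from the repository, might look like this:

```golang
package main

import (
	"fmt"
	"time"

	"github.com/purpleidea/mgmt/converger"
)

func main() {
	coord := converger.New(5) // converge once things stay quiet for 5 seconds

	// run an arbitrary function whenever the global converged state flips
	coord.AddStateFn("logger", func(converged bool) error {
		fmt.Println("converged:", converged)
		return nil
	})

	go coord.Run(false) // main loop; blocks until Shutdown
	coord.Ready()       // wait until the loop is actually running

	uid := coord.Register() // one probe, eg: one per resource
	cancel, err := uid.StartTimer()
	if err != nil {
		fmt.Println("timer error:", err)
	}

	uid.SetConverged(false)     // report some activity...
	time.Sleep(6 * time.Second) // ...then go idle; the timer converges us

	if cancel != nil {
		cancel() // same handle as uid.StopTimer()
	}
	uid.Unregister() // each UID must be unregistered before Shutdown can finish
	coord.Shutdown()
}
```

The notable behavioural difference from the old Loop/Start/Pause design is that pausing and resuming are now explicit Pause/Resume calls with an acknowledgment, and the buffered pokeChan with a non-blocking send replaces the old unbuffered channel that had to be fed from a fresh goroutine.
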
converger/converger_test.go (new file): 31 changes

@@ -0,0 +1,31 @@
// Mgmt
// Copyright (C) 2013-2018+ James Shubin and the project contributors
// Written by James Shubin <james@shubin.ca> and the project contributors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// +build !root

package converger

import (
"testing"
)

func TestBufferedChan1(t *testing.T) {
ch := make(chan bool, 1)
ch <- true
close(ch) // closing a channel that's not empty should not block
// must be able to exit without blocking anywhere
}

@@ -1,10 +1,10 @@
FROM golang:1.9
FROM golang:1.11

MAINTAINER Michał Czeraszkiewicz <contact@czerasz.com>

# Set the reset cache variable
# Read more here: http://czerasz.com/2014/11/13/docker-tip-and-tricks/#use-refreshedat-variable-for-better-cache-control
ENV REFRESHED_AT 2017-11-16
ENV REFRESHED_AT 2019-02-06

RUN apt-get update

@@ -137,15 +137,15 @@ Invoke `mgmt` with the `--puppet` switch, which supports 3 variants:

1. Request the configuration from the Puppet Master (like `puppet agent` does)

`mgmt run --puppet agent`
`mgmt run puppet --puppet agent`

2. Compile a local manifest file (like `puppet apply`)

`mgmt run --puppet /path/to/my/manifest.pp`
`mgmt run puppet --puppet /path/to/my/manifest.pp`

3. Compile an ad hoc manifest from the commandline (like `puppet apply -e`)

`mgmt run --puppet 'file { "/etc/ntp.conf": ensure => file }'`
`mgmt run puppet --puppet 'file { "/etc/ntp.conf": ensure => file }'`

For more details and caveats see [Puppet.md](Puppet.md).

@@ -164,6 +164,7 @@ If you feel that a well used option needs documenting here, please patch it!
### Overview of reference

* [Meta parameters](#meta-parameters): List of available resource meta parameters.
* [Lang metadata file](#lang-metadata-file): Lang metadata file format.
* [Graph definition file](#graph-definition-file): Main graph definition file.
* [Command line](#command-line): Command line parameters.
* [Compilation options](#compilation-options): Compilation options.
@@ -249,11 +250,48 @@ integer, then that value is the max size for that semaphore. Valid semaphore
id's include: `some_id`, `hello:42`, `not:smart:4` and `:13`. It is expected
that the last bare example be only used by the engine to add a global semaphore.

### Lang metadata file

Any module *must* have a metadata file in its root. It must be named
`metadata.yaml`, even if it's empty. You can specify zero or more values in yaml
format which can change how your module behaves, and where the `mcl` language
looks for code and other files. The most important top level keys are: `main`,
`path`, `files`, and `license`.

#### Main

The `main` key points to the default entry point of your code. It must be a
relative path if specified. If it's empty it defaults to `main.mcl`. It should
generally not be changed. It is sometimes set to `main/main.mcl` if you'd like
your modules code out of the root and into a child directory for cases where you
don't plan on having a lot deeper imports relative to `main.mcl` and all those
files would clutter things up.

#### Path

The `path` key specifies the modules import search directory to use for this
module. You can specify this if you'd like to vendor something for your module.
In general, if you use it, please use the convention: `path/`. If it's not
specified, you will default to the parent modules directory.

#### Files

The `files` key specifies some additional files that will get included in your
deploy. It defaults to `files/`.

#### License

The `license` key allows you to specify a license for the module. Please specify
one so that everyone can enjoy your code! Use a "short license identifier", like
`LGPLv3+`, or `MIT`. The former is a safe choice if you're not sure what to use.

### Graph definition file

graph.yaml is the compiled graph definition file. The format is currently
undocumented, but by looking through the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples)
you can probably figure out most of it, as it's fairly intuitive.
undocumented, but by looking through the [examples/](https://github.com/purpleidea/mgmt/tree/master/examples/yaml/)
you can probably figure out most of it, as it's fairly intuitive. It's not
recommended that you use this, since it's preferable to write code in the
[mcl language](language-guide.md) front-end.

### Command line

docs/faq.md: 13 changes

@@ -57,6 +57,8 @@ hacking!

### Is this project ready for production?

It's getting pretty close. I'm able to write modules for it now!

Compared to some existing automation tools out there, mgmt is a relatively new
project. It is probably not as feature complete as some other software, but it
also offers a number of features which are not currently available elsewhere.
@@ -146,7 +148,7 @@ requires a number of seconds as an argument.
#### Example:

```
./mgmt run --lang examples/lang/hello0.mcl --converged-timeout=5
./mgmt run lang --lang examples/lang/hello0.mcl --converged-timeout=5
```

### What does the error message about an inconsistent dataDir mean?
@@ -167,14 +169,15 @@ starting up, and as a result, a default endpoint never gets added. The solution
is to either reconcile the mistake, and if there is no important data saved, you
can remove the etcd dataDir. This is typically `/var/lib/mgmt/etcd/member/`.

### Why do resources have both a `Compare` method and an `IFF` (on the UID) method?
### Why do resources have both a `Cmp` method and an `IFF` (on the UID) method?

The `Compare()` methods are for determining if two resources are effectively the
The `Cmp()` methods are for determining if two resources are effectively the
same, which is used to make graph change delta's efficient. This is when we want
to change from the current running graph to a new graph, but preserve the common
vertices. Since we want to make this process efficient, we only update the parts
that are different, and leave everything else alone. This `Compare()` method can
tell us if two resources are the same.
that are different, and leave everything else alone. This `Cmp()` method can
tell us if two resources are the same. In case it is not obvious, `cmp` is an
abbrev. for compare.

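One way to implement such a comparison is to return `nil` when two resources are effectively the same and a descriptive error otherwise. Everything below is made up for illustration (the struct, the field names and the lower-case `cmp` helper are not mgmt's actual resource API); the point is only that a graph delta can keep identical vertices and explain why the others get swapped out:

```golang
package main

import "fmt"

// res is a stand-in for a resource: a kind, a name and one parameter.
type res struct {
	kind    string
	name    string
	content string
}

// cmp returns nil if the two resources are effectively the same, and an error
// describing the first difference otherwise. A graph delta would keep the
// running vertex when cmp returns nil, and replace it when it does not.
func (r *res) cmp(other *res) error {
	if r.kind != other.kind {
		return fmt.Errorf("kind differs: %s vs %s", r.kind, other.kind)
	}
	if r.name != other.name {
		return fmt.Errorf("name differs: %s vs %s", r.name, other.name)
	}
	if r.content != other.content {
		return fmt.Errorf("content differs")
	}
	return nil
}

func main() {
	running := &res{kind: "file", name: "/tmp/f1", content: "hello"}
	next := &res{kind: "file", name: "/tmp/f1", content: "world"}
	if err := running.cmp(next); err != nil {
		fmt.Println("replace this vertex:", err)
	} else {
		fmt.Println("keep the running vertex")
	}
}
```
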
The `IFF()` method is part of the whole UID system, which is for discerning if a
resource meets the requirements another expects for an automatic edge. This is

@@ -124,16 +124,15 @@ An example explains it best:
### Example

```golang
package simplepoly

import (
"fmt"

"github.com/purpleidea/mgmt/lang/types"
"github.com/purpleidea/mgmt/lang/funcs/simplepoly"
)

func init() {
Register("len", []*types.FuncValue{
simplepoly.Register("len", []*types.FuncValue{
{
T: types.NewType("func([]variant) int"),
V: Len,
@@ -343,11 +342,21 @@ also ensures they can be encoded and decoded. Make sure to include the following
code snippet for this to work.

```golang
import "github.com/purpleidea/mgmt/lang/funcs"

func init() { // special golang method that runs once
funcs.Register("foo", func() interfaces.Func { return &FooFunc{} })
}
```

Functions inside of built-in modules will need to use the `ModuleRegister`
method instead.

```golang
// moduleName is already set to "math" by the math package. Do this in `init`.
funcs.ModuleRegister(moduleName, "cos", func() interfaces.Func { return &CosFunc{} })
```

### Composite functions

Composite functions are functions which import one or more existing functions.

@@ -140,6 +140,31 @@ expression
include bar("world", 13) # an include can be called multiple times
```

- **import**: import a particular scope from this location at a given namespace

```mcl
# a system module import
import "fmt"

# a local, single file import (relative path, not a module)
import "dir1/file.mcl"

# a local, module import (relative path, contents are a module)
import "dir2/"

# a remote module import (absolute remote path, contents are a module)
import "git://github.com/purpleidea/mgmt-example1/"
```

or

```mcl
import "fmt" as * # contents namespaced into top-level names
import "foo.mcl" # namespaced as foo
import "dir1/" as bar # namespaced as bar
import "git://github.com/purpleidea/mgmt-example1/" # namespaced as example1
```

All statements produce _output_. Output consists of between zero and more
`edges` and `resources`. A resource statement can produce a resource, whereas an
`if` statement produces whatever the chosen branch produces. Ultimately the goal
@@ -165,6 +190,8 @@ resource to control how it behaves. For example, setting the `content` parameter
of a `file` resource to the string `hello`, will cause the contents of that file
to contain the string `hello` after it has run.

##### Undefined parameters

For some parameters, there is a distinction between an unspecified parameter,
and a parameter with a `zero` value. For example, for the file resource, you
might choose to set the `content` parameter to be the empty string, which would
@@ -189,6 +216,75 @@ it evaluates to `true`, then the parameter will be used. If no `elvis` operator
is specified, then the parameter value will also be used. If the parameter is
not specified, then it will obviously not be used.

##### Meta parameters

Resources may specify meta parameters. To do so, you must add them as you would
a regular parameter, except that they start with `Meta` and are capitalized. Eg:

```mcl
file "/tmp/f1" {
content => "hello!\n",

Meta:noop => true,
Meta:delay => $b ?: 42,
Meta:autoedge => false,
}
```

As you can see, they also support the elvis operator, and you can add as many as
you like. While it is not recommended to add the same meta parameter more than
once, it does not currently cause an error, and even though the result of doing
so is officially undefined, it will currently take the last specified value.

You may also specify a single meta parameter struct. This is useful if you'd
like to reuse a value, or build a combined value programmatically. For example:

```mcl
file "/tmp/f1" {
content => "hello!\n",

Meta => $b ?: struct{
noop => false,
retry => -1,
delay => 0,
poll => 5,
limit => 4.2,
burst => 3,
sema => ["foo:1", "bar:3",],
autoedge => true,
autogroup => false,
},
}
```

Remember that the top-level `Meta` field supports the elvis operator, while the
individual struct fields in the struct type do not. This is to be expected, but
since they are syntactically similar, it is worth mentioning to avoid confusion.

Please note that at the moment, you must specify a full metaparams struct, since
partial struct types are currently not supported in the language. Patches are
welcome if you'd like to add this tricky feature!

##### Resource naming

Each resource must have a unique name of type `str` that is used to uniquely
identify that resource, and can be used in the functioning of the resource at
that resources discretion. For example, the `file` resource uses the unique name
value to specify the path.

Alternatively, the name value may be a list of strings `[]str` to build a list
of resources, each with a name from that list. When this is done, each resource
will use the same set of parameters. The list of internal edges specified in the
same resource block is created intelligently to have the appropriate edge for
each separate resource.

Using this construct is a veiled form of looping (iteration). This technique is
one of many ways you can perform iterative tasks that you might have
traditionally used a `for` loop for instead. This is preferred, because flow
control is error-prone and can make for less readable code.

##### Internal edges

Resources may also declare edges internally. The edges may point to or from
another resource, and may optionally include a notification. The four properties
are: `Before`, `Depend`, `Notify` and `Listen`. The first two represent normal
@@ -285,11 +381,12 @@ class baz($a str, $b) {
|
||||
Classes can also be nested within other classes. Here's a contrived example:
|
||||
|
||||
```mcl
|
||||
import "fmt"
|
||||
class c1($a, $b) {
|
||||
# nested class definition
|
||||
class c2($c) {
|
||||
test $a {
|
||||
stringptr => printf("%s is %d", $b, $c),
|
||||
stringptr => fmt.printf("%s is %d", $b, $c),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -317,6 +414,45 @@ parameters, then the same class can even be called with different signatures.
|
||||
Whether the output is useful and whether there is a unique type unification
|
||||
solution is dependent on your code.
|
||||
|
||||
#### Import

The `import` statement imports a scope into the specified namespace. A scope can
contain variable, class, and function definitions. All are statements.
Furthermore, since each of these has different logical uses, you could
theoretically import a scope that contains an `int` variable named `foo`, a
class named `foo`, and a function named `foo` as well. Keep in mind that
variables can contain functions (they can have a function type) and are
commonly called lambdas.

There are a few different kinds of imports. They differ by the string contents
that you specify. Short single-word tokens, or multiple-word tokens separated by
slashes, are system imports. Eg: `math`, `fmt`, or even `math/trig`. Local
imports are path imports that are relative to the current directory. They can
either import a single `mcl` file, or an entire well-formed module. Eg:
`file1.mcl` or `dir1/`. Lastly, you can have a remote import. This must be an
absolute path to a well-formed module. The common transport is `git`, and it can
be represented via an FQDN. Eg: `git://github.com/purpleidea/mgmt-example1/`.

The namespace that any of these are imported into depends on how you use the
import statement. By default, each kind of import will have a logical namespace
identifier associated with it. System imports use the last token in their name.
Eg: `fmt` would be imported as `fmt` and `math/trig` would be imported as
`trig`. Local imports do the same, except that the required `.mcl` extension or
trailing slash is removed. Eg: `foo/file1.mcl` would be imported as `file1` and
`bar/baz/` would be imported as `baz`. Remote imports use some more complex
rules. In general, well-named modules that contain a final directory name in the
form: `mgmt-whatever/` will be named `whatever`. Otherwise, the last path token
will be converted to lowercase and the dashes will be converted to underscores.
The rules for remote imports might change, and should not be considered stable.
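Putting these rules together, a hedged sketch of the default namespaces, reusing
the example paths from the paragraphs above (and assuming the local file and the
remote module actually exist), could look like this:

```mcl
import "fmt"                                        # imported as fmt
import "math/trig"                                  # imported as trig
import "foo/file1.mcl"                              # imported as file1
import "git://github.com/purpleidea/mgmt-example1/" # imported as example1

test "t1" {
	# the system fmt import is then used via its namespace
	stringptr => fmt.printf("%s is %d", "answer", 42),
}
```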

In any of the import cases, you can change the namespace that the contents are
imported into. Simply add the `as whatever` text at the end of the import, and
`whatever` will be the name of the namespace. Please note that `whatever` is not
surrounded by quotes, since it is an identifier, and not a `string`. If you'd
like to add all of the import contents into the top-level scope, you can use the
`as *` text to dump all of the contents in. This is generally not recommended,
as it might cause a conflict with another identifier.
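For example, a short, hedged sketch of both forms (the identifier `mytrig` is an
arbitrary choice, and `math/trig` is reused from the earlier examples):

```mcl
import "math/trig" as mytrig # namespaced as mytrig instead of the default trig
import "fmt" as *            # contents dumped into the top-level scope

test "t1" {
	# printf can now be called without the fmt prefix, at the risk of
	# colliding with another identifier of the same name
	stringptr => printf("%s is %d", "answer", 42),
}
```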

### Stages

The mgmt compiler runs in a number of stages. In order of execution they are:

@@ -143,7 +143,7 @@ you to specify which `puppet.conf` file should be used during
|
||||
translation.
|
||||
|
||||
```
|
||||
mgmt run --puppet /opt/my-manifest.pp --puppet-conf /etc/mgmt/puppet.conf
|
||||
mgmt run puppet --puppet /opt/my-manifest.pp --puppet-conf /etc/mgmt/puppet.conf
|
||||
```
|
||||
|
||||
Within this file, you can just specify any needed options in the
|
||||
@@ -164,3 +164,152 @@ language features.
|
||||
You should probably make sure to always use the latest release of
|
||||
both `ffrank-mgmtgraph` and `ffrank-yamlresource` (the latter is
|
||||
getting pulled in as a dependency of the former).
|
||||
|
||||
## Using Puppet in conjunction with the mcl lang
|
||||
|
||||
The graph that Puppet generates for `mgmt` can be united with a graph
|
||||
that is created from native `mgmt` code in its mcl language. This is
|
||||
useful when you are in the process of replacing Puppet with mgmt. You
|
||||
can translate your custom modules into mgmt's language one by one,
|
||||
and let mgmt run the current mix.
|
||||
|
||||
Instead of the usual `--puppet`, `--puppet-conf`, and `--lang` for mcl,
|
||||
you need to use alternative flags to make this work:
|
||||
|
||||
* `--lp-lang` to specify the mcl input
|
||||
* `--lp-puppet` to specify the puppet input
|
||||
* `--lp-puppet-conf` to point to the optional puppet.conf file
|
||||
|
||||
`mgmt` will derive a graph that contains all edges and vertices from
|
||||
both inputs. You essentially get two unrelated subgraphs that run in
|
||||
parallel. To form edges between these subgraphs, you have to define
|
||||
special vertices that will be merged. This works through a hard-coded
|
||||
naming scheme.
|
||||
|
||||
### Mixed graph example 1 - No merges
|
||||
|
||||
```mcl
|
||||
# lang
|
||||
file "/tmp/mgmt_dir/" { state => "present" }
|
||||
file "/tmp/mgmt_dir/a" { state => "present" }
|
||||
```
|
||||
|
||||
```puppet
|
||||
# puppet
|
||||
file { "/tmp/puppet_dir": ensure => "directory" }
|
||||
file { "/tmp/puppet_dir/a": ensure => "file" }
|
||||
```
|
||||
|
||||
These very simple inputs (including the implicit edges from each directory to
its respective file) result in two unrelated subgraphs.
|
||||
|
||||
```
|
||||
File[/tmp/mgmt_dir/] -> File[/tmp/mgmt_dir/a]
|
||||
|
||||
File[/tmp/puppet_dir] -> File[/tmp/puppet_dir/a]
|
||||
```
|
||||
|
||||
### Mixed graph example 2 - Merged vertex
|
||||
|
||||
In order to have merged vertices in the resulting graph, you will
|
||||
need to include special resources and classes in the respective
|
||||
input code.
|
||||
|
||||
* On the lang side, add `noop` resources with names starting in `puppet_`.
|
||||
* On the Puppet side, add **empty** classes with names starting in `mgmt_`.
|
||||
|
||||
```mcl
|
||||
# lang
|
||||
noop "puppet_handover_to_mgmt" {}
|
||||
file "/tmp/mgmt_dir/" { state => "present" }
|
||||
file "/tmp/mgmt_dir/a" { state => "present" }
|
||||
|
||||
Noop["puppet_handover_to_mgmt"] -> File["/tmp/mgmt_dir/"]
|
||||
```
|
||||
|
||||
```puppet
|
||||
# puppet
|
||||
class mgmt_handover_to_mgmt {}
|
||||
include mgmt_handover_to_mgmt
|
||||
|
||||
file { "/tmp/puppet_dir": ensure => "directory" }
|
||||
file { "/tmp/puppet_dir/a": ensure => "file" }
|
||||
|
||||
File["/tmp/puppet_dir/a"] -> Class["mgmt_handover_to_mgmt"]
|
||||
```
|
||||
|
||||
The new `noop` resource is merged with the new class, resulting in
|
||||
the following graph:
|
||||
|
||||
```
|
||||
File[/tmp/puppet_dir] -> File[/tmp/puppet_dir/a]
|
||||
|
|
||||
V
|
||||
Noop[handover_to_mgmt]
|
||||
|
|
||||
V
|
||||
File[/tmp/mgmt_dir/] -> File[/tmp/mgmt_dir/a]
|
||||
```
|
||||
|
||||
With everything lined up this way, the resources from the Puppet input run
before those from the mcl input.
|
||||
|
||||
**Note:** The names of the `noop` and the class must be identical after the
|
||||
respective prefix. The common part (here, `handover_to_mgmt`) becomes the name
|
||||
of the merged resource.
|
||||
|
||||
### Mixed graph example 3 - Multiple merges
|
||||
|
||||
In most scenarios, it will not be possible to define a single handover
|
||||
point as in the previous example. For example, if some Puppet resources
|
||||
need to run in between two stages of native resources, you need at least
|
||||
two merged vertices:
|
||||
|
||||
```mcl
|
||||
# lang
|
||||
noop "puppet_handover" {}
|
||||
noop "puppet_handback" {}
|
||||
file "/tmp/mgmt_dir/" { state => "present" }
|
||||
file "/tmp/mgmt_dir/a" { state => "present" }
|
||||
file "/tmp/mgmt_dir/puppet_subtree/state-file" { state => "present" }
|
||||
|
||||
File["/tmp/mgmt_dir/"] -> Noop["puppet_handover"]
|
||||
Noop["puppet_handback"] -> File["/tmp/mgmt_dir/puppet_subtree/state-file"]
|
||||
```
|
||||
|
||||
```puppet
|
||||
# puppet
|
||||
class mgmt_handover {}
|
||||
class mgmt_handback {}
|
||||
|
||||
include mgmt_handover, mgmt_handback
|
||||
|
||||
class important_stuff {
|
||||
file { "/tmp/mgmt_dir/puppet_subtree":
|
||||
ensure => "directory"
|
||||
}
|
||||
# ...
|
||||
}
|
||||
|
||||
Class["mgmt_handover"] -> Class["important_stuff"] -> Class["mgmt_handback"]
|
||||
```
|
||||
|
||||
The resulting graph looks roughly like this:
|
||||
|
||||
```
|
||||
File[/tmp/mgmt_dir/] -> File[/tmp/mgmt_dir/a]
|
||||
|
|
||||
V
|
||||
Noop[handover] -> ( class important_stuff resources )
|
||||
|
|
||||
V
|
||||
Noop[handback]
|
||||
|
|
||||
V
|
||||
File[/tmp/mgmt_dir/puppet_subtree/state-file]
|
||||
```
|
||||
|
||||
You can add arbitrary numbers of merge pairs to your code bases, with
relationships as needed. In our limited experience, however, code readability
suffers quite a lot from these, so we advise keeping these structures simple.
|
||||
|
||||
@@ -13,7 +13,7 @@ Once you're familiar with the general idea, please start hacking...
|
||||
|
||||
### Installing golang
|
||||
|
||||
* You need golang version 1.9 or greater installed.
|
||||
* You need golang version 1.10 or greater installed.
|
||||
* To install on rpm style systems: `sudo dnf install golang`
|
||||
* To install on apt style systems: `sudo apt install golang`
|
||||
* To install on macOS systems install [Homebrew](https://brew.sh)
|
||||
@@ -57,8 +57,8 @@ export PATH=$PATH:$GOPATH/bin
|
||||
|
||||
### Running mgmt
|
||||
|
||||
* Run `time ./mgmt run --lang examples/lang/hello0.mcl --tmp-prefix` to try out
|
||||
a very simple example!
|
||||
* Run `time ./mgmt run --tmp-prefix lang --lang examples/lang/hello0.mcl` to try
|
||||
out a very simple example!
|
||||
* Look in that example file that you ran to see if you can figure out what it
|
||||
did!
|
||||
* Have fun hacking on our future technology and get involved to shape the
|
||||
@@ -89,7 +89,7 @@ required for running the _test_ suite.
|
||||
|
||||
### Build
|
||||
|
||||
* `golang` 1.9 or higher (required, available in some distros and distributed
|
||||
* `golang` 1.10 or higher (required, available in some distros and distributed
|
||||
as a binary officially by [golang.org](https://golang.org/dl/))
|
||||
|
||||
### Runtime
|
||||
@@ -181,5 +181,5 @@ Other examples:
|
||||
|
||||
```
|
||||
docker/scripts/exec-development make build
|
||||
docker/scripts/exec-development ./mgmt run --tmp-prefix --lang examples/lang/load0.mcl
|
||||
docker/scripts/exec-development ./mgmt run --tmp-prefix lang --lang examples/lang/load0.mcl
|
||||
```
|
||||
|
||||
@@ -307,21 +307,18 @@ running.
|
||||
The lifetime of most resources' `Watch` method should be spent in an infinite
|
||||
loop that is bounded by a `select` call. The `select` call is the point where
|
||||
our method hands back control to the engine (and the kernel) so that we can
|
||||
sleep until something of interest wakes us up. In this loop we must process
|
||||
events from the engine via the `<-obj.init.Events` channel, and receive events
|
||||
for our resource itself!
|
||||
sleep until something of interest wakes us up. In this loop we must wait until
|
||||
we get a shutdown event from the engine via the `<-obj.init.Done` channel, which
|
||||
closes when we'd like to shut everything down. At this point you should clean
up, and let `Watch` close.
|
||||
|
||||
#### Events
|
||||
|
||||
If we receive an internal event from the `<-obj.init.Events` channel, we should
|
||||
read it with the `obj.init.Read` helper function. This function tells us if we
|
||||
should shutdown our resource. It also handles pause functionality which blocks
|
||||
our resource temporarily in this method. If this channel shuts down, then we
|
||||
should treat that as an exit signal.
|
||||
|
||||
When we want to send an event, we use the `Event` helper function. It is also
|
||||
important to mark the resource state as `dirty` if we believe it might have
|
||||
changed. We do this by calling the `obj.init.Dirty` function.
|
||||
If the `<-obj.init.Done` channel closes, we should shut down our resource. When
we want to send an event, we use the `Event` helper function. This
|
||||
automatically marks the resource state as `dirty`. If you're unsure, it's not
|
||||
harmful to send the event. This will ultimately cause `CheckApply` to run. This
|
||||
method can block if the resource is being paused.
|
||||
|
||||
#### Startup
|
||||
|
||||
@@ -330,8 +327,7 @@ to generate one event to notify the `mgmt` engine that we're now listening
|
||||
successfully, so that it can run an initial `CheckApply` to ensure we're safely
|
||||
tracking a healthy state and that we didn't miss anything when `Watch` was down
|
||||
or from before `mgmt` was running. You must do this by calling the
|
||||
`obj.init.Running` method. If it returns an error, you must exit and return that
|
||||
error.
|
||||
`obj.init.Running` method.
|
||||
|
||||
#### Converged
|
||||
|
||||
@@ -358,41 +354,29 @@ func (obj *FooRes) Watch() error {
|
||||
defer obj.whatever.CloseFoo() // shutdown our Foo
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
// shutdown engine
|
||||
// (it is okay if some `defer` code runs first)
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// the actual events!
|
||||
case event := <-obj.foo.Events:
|
||||
if is_an_event {
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
}
|
||||
|
||||
// event errors
|
||||
case err := <-obj.foo.Errors:
|
||||
return err // will cause a retry or permanent failure
|
||||
|
||||
case <-obj.init.Done: // signal for shutdown request
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -567,23 +551,10 @@ ready to detect changes.
|
||||
Event sends an event notifying the engine of a possible state change. It is
|
||||
only called from within `Watch`.
|
||||
|
||||
### Events
|
||||
### Done
|
||||
|
||||
Events is a channel that we must watch for messages from the engine. When it
|
||||
closes, this is a signal to shutdown. It is
|
||||
only called from within `Watch`.
|
||||
|
||||
### Read
|
||||
|
||||
Read processes messages that come in from the `Events` channel. It is a helper
|
||||
method that knows how to handle the pause mechanism correctly. It is
|
||||
only called from within `Watch`.
|
||||
|
||||
### Dirty
|
||||
|
||||
Dirty marks the resource state as dirty. This signals to the engine that
|
||||
CheckApply will have some work to do in order to converge it. It is
|
||||
only called from within `Watch`.
|
||||
Done is a channel that closes when the engine wants us to shutdown. It is only
|
||||
called from within `Watch`.
|
||||
|
||||
### Refresh
|
||||
|
||||
|
||||
@@ -68,7 +68,7 @@ identified by a trailing slash in their path name. Files have no such slash.
|
||||
|
||||
It has the following properties:
|
||||
|
||||
* `path`: file path (directories have a trailing slash here)
|
||||
* `path`: absolute file path (directories have a trailing slash here)
|
||||
* `content`: raw file content
|
||||
* `state`: either `exists` (the default value) or `absent`
|
||||
* `mode`: octal unix file permissions
|
||||
|
||||
@@ -1,22 +1,28 @@
|
||||
# Style guide
|
||||
|
||||
## Overview
|
||||
This document aims to be a reference for the desired style for patches to mgmt,
|
||||
and the associated `mcl` language. In particular it describes conventions which
|
||||
are not officially enforced by tools and in test cases, or that aren't clearly
|
||||
defined elsewhere. We try to turn as many of these into automated tests as we
|
||||
can. If something here is not defined in a test, or you think it should be,
|
||||
please write one! Even better, you can write a tool to automatically fix it,
|
||||
since this is more useful and can easily be turned into a test!
|
||||
|
||||
This document aims to be a reference for the desired style for patches to mgmt.
|
||||
In particular it describes conventions which we use which are not officially
|
||||
enforced by the `gofmt` tool, and which might not be clearly defined elsewhere.
|
||||
Most of these are common sense to seasoned programmers, and we hope this will be
|
||||
a useful reference for new programmers.
|
||||
## Overview for golang code
|
||||
|
||||
Most style issues are enforced by the `gofmt` tool. Other style aspects are
|
||||
often common sense to seasoned programmers, and we hope this will be a useful
|
||||
reference for new programmers.
|
||||
|
||||
There are a lot of useful code review comments described
|
||||
[here](https://github.com/golang/go/wiki/CodeReviewComments). We don't
|
||||
necessarily follow everything strictly, but it is in general a very good guide.
|
||||
|
||||
## Basics
|
||||
### Basics
|
||||
|
||||
* All of our golang code is formatted with `gofmt`.
|
||||
|
||||
## Comments
|
||||
### Comments
|
||||
|
||||
All of our code is commented with the minimums required for `godoc` to function,
|
||||
and so that our comments pass `golint`. Code comments should either be full
|
||||
@@ -28,7 +34,7 @@ They should explain algorithms, describe non-obvious behaviour, or situations
|
||||
which would otherwise need explanation or additional research during a code
|
||||
review. Notes about unfamiliar APIs are a good idea for a code comment.
|
||||
|
||||
### Example
|
||||
#### Example
|
||||
|
||||
Here you can see a function with the correct `godoc` string. The first word must
|
||||
match the name of the function. It is _not_ capitalized because the function is
|
||||
@@ -41,7 +47,7 @@ func square(x int) int {
|
||||
}
|
||||
```
|
||||
|
||||
## Line length
|
||||
### Line length
|
||||
|
||||
In general we try to stick to 80 character lines when it is appropriate. It is
|
||||
almost *always* appropriate for function `godoc` comments and most longer
|
||||
@@ -55,7 +61,7 @@ Occasionally inline, two line source code comments are used within a function.
|
||||
These should usually be balanced so that you don't have one line with 78
|
||||
characters and the second with only four. Split the comment between the two.
|
||||
|
||||
## Method receiver naming
|
||||
### Method receiver naming
|
||||
|
||||
[Contrary](https://github.com/golang/go/wiki/CodeReviewComments#receiver-names)
|
||||
to the specialized naming of the method receiver variable, we usually name all
|
||||
@@ -65,7 +71,7 @@ makes the code easier to read since you don't need to remember the name of the
|
||||
method receiver variable in each different method. This is very similar to what
|
||||
is done in `python`.
|
||||
|
||||
### Example
|
||||
#### Example
|
||||
|
||||
```golang
|
||||
// Bar does a thing, and returns the number of baz results found in our
|
||||
@@ -78,7 +84,7 @@ func (obj *Foo) Bar(baz string) int {
|
||||
}
|
||||
```
|
||||
|
||||
## Consistent ordering
|
||||
### Consistent ordering
|
||||
|
||||
In general we try to preserve a logical ordering in source files which usually
|
||||
matches the common order of execution that a _lazy evaluator_ would follow.
|
||||
@@ -90,6 +96,55 @@ declared in the interface.
|
||||
When implementing code for the various types in the language, please follow this
|
||||
order: `bool`, `str`, `int`, `float`, `list`, `map`, `struct`, `func`.
|
||||
|
||||
## Overview for mcl code
|
||||
|
||||
The `mcl` language is quite new, so this guide will probably change over time as
|
||||
we find what's best, and hopefully we'll be able to add an `mclfmt` tool in the
|
||||
future so that less of this needs to be documented. (Patches welcome!)
|
||||
|
||||
### Indentation
|
||||
|
||||
Code indentation is done with tabs. The tab-width is a personal preference,
which is the beauty of using tabs: everyone can view the code at whatever width
they prefer. The inventor of `mgmt` uses and recommends a width of eight, and
that is what should be used if your tool requires a modeline to be publicly
committed.
|
||||
|
||||
### Line length
|
||||
|
||||
We recommend you stick to 80 char line width. If you find yourself with deeper
|
||||
nesting, it might be a hint that your code could be refactored in a more
|
||||
pleasant way.
|
||||
|
||||
### Capitalization
|
||||
|
||||
At the moment, variables, function names, and classes are all lowercase and do
|
||||
not contain underscores. We will probably figure out what style to recommend
|
||||
when the language is a bit further along. For example, we haven't decided if we
|
||||
should have a notion of public and private variables, and if we'd like to
|
||||
reserve capitalization for this situation.
|
||||
|
||||
### Module naming
|
||||
|
||||
We recommend you name your modules with an `mgmt-` prefix. For example, a module
|
||||
about bananas might be named `mgmt-banana`. This is helpful for the useful magic
|
||||
built-in to the module import code, which will by default take a remote import
|
||||
like: `import "https://github.com/purpleidea/mgmt-banana/"` and namespace it as
|
||||
`banana`. Of course you can always pick the namespace yourself on import with:
|
||||
`import "https://github.com/purpleidea/mgmt-banana/" as tomato` or something
|
||||
similar.
|
||||
|
||||
### Licensing
|
||||
|
||||
We believe that sharing code helps reduce unnecessary re-invention, so that we
|
||||
can [stand on the shoulders of giants](https://en.wikipedia.org/wiki/Standing_on_the_shoulders_of_giants)
|
||||
and hopefully make faster progress in science, medicine, exploration, etc... As
|
||||
a result, we recommend releasing your modules under the [LGPLv3+](https://www.gnu.org/licenses/lgpl-3.0.en.html)
|
||||
license for the maximum balance of freedom and re-usability. We strongly oppose
|
||||
any [CLA](https://en.wikipedia.org/wiki/Contributor_License_Agreement)
|
||||
requirements and believe that the ["inbound==outbound"](https://ref.fedorapeople.org/fontana-linuxcon.html#slide2)
|
||||
rule applies. Lastly, we do not support software patents and we hope you don't
|
||||
either!
|
||||
|
||||
## Suggestions
|
||||
|
||||
If you have any ideas for suggestions or other improvements to this guide,
|
||||
|
||||
@@ -31,6 +31,10 @@ type EdgeableRes interface {
|
||||
// trait.
|
||||
AutoEdgeMeta() *AutoEdgeMeta
|
||||
|
||||
// SetAutoEdgeMeta lets you set all of the meta params for the automatic
|
||||
// edges trait in a single call.
|
||||
SetAutoEdgeMeta(*AutoEdgeMeta)
|
||||
|
||||
// UIDs includes all params to make a unique identification of this
|
||||
// object.
|
||||
UIDs() []ResUID // most resources only return one
|
||||
|
||||
@@ -34,6 +34,10 @@ type GroupableRes interface {
|
||||
// grouping trait.
|
||||
AutoGroupMeta() *AutoGroupMeta
|
||||
|
||||
// SetAutoGroupMeta lets you set all of the meta params for the
|
||||
// automatic grouping trait in a single call.
|
||||
SetAutoGroupMeta(*AutoGroupMeta)
|
||||
|
||||
// GroupCmp compares two resources and decides if they're suitable for
|
||||
// grouping. This usually needs to be unique to your resource.
|
||||
GroupCmp(res GroupableRes) error
|
||||
|
||||
195 engine/cmp.go
@@ -24,7 +24,8 @@ import (
|
||||
)
|
||||
|
||||
// ResCmp compares two resources by checking multiple aspects. This is the main
|
||||
// entry point for running all the compare steps on two resource.
|
||||
// entry point for running all the compare steps on two resources. This code is
|
||||
// very similar to AdaptCmp.
|
||||
func ResCmp(r1, r2 Res) error {
|
||||
if r1.Kind() != r2.Kind() {
|
||||
return fmt.Errorf("kind differs")
|
||||
@@ -37,6 +38,30 @@ func ResCmp(r1, r2 Res) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: do we need to compare other traits/metaparams?
|
||||
|
||||
m1 := r1.MetaParams()
|
||||
m2 := r2.MetaParams()
|
||||
if (m1 == nil) != (m2 == nil) { // xor
|
||||
return fmt.Errorf("meta params differ")
|
||||
}
|
||||
if m1 != nil && m2 != nil {
|
||||
if err := m1.Cmp(m2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
r1x, ok1 := r1.(RefreshableRes)
|
||||
r2x, ok2 := r2.(RefreshableRes)
|
||||
if ok1 != ok2 {
|
||||
return fmt.Errorf("refreshable differs") // they must be different (optional)
|
||||
}
|
||||
if ok1 && ok2 {
|
||||
if r1x.Refresh() != r2x.Refresh() {
|
||||
return fmt.Errorf("refresh differs")
|
||||
}
|
||||
}
|
||||
|
||||
// compare meta params for resources with auto edges
|
||||
r1e, ok1 := r1.(EdgeableRes)
|
||||
r2e, ok2 := r2.(EdgeableRes)
|
||||
@@ -87,6 +112,174 @@ func ResCmp(r1, r2 Res) error {
|
||||
}
|
||||
}
|
||||
|
||||
r1r, ok1 := r1.(RecvableRes)
|
||||
r2r, ok2 := r2.(RecvableRes)
|
||||
if ok1 != ok2 {
|
||||
return fmt.Errorf("recvable differs") // they must be different (optional)
|
||||
}
|
||||
if ok1 && ok2 {
|
||||
v1 := r1r.Recv()
|
||||
v2 := r2r.Recv()
|
||||
|
||||
if (v1 == nil) != (v2 == nil) { // xor
|
||||
return fmt.Errorf("recv params differ")
|
||||
}
|
||||
if v1 != nil && v2 != nil {
|
||||
// TODO: until we hit this code path, don't allow
|
||||
// comparing anything that has this set to non-zero
|
||||
if len(v1) != 0 || len(v2) != 0 {
|
||||
return fmt.Errorf("recv params exist")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r1s, ok1 := r1.(SendableRes)
|
||||
r2s, ok2 := r2.(SendableRes)
|
||||
if ok1 != ok2 {
|
||||
return fmt.Errorf("sendable differs") // they must be different (optional)
|
||||
}
|
||||
if ok1 && ok2 {
|
||||
s1 := r1s.Sent()
|
||||
s2 := r2s.Sent()
|
||||
|
||||
if (s1 == nil) != (s2 == nil) { // xor
|
||||
return fmt.Errorf("send params differ")
|
||||
}
|
||||
if s1 != nil && s2 != nil {
|
||||
// TODO: until we hit this code path, don't allow
|
||||
// adapting anything that has this set to non-nil
|
||||
return fmt.Errorf("send params exist")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AdaptCmp compares two resources by checking multiple aspects. This is the
|
||||
// main entry point for running all the compatible compare steps on two
|
||||
// resources. This code is very similar to ResCmp.
|
||||
func AdaptCmp(r1, r2 CompatibleRes) error {
|
||||
if r1.Kind() != r2.Kind() {
|
||||
return fmt.Errorf("kind differs")
|
||||
}
|
||||
if r1.Name() != r2.Name() {
|
||||
return fmt.Errorf("name differs")
|
||||
}
|
||||
|
||||
// run `Adapts` instead of `Cmp`
|
||||
if err := r1.Adapts(r2); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: do we need to compare other traits/metaparams?
|
||||
|
||||
m1 := r1.MetaParams()
|
||||
m2 := r2.MetaParams()
|
||||
if (m1 == nil) != (m2 == nil) { // xor
|
||||
return fmt.Errorf("meta params differ")
|
||||
}
|
||||
if m1 != nil && m2 != nil {
|
||||
if err := m1.Cmp(m2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// we don't need to compare refresh, since those can always be merged...
|
||||
|
||||
// compare meta params for resources with auto edges
|
||||
r1e, ok1 := r1.(EdgeableRes)
|
||||
r2e, ok2 := r2.(EdgeableRes)
|
||||
if ok1 != ok2 {
|
||||
return fmt.Errorf("edgeable differs") // they must be different (optional)
|
||||
}
|
||||
if ok1 && ok2 {
|
||||
if r1e.AutoEdgeMeta().Cmp(r2e.AutoEdgeMeta()) != nil {
|
||||
return fmt.Errorf("autoedge differs")
|
||||
}
|
||||
}
|
||||
|
||||
// compare meta params for resources with auto grouping
|
||||
r1g, ok1 := r1.(GroupableRes)
|
||||
r2g, ok2 := r2.(GroupableRes)
|
||||
if ok1 != ok2 {
|
||||
return fmt.Errorf("groupable differs") // they must be different (optional)
|
||||
}
|
||||
if ok1 && ok2 {
|
||||
if r1g.AutoGroupMeta().Cmp(r2g.AutoGroupMeta()) != nil {
|
||||
return fmt.Errorf("autogroup differs")
|
||||
}
|
||||
|
||||
// if resources are grouped, are the groups the same?
|
||||
if i, j := r1g.GetGroup(), r2g.GetGroup(); len(i) != len(j) {
|
||||
return fmt.Errorf("autogroup groups differ")
|
||||
} else if len(i) > 0 { // trick the golinter
|
||||
|
||||
// Sort works with Res, so convert the lists to that
|
||||
iRes := []Res{}
|
||||
for _, r := range i {
|
||||
res := r.(Res)
|
||||
iRes = append(iRes, res)
|
||||
}
|
||||
jRes := []Res{}
|
||||
for _, r := range j {
|
||||
res := r.(Res)
|
||||
jRes = append(jRes, res)
|
||||
}
|
||||
|
||||
ix, jx := Sort(iRes), Sort(jRes) // now sort :)
|
||||
for k := range ix {
|
||||
// compare sub resources
|
||||
// TODO: should we use AdaptCmp here?
|
||||
// TODO: how would they run `Merge` ? (we don't)
|
||||
// this code path will probably not run, because
|
||||
// it is called in the lang before autogrouping!
|
||||
if err := ResCmp(ix[k], jx[k]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r1r, ok1 := r1.(RecvableRes)
|
||||
r2r, ok2 := r2.(RecvableRes)
|
||||
if ok1 != ok2 {
|
||||
return fmt.Errorf("recvable differs") // they must be different (optional)
|
||||
}
|
||||
if ok1 && ok2 {
|
||||
v1 := r1r.Recv()
|
||||
v2 := r2r.Recv()
|
||||
|
||||
if (v1 == nil) != (v2 == nil) { // xor
|
||||
return fmt.Errorf("recv params differ")
|
||||
}
|
||||
if v1 != nil && v2 != nil {
|
||||
// TODO: until we hit this code path, don't allow
|
||||
// adapting anything that has this set to non-zero
|
||||
if len(v1) != 0 || len(v2) != 0 {
|
||||
return fmt.Errorf("recv params exist")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r1s, ok1 := r1.(SendableRes)
|
||||
r2s, ok2 := r2.(SendableRes)
|
||||
if ok1 != ok2 {
|
||||
return fmt.Errorf("sendable differs") // they must be different (optional)
|
||||
}
|
||||
if ok1 && ok2 {
|
||||
s1 := r1s.Sent()
|
||||
s2 := r2s.Sent()
|
||||
|
||||
if (s1 == nil) != (s2 == nil) { // xor
|
||||
return fmt.Errorf("send params differ")
|
||||
}
|
||||
if s1 != nil && s2 != nil {
|
||||
// TODO: until we hit this code path, don't allow
|
||||
// adapting anything that has this set to non-nil
|
||||
return fmt.Errorf("send params exist")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
160 engine/copy.go (new file)
@@ -0,0 +1,160 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package engine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ResCopy copies a resource. This is the main entry point for copying a
|
||||
// resource since it does all the common engine-level copying as well.
|
||||
func ResCopy(r CopyableRes) (CopyableRes, error) {
|
||||
res := r.Copy()
|
||||
res.SetKind(r.Kind())
|
||||
res.SetName(r.Name())
|
||||
|
||||
if x, ok := r.(MetaRes); ok {
|
||||
dst, ok := res.(MetaRes)
|
||||
if !ok {
|
||||
// programming error
|
||||
panic("meta interfaces are illogical")
|
||||
}
|
||||
dst.SetMetaParams(x.MetaParams().Copy()) // copy b/c we have it
|
||||
}
|
||||
|
||||
if x, ok := r.(RefreshableRes); ok {
|
||||
dst, ok := res.(RefreshableRes)
|
||||
if !ok {
|
||||
// programming error
|
||||
panic("refresh interfaces are illogical")
|
||||
}
|
||||
dst.SetRefresh(x.Refresh()) // no need to copy atm
|
||||
}
|
||||
|
||||
// copy meta params for resources with auto edges
|
||||
if x, ok := r.(EdgeableRes); ok {
|
||||
dst, ok := res.(EdgeableRes)
|
||||
if !ok {
|
||||
// programming error
|
||||
panic("autoedge interfaces are illogical")
|
||||
}
|
||||
dst.SetAutoEdgeMeta(x.AutoEdgeMeta()) // no need to copy atm
|
||||
}
|
||||
|
||||
// copy meta params for resources with auto grouping
|
||||
if x, ok := r.(GroupableRes); ok {
|
||||
dst, ok := res.(GroupableRes)
|
||||
if !ok {
|
||||
// programming error
|
||||
panic("autogroup interfaces are illogical")
|
||||
}
|
||||
dst.SetAutoGroupMeta(x.AutoGroupMeta()) // no need to copy atm
|
||||
|
||||
grouped := []GroupableRes{}
|
||||
for _, g := range x.GetGroup() {
|
||||
g0, ok := g.(CopyableRes)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("resource wasn't copyable")
|
||||
}
|
||||
g1, err := ResCopy(g0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
g2, ok := g1.(GroupableRes)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("resource wasn't groupable")
|
||||
}
|
||||
grouped = append(grouped, g2)
|
||||
}
|
||||
dst.SetGroup(grouped)
|
||||
}
|
||||
|
||||
if x, ok := r.(RecvableRes); ok {
|
||||
dst, ok := res.(RecvableRes)
|
||||
if !ok {
|
||||
// programming error
|
||||
panic("recv interfaces are illogical")
|
||||
}
|
||||
dst.SetRecv(x.Recv()) // no need to copy atm
|
||||
}
|
||||
|
||||
if x, ok := r.(SendableRes); ok {
|
||||
dst, ok := res.(SendableRes)
|
||||
if !ok {
|
||||
// programming error
|
||||
panic("send interfaces are illogical")
|
||||
}
|
||||
if err := dst.Send(x.Sent()); err != nil { // no need to copy atm
|
||||
return nil, errwrap.Wrapf(err, "can't copy send")
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// ResMerge merges a set of resources that are compatible with each other. This
|
||||
// is the main entry point for the merging. They must each successfully be able
|
||||
// to run AdaptCmp without error.
|
||||
func ResMerge(r ...CompatibleRes) (CompatibleRes, error) {
|
||||
if len(r) == 0 {
|
||||
return nil, fmt.Errorf("zero resources given")
|
||||
}
|
||||
if len(r) == 1 {
|
||||
return r[0], nil
|
||||
}
|
||||
if len(r) > 2 {
|
||||
r0 := r[0]
|
||||
r1, err := ResMerge(r[1:]...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ResMerge(r0, r1)
|
||||
}
|
||||
// now we have r[0] and r[1] to merge here...
|
||||
r0 := r[0]
|
||||
r1 := r[1]
|
||||
if err := AdaptCmp(r0, r1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := r0.Merge(r1) // resource method of this interface
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// meta should have come over in the copy
|
||||
|
||||
if x, ok := res.(RefreshableRes); ok {
|
||||
x0, ok0 := r0.(RefreshableRes)
|
||||
x1, ok1 := r1.(RefreshableRes)
|
||||
if !ok0 || !ok1 {
|
||||
// programming error
|
||||
panic("refresh interfaces are illogical")
|
||||
}
|
||||
|
||||
x.SetRefresh(x0.Refresh() || x1.Refresh()) // true if either is!
|
||||
}
|
||||
|
||||
// the other traits and metaparams can't be merged easily... so we don't
|
||||
// merge them, and if they were present and differed, and weren't copied
|
||||
// in the ResCopy method, then we should have errored above in AdaptCmp!
|
||||
|
||||
return res, nil
|
||||
}
|
||||
@@ -24,9 +24,6 @@ type Error string
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
const (
|
||||
// ErrWatchExit represents an exit from the Watch loop via chan closure.
|
||||
ErrWatchExit = Error("watch exit")
|
||||
|
||||
// ErrSignalExit represents an exit from the Watch loop via exit signal.
|
||||
ErrSignalExit = Error("signal exit")
|
||||
// ErrClosed means we couldn't complete a task because we had closed.
|
||||
ErrClosed = Error("closed")
|
||||
)
|
||||
|
||||
@@ -48,7 +48,7 @@ type Fs interface {
|
||||
//IsDir(path string) (bool, error)
|
||||
//IsEmpty(path string) (bool, error)
|
||||
//NeuterAccents(s string) string
|
||||
//ReadAll(r io.Reader) ([]byte, error) // not needed
|
||||
//ReadAll(r io.Reader) ([]byte, error) // not needed, same as ioutil
|
||||
ReadDir(dirname string) ([]os.FileInfo, error)
|
||||
ReadFile(filename string) ([]byte, error)
|
||||
//SafeWriteReader(path string, r io.Reader) (err error)
|
||||
|
||||
@@ -24,10 +24,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/event"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
|
||||
//multierr "github.com/hashicorp/go-multierror"
|
||||
multierr "github.com/hashicorp/go-multierror"
|
||||
errwrap "github.com/pkg/errors"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
@@ -67,26 +66,24 @@ func (obj *Engine) Process(vertex pgraph.Vertex) error {
|
||||
return fmt.Errorf("vertex is not a Res")
|
||||
}
|
||||
|
||||
// Engine Guarantee: Do not allow CheckApply to run while we are paused.
|
||||
// This makes the resource able to know that synchronous channel sending
|
||||
// to the main loop select in Watch from within CheckApply, will succeed
|
||||
// without blocking because the resource went into a paused state. If we
|
||||
// are using the Poll metaparam, then Watch will (of course) not be run.
|
||||
// FIXME: should this lock be here, or wrapped right around CheckApply ?
|
||||
obj.state[vertex].eventsLock.Lock() // this lock is taken within Event()
|
||||
defer obj.state[vertex].eventsLock.Unlock()
|
||||
|
||||
// backpoke! (can be async)
|
||||
if vs := obj.BadTimestamps(vertex); len(vs) > 0 {
|
||||
// back poke in parallel (sync b/c of waitgroup)
|
||||
wg := &sync.WaitGroup{}
|
||||
for _, v := range obj.graph.IncomingGraphVertices(vertex) {
|
||||
if !pgraph.VertexContains(v, vs) { // only poke what's needed
|
||||
continue
|
||||
}
|
||||
|
||||
go obj.state[v].Poke() // async
|
||||
// doesn't really need to be in parallel, but we can...
|
||||
wg.Add(1)
|
||||
go func(vv pgraph.Vertex) {
|
||||
defer wg.Done()
|
||||
obj.state[vv].Poke() // async
|
||||
}(v)
|
||||
|
||||
}
|
||||
wg.Wait()
|
||||
return nil // can't continue until timestamp is in sequence
|
||||
}
|
||||
|
||||
@@ -119,6 +116,7 @@ func (obj *Engine) Process(vertex pgraph.Vertex) error {
|
||||
for _, changed := range updated {
|
||||
if changed { // at least one was updated
|
||||
// invalidate cache, mark as dirty
|
||||
obj.state[vertex].tuid.StopTimer()
|
||||
obj.state[vertex].isStateOK = false
|
||||
break
|
||||
}
|
||||
@@ -174,6 +172,7 @@ func (obj *Engine) Process(vertex pgraph.Vertex) error {
|
||||
|
||||
// if CheckApply ran without noop and without error, state should be good
|
||||
if !noop && err == nil { // aka !noop || checkOK
|
||||
obj.state[vertex].tuid.StartTimer()
|
||||
obj.state[vertex].isStateOK = true // reset
|
||||
if refresh {
|
||||
obj.SetUpstreamRefresh(vertex, false) // refresh happened, clear the request
|
||||
@@ -242,26 +241,57 @@ func (obj *Engine) Process(vertex pgraph.Vertex) error {
|
||||
|
||||
// Worker is the common run frontend of the vertex. It handles all of the retry
|
||||
// and retry delay common code, and ultimately returns the final status of this
|
||||
// vertex execution.
|
||||
// vertex execution. This function cannot be "re-run" for the same vertex. The
|
||||
// retry mechanism stuff happens inside of this. To actually "re-run" you need
|
||||
// to remove the vertex and build a new one. The engine guarantees that we do
|
||||
// not allow CheckApply to run while we are paused. That is enforced here.
|
||||
func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
res, isRes := vertex.(engine.Res)
|
||||
if !isRes {
|
||||
return fmt.Errorf("vertex is not a resource")
|
||||
}
|
||||
|
||||
defer close(obj.state[vertex].stopped) // done signal
|
||||
// bonus safety check
|
||||
if res.MetaParams().Burst == 0 && !(res.MetaParams().Limit == rate.Inf) { // blocked
|
||||
return fmt.Errorf("permanently limited (rate != Inf, burst = 0)")
|
||||
}
|
||||
|
||||
//defer close(obj.state[vertex].stopped) // done signal
|
||||
|
||||
obj.state[vertex].cuid = obj.Converger.Register()
|
||||
obj.state[vertex].tuid = obj.Converger.Register()
|
||||
// must wait for all users of the cuid to finish *before* we unregister!
|
||||
// as a result, this defer happens *before* the below wait group Wait...
|
||||
defer obj.state[vertex].cuid.Unregister()
|
||||
defer obj.state[vertex].tuid.Unregister()
|
||||
|
||||
defer obj.state[vertex].wg.Wait() // this Worker is the last to exit!
|
||||
|
||||
obj.state[vertex].wg.Add(1)
|
||||
go func() {
|
||||
defer obj.state[vertex].wg.Done()
|
||||
defer close(obj.state[vertex].outputChan) // we close this on behalf of res
|
||||
defer close(obj.state[vertex].eventsChan) // we close this on behalf of res
|
||||
|
||||
// This is a close reverse-multiplexer. If any of the channels
|
||||
// close, then it will cause the doneChan to close. That way,
|
||||
// multiple different folks can send a close signal, without
|
||||
ever worrying about duplicate channel close panics.
|
||||
obj.state[vertex].wg.Add(1)
|
||||
go func() {
|
||||
defer obj.state[vertex].wg.Done()
|
||||
|
||||
// reverse-multiplexer: any close, causes *the* close!
|
||||
select {
|
||||
case <-obj.state[vertex].processDone:
|
||||
case <-obj.state[vertex].watchDone:
|
||||
case <-obj.state[vertex].limitDone:
|
||||
case <-obj.state[vertex].removeDone:
|
||||
case <-obj.state[vertex].eventsDone:
|
||||
}
|
||||
|
||||
// the main "done" signal gets activated here!
|
||||
close(obj.state[vertex].doneChan)
|
||||
}()
|
||||
|
||||
var err error
|
||||
var retry = res.MetaParams().Retry // lookup the retry value
|
||||
@@ -279,13 +309,8 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
case <-timer.C: // the wait is over
|
||||
return errDelayExpired // special
|
||||
|
||||
case event, ok := <-obj.state[vertex].init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.state[vertex].init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.state[vertex].init.Done:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -304,68 +329,121 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
obj.Logf("Watch(%s): Exited(%+v)", vertex, err)
|
||||
obj.state[vertex].cuid.StopTimer() // clean up nicely
|
||||
}
|
||||
if err == nil || err == engine.ErrWatchExit || err == engine.ErrSignalExit {
|
||||
if err == nil { // || err == engine.ErrClosed
|
||||
return // exited cleanly, we're done
|
||||
}
|
||||
// we've got an error...
|
||||
delay = res.MetaParams().Delay
|
||||
|
||||
if retry < 0 { // infinite retries
|
||||
obj.state[vertex].reset()
|
||||
continue
|
||||
}
|
||||
if retry > 0 { // don't decrement past 0
|
||||
retry--
|
||||
obj.state[vertex].init.Logf("retrying Watch after %.4f seconds (%d left)", float64(delay)/1000, retry)
|
||||
obj.state[vertex].reset()
|
||||
continue
|
||||
}
|
||||
//if retry == 0 { // optional
|
||||
// err = errwrap.Wrapf(err, "permanent watch error")
|
||||
//}
|
||||
break // break out of this and send the error
|
||||
}
|
||||
} // for retry loop
|
||||
|
||||
// this section sends an error...
|
||||
// If the CheckApply loop exits and THEN the Watch fails with an
|
||||
// error, then we'd be stuck here if exit signal didn't unblock!
|
||||
select {
|
||||
case obj.state[vertex].outputChan <- errwrap.Wrapf(err, "watch failed"):
|
||||
case obj.state[vertex].eventsChan <- errwrap.Wrapf(err, "watch failed"):
|
||||
// send
|
||||
case <-obj.state[vertex].exit.Signal():
|
||||
// pass
|
||||
}
|
||||
}()
|
||||
|
||||
// bonus safety check
|
||||
if res.MetaParams().Burst == 0 && !(res.MetaParams().Limit == rate.Inf) { // blocked
|
||||
return fmt.Errorf("permanently limited (rate != Inf, burst = 0)")
|
||||
}
|
||||
var limiter = rate.NewLimiter(res.MetaParams().Limit, res.MetaParams().Burst)
|
||||
// It is important that we shutdown the Watch loop if this exits.
|
||||
// Example, if Process errors permanently, we should ask Watch to exit.
|
||||
defer obj.state[vertex].Event(event.EventExit) // signal an exit
|
||||
for {
|
||||
// If this exits cleanly, we must unblock the reverse-multiplexer.
|
||||
// I think this additional close is unnecessary, but it's not harmful.
|
||||
defer close(obj.state[vertex].eventsDone) // causes doneChan to close
|
||||
limiter := rate.NewLimiter(res.MetaParams().Limit, res.MetaParams().Burst)
|
||||
var reserv *rate.Reservation
|
||||
var reterr error
|
||||
var failed bool // has Process permanently failed?
|
||||
Loop:
|
||||
for { // process loop
|
||||
select {
|
||||
case err, ok := <-obj.state[vertex].outputChan: // read from watch channel
|
||||
case err, ok := <-obj.state[vertex].eventsChan: // read from watch channel
|
||||
if !ok {
|
||||
return nil
|
||||
return reterr // we only return when chan closes
|
||||
}
|
||||
// If the Watch method exits with an error, then this
|
||||
// channel will get that error propagated to it, which
|
||||
// we then save so we can return it to the caller of us.
|
||||
if err != nil {
|
||||
return err // permanent failure
|
||||
failed = true
|
||||
close(obj.state[vertex].watchDone) // causes doneChan to close
|
||||
reterr = multierr.Append(reterr, err) // permanent failure
|
||||
continue
|
||||
}
|
||||
if obj.Debug {
|
||||
obj.Logf("event received")
|
||||
}
|
||||
reserv = limiter.ReserveN(time.Now(), 1) // one event
|
||||
// reserv.OK() seems to always be true here!
|
||||
|
||||
// safe to go run the process...
|
||||
case <-obj.state[vertex].exit.Signal(): // TODO: is this needed?
|
||||
return nil
|
||||
case _, ok := <-obj.state[vertex].pokeChan: // read from buffered poke channel
|
||||
if !ok { // we never close it
|
||||
panic("unexpected close of poke channel")
|
||||
}
|
||||
if obj.Debug {
|
||||
obj.Logf("poke received")
|
||||
}
|
||||
reserv = nil // we didn't receive a real event here...
|
||||
}
|
||||
if failed { // don't Process anymore if we've already failed...
|
||||
continue Loop
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
r := limiter.ReserveN(now, 1) // one event
|
||||
// r.OK() seems to always be true here!
|
||||
d := r.DelayFrom(now)
|
||||
if d > 0 { // delay
|
||||
// drop redundant pokes
|
||||
for len(obj.state[vertex].pokeChan) > 0 {
|
||||
select {
|
||||
case <-obj.state[vertex].pokeChan:
|
||||
default:
|
||||
// race, someone else read one!
|
||||
}
|
||||
}
|
||||
|
||||
// pause if one was requested...
|
||||
select {
|
||||
case <-obj.state[vertex].pauseSignal: // channel closes
|
||||
// NOTE: If we allowed a doneChan below to let us out
|
||||
// of the resumeSignal wait, then we could loop around
|
||||
// and run this again, causing a panic. Instead of this
|
||||
// being made safe with a sync.Once, we instead run a
|
||||
// Resume() call inside of the vertexRemoveFn function,
|
||||
// which should unblock it when we're going to need to.
|
||||
obj.state[vertex].pausedAck.Ack() // send ack
|
||||
// we are paused now, and waiting for resume or exit...
|
||||
select {
|
||||
case <-obj.state[vertex].resumeSignal: // channel closes
|
||||
// resumed!
|
||||
// pass through to allow a Process to try to run
|
||||
// TODO: consider adding this fast pause here...
|
||||
//if obj.fastPause {
|
||||
// obj.Logf("fast pausing on resume")
|
||||
// continue
|
||||
//}
|
||||
}
|
||||
default:
|
||||
// no pause requested, keep going...
|
||||
}
|
||||
if failed { // don't Process anymore if we've already failed...
|
||||
continue Loop
|
||||
}
|
||||
|
||||
// limit delay
|
||||
d := time.Duration(0)
|
||||
if reserv != nil {
|
||||
d = reserv.DelayFrom(time.Now())
|
||||
}
|
||||
if reserv != nil && d > 0 { // delay
|
||||
obj.state[vertex].init.Logf("limited (rate: %v/sec, burst: %d, next: %v)", res.MetaParams().Limit, res.MetaParams().Burst, d)
|
||||
var count int
|
||||
timer := time.NewTimer(time.Duration(d) * time.Millisecond)
|
||||
LimitWait:
|
||||
for {
|
||||
@@ -374,35 +452,38 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
break LimitWait
|
||||
|
||||
// consume other events while we're waiting...
|
||||
case e, ok := <-obj.state[vertex].outputChan: // read from watch channel
|
||||
case e, ok := <-obj.state[vertex].eventsChan: // read from watch channel
|
||||
if !ok {
|
||||
// FIXME: is this logic correct?
|
||||
if count == 0 {
|
||||
return nil
|
||||
}
|
||||
// loop, because we have
|
||||
// the previous event to
|
||||
// run process on first!
|
||||
continue
|
||||
return reterr // we only return when chan closes
|
||||
}
|
||||
if e != nil {
|
||||
return e // permanent failure
|
||||
failed = true
|
||||
close(obj.state[vertex].limitDone) // causes doneChan to close
|
||||
reterr = multierr.Append(reterr, e) // permanent failure
|
||||
break LimitWait
|
||||
}
|
||||
count++ // count the events...
|
||||
if obj.Debug {
|
||||
obj.Logf("event received in limit")
|
||||
}
|
||||
// TODO: does this get added in properly?
|
||||
limiter.ReserveN(time.Now(), 1) // one event
|
||||
}
|
||||
}
|
||||
timer.Stop() // it's nice to cleanup
|
||||
obj.state[vertex].init.Logf("rate limiting expired!")
|
||||
}
|
||||
if failed { // don't Process anymore if we've already failed...
|
||||
continue Loop
|
||||
}
|
||||
// end of limit delay
|
||||
|
||||
// retry...
|
||||
var err error
|
||||
var retry = res.MetaParams().Retry // lookup the retry value
|
||||
var delay uint64
|
||||
Loop:
|
||||
RetryLoop:
|
||||
for { // retry loop
|
||||
if delay > 0 {
|
||||
var count int
|
||||
timer := time.NewTimer(time.Duration(delay) * time.Millisecond)
|
||||
RetryWait:
|
||||
for {
|
||||
@@ -411,22 +492,20 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
break RetryWait
|
||||
|
||||
// consume other events while we're waiting...
|
||||
case e, ok := <-obj.state[vertex].outputChan: // read from watch channel
|
||||
case e, ok := <-obj.state[vertex].eventsChan: // read from watch channel
|
||||
if !ok {
|
||||
// FIXME: is this logic correct?
|
||||
if count == 0 {
|
||||
// last process error
|
||||
return err
|
||||
}
|
||||
// loop, because we have
|
||||
// the previous event to
|
||||
// run process on first!
|
||||
continue
|
||||
return reterr // we only return when chan closes
|
||||
}
|
||||
if e != nil {
|
||||
return e // permanent failure
|
||||
failed = true
|
||||
close(obj.state[vertex].limitDone) // causes doneChan to close
|
||||
reterr = multierr.Append(reterr, e) // permanent failure
|
||||
break RetryWait
|
||||
}
|
||||
count++ // count the events...
|
||||
if obj.Debug {
|
||||
obj.Logf("event received in retry")
|
||||
}
|
||||
// TODO: does this get added in properly?
|
||||
limiter.ReserveN(time.Now(), 1) // one event
|
||||
}
|
||||
}
|
||||
@@ -434,6 +513,9 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
delay = 0 // reset
|
||||
obj.state[vertex].init.Logf("the CheckApply delay expired!")
|
||||
}
|
||||
if failed { // don't Process anymore if we've already failed...
|
||||
continue Loop
|
||||
}
|
||||
|
||||
if obj.Debug {
|
||||
obj.Logf("Process(%s)", vertex)
|
||||
@@ -443,7 +525,7 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
obj.Logf("Process(%s): Return(%+v)", vertex, err)
|
||||
}
|
||||
if err == nil {
|
||||
break Loop
|
||||
break RetryLoop
|
||||
}
|
||||
// we've got an error...
|
||||
delay = res.MetaParams().Delay
|
||||
@@ -460,15 +542,23 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
|
||||
// err = errwrap.Wrapf(err, "permanent process error")
|
||||
//}
|
||||
|
||||
// If this exits, defer calls Event(event.EventExit),
|
||||
// which will cause the Watch loop to shutdown. Also,
|
||||
// if the Watch loop shuts down, that will cause this
|
||||
// Process loop to shut down. Also the graph sync can
|
||||
// run an Event(event.EventExit) which causes this to
|
||||
// shutdown as well. Lastly, it is possible that more
|
||||
// than one of these scenarios happens simultaneously.
|
||||
return err
|
||||
}
|
||||
}
|
||||
// It is important that we shutdown the Watch loop if
|
||||
// this dies. If Process fails permanently, we ask it
|
||||
// to exit right here... (It happens when we loop...)
|
||||
failed = true
|
||||
close(obj.state[vertex].processDone) // causes doneChan to close
|
||||
reterr = multierr.Append(reterr, err) // permanent failure
|
||||
continue
|
||||
|
||||
} // retry loop
|
||||
|
||||
// When this Process loop exits, it's because something has
|
||||
// caused Watch() to shutdown (even if it's our permanent
|
||||
// failure from Process), which caused this channel to close.
|
||||
One or more exit signals are possible, and more than one can
|
||||
// happen simultaneously.
|
||||
|
||||
} // process loop
|
||||
|
||||
//return nil // unreachable
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
|
||||
"github.com/purpleidea/mgmt/converger"
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/event"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/util/semaphore"
|
||||
|
||||
@@ -42,7 +41,7 @@ type Engine struct {
|
||||
// Prefix is a unique directory prefix which can be used. It should be
|
||||
// created if needed.
|
||||
Prefix string
|
||||
Converger converger.Converger
|
||||
Converger *converger.Coordinator
|
||||
|
||||
Debug bool
|
||||
Logf func(format string, v ...interface{})
|
||||
@@ -50,13 +49,14 @@ type Engine struct {
|
||||
graph *pgraph.Graph
|
||||
nextGraph *pgraph.Graph
|
||||
state map[pgraph.Vertex]*State
|
||||
waits map[pgraph.Vertex]*sync.WaitGroup
|
||||
waits map[pgraph.Vertex]*sync.WaitGroup // wg for the Worker func
|
||||
|
||||
slock *sync.Mutex // semaphore lock
|
||||
semas map[string]*semaphore.Semaphore
|
||||
|
||||
wg *sync.WaitGroup
|
||||
wg *sync.WaitGroup // wg for the whole engine (only used for close)
|
||||
|
||||
paused bool // are we paused?
|
||||
fastPause bool
|
||||
}
|
||||
|
||||
@@ -84,6 +84,8 @@ func (obj *Engine) Init() error {
|
||||
|
||||
obj.wg = &sync.WaitGroup{}
|
||||
|
||||
obj.paused = true // start off true, so we can Resume after first Commit
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -125,7 +127,7 @@ func (obj *Engine) Validate() error {
|
||||
}
|
||||
|
||||
// Apply a function to the pending graph. You must pass in a function which will
|
||||
// receive this graph as input, and return an error if it something does not
|
||||
// receive this graph as input, and return an error if something does not
|
||||
// succeed.
|
||||
func (obj *Engine) Apply(fn func(*pgraph.Graph) error) error {
|
||||
return fn(obj.nextGraph)
|
||||
@@ -137,6 +139,7 @@ func (obj *Engine) Apply(fn func(*pgraph.Graph) error) error {
|
||||
func (obj *Engine) Commit() error {
|
||||
// TODO: Does this hurt performance or graph changes ?
|
||||
|
||||
start := []func() error{} // functions to run after graphsync to start...
|
||||
vertexAddFn := func(vertex pgraph.Vertex) error {
|
||||
// some of these validation steps happen before this Commit step
|
||||
// in Validate() to avoid erroring here. These are redundant.
|
||||
@@ -192,12 +195,37 @@ func (obj *Engine) Commit() error {
|
||||
if err := obj.state[vertex].Init(); err != nil {
|
||||
return errwrap.Wrapf(err, "the Res did not Init")
|
||||
}
|
||||
|
||||
fn := func() error {
|
||||
// start the Worker
|
||||
obj.wg.Add(1)
|
||||
obj.waits[vertex].Add(1)
|
||||
go func(v pgraph.Vertex) {
|
||||
defer obj.wg.Done()
|
||||
defer obj.waits[v].Done()
|
||||
|
||||
obj.Logf("Worker(%s)", v)
|
||||
// contains the Watch and CheckApply loops
|
||||
err := obj.Worker(v)
|
||||
obj.Logf("Worker(%s): Exited(%+v)", v, err)
|
||||
obj.state[v].workerErr = err // store the error
|
||||
// If the Rewatch metaparam is true, then this will get
|
||||
// restarted if we do a graph cmp swap. This is why the
|
||||
// graph cmp function runs the removes before the adds.
|
||||
// XXX: This should feed into an $error var in the lang.
|
||||
}(vertex)
|
||||
return nil
|
||||
}
|
||||
start = append(start, fn) // do this at the end, if it's needed
|
||||
return nil
|
||||
}
|
||||
|
||||
free := []func() error{} // functions to run after graphsync to reset...
|
||||
vertexRemoveFn := func(vertex pgraph.Vertex) error {
|
||||
// wait for exit before starting new graph!
|
||||
obj.state[vertex].Event(event.EventExit) // signal an exit
|
||||
obj.waits[vertex].Wait() // sync
|
||||
close(obj.state[vertex].removeDone) // causes doneChan to close
|
||||
obj.state[vertex].Resume() // unblock from resume
|
||||
obj.waits[vertex].Wait() // sync
|
||||
|
||||
// close the state and resource
|
||||
// FIXME: will this mess up the sync and block the engine?
|
||||
@@ -206,18 +234,72 @@ func (obj *Engine) Commit() error {
|
||||
}
|
||||
|
||||
// delete to free up memory from old graphs
|
||||
delete(obj.state, vertex)
|
||||
delete(obj.waits, vertex)
|
||||
fn := func() error {
|
||||
delete(obj.state, vertex)
|
||||
delete(obj.waits, vertex)
|
||||
return nil
|
||||
}
|
||||
free = append(free, fn) // do this at the end, so we don't panic
|
||||
return nil
|
||||
}
|
||||
|
||||
// add the Worker swap (reload) on error decision into this vertexCmpFn
|
||||
vertexCmpFn := func(v1, v2 pgraph.Vertex) (bool, error) {
|
||||
r1, ok1 := v1.(engine.Res)
|
||||
r2, ok2 := v2.(engine.Res)
|
||||
if !ok1 || !ok2 { // should not happen, previously validated
|
||||
return false, fmt.Errorf("not a Res")
|
||||
}
|
||||
m1 := r1.MetaParams()
|
||||
m2 := r2.MetaParams()
|
||||
swap1, swap2 := true, true // assume default of true
|
||||
if m1 != nil {
|
||||
swap1 = m1.Rewatch
|
||||
}
|
||||
if m2 != nil {
|
||||
swap2 = m2.Rewatch
|
||||
}
|
||||
|
||||
s1, ok1 := obj.state[v1]
|
||||
s2, ok2 := obj.state[v2]
|
||||
x1, x2 := false, false
|
||||
if ok1 {
|
||||
x1 = s1.workerErr != nil && swap1
|
||||
}
|
||||
if ok2 {
|
||||
x2 = s2.workerErr != nil && swap2
|
||||
}
|
||||
|
||||
if x1 || x2 {
|
||||
// We swap, even if they're the same, so that we reload!
|
||||
// This causes an add and remove of the "same" vertex...
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return engine.VertexCmpFn(v1, v2) // do the normal cmp otherwise
|
||||
}
|
||||
|
||||
// If GraphSync succeeds, it updates the receiver graph accordingly...
|
||||
// Running the shutdown in vertexRemoveFn does not need to happen in a
|
||||
// topologically sorted order because it already paused in that order.
|
||||
obj.Logf("graph sync...")
|
||||
if err := obj.graph.GraphSync(obj.nextGraph, engine.VertexCmpFn, vertexAddFn, vertexRemoveFn, engine.EdgeCmpFn); err != nil {
|
||||
if err := obj.graph.GraphSync(obj.nextGraph, vertexCmpFn, vertexAddFn, vertexRemoveFn, engine.EdgeCmpFn); err != nil {
|
||||
return errwrap.Wrapf(err, "error running graph sync")
|
||||
}
|
||||
// We run these afterwards, so that we don't unnecessarily start anyone
|
||||
// if GraphSync failed in some way. Otherwise we'd have to do clean up!
|
||||
for _, fn := range start {
|
||||
if err := fn(); err != nil {
|
||||
return errwrap.Wrapf(err, "error running start fn")
|
||||
}
|
||||
}
|
||||
// We run these afterwards, so that the state structs (that might get
|
||||
// referenced) are not destroyed while someone might poke or use one.
|
||||
for _, fn := range free {
|
||||
if err := fn(); err != nil {
|
||||
return errwrap.Wrapf(err, "error running free fn")
|
||||
}
|
||||
}
|
||||
obj.nextGraph = nil
|
||||
|
||||
// After this point, we must not error or we'd need to restore all of
|
||||
@@ -236,50 +318,28 @@ func (obj *Engine) Commit() error {
|
||||
return nil
|
||||
}
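
Taken together, Pause, Commit, and Resume form the graph-swap cycle. A rough sketch of how a caller might drive one full swap with these methods; the direct assignment to nextGraph is a stand-in for whatever staging call the engine actually exposes, and is only for illustration:

func swapGraph(eng *Engine, next *pgraph.Graph) error {
	// Pause the running graph first, so Commit can add/remove safely.
	if err := eng.Pause(false); err != nil {
		return err
	}
	eng.nextGraph = next // illustrative only: stage the next graph
	// Commit runs GraphSync: removed workers exit first, then new ones start.
	if err := eng.Commit(); err != nil {
		return err
	}
	// Resume unpauses the pre-existing resources that survived the swap.
	return eng.Resume()
}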
|
||||
|
||||
// Start runs the currently active graph. It also un-pauses the graph if it was
|
||||
// paused.
|
||||
func (obj *Engine) Start() error {
|
||||
// Resume runs the currently active graph. It also un-pauses the graph if it was
|
||||
// paused. Very little that is interesting should happen here. It all happens in
|
||||
// the Commit method. After Commit, new things are already started, but we still
|
||||
// need to Resume any pre-existing resources.
|
||||
func (obj *Engine) Resume() error {
|
||||
if !obj.paused {
|
||||
return fmt.Errorf("already resumed")
|
||||
}
|
||||
|
||||
topoSort, err := obj.graph.TopologicalSort()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indegree := obj.graph.InDegree() // compute all of the indegree's
|
||||
//indegree := obj.graph.InDegree() // compute all of the indegree's
|
||||
reversed := pgraph.Reverse(topoSort)
|
||||
|
||||
for _, vertex := range reversed {
|
||||
state := obj.state[vertex]
|
||||
state.starter = (indegree[vertex] == 0)
|
||||
var unpause = true // assume true
|
||||
|
||||
if !state.working { // if not running...
|
||||
state.working = true
|
||||
unpause = false // doesn't need unpausing if starting
|
||||
obj.wg.Add(1)
|
||||
obj.waits[vertex].Add(1)
|
||||
go func(v pgraph.Vertex) {
|
||||
defer obj.wg.Done()
|
||||
defer obj.waits[vertex].Done()
|
||||
defer func() {
|
||||
obj.state[v].working = false
|
||||
}()
|
||||
|
||||
obj.Logf("Worker(%s)", v)
|
||||
// contains the Watch and CheckApply loops
|
||||
err := obj.Worker(v)
|
||||
obj.Logf("Worker(%s): Exited(%+v)", v, err)
|
||||
}(vertex)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-state.started:
|
||||
case <-state.stopped: // we failed on Watch start
|
||||
}
|
||||
|
||||
if unpause { // unpause (if needed)
|
||||
obj.state[vertex].Event(event.EventStart)
|
||||
}
|
||||
//obj.state[vertex].starter = (indegree[vertex] == 0)
|
||||
obj.state[vertex].Resume() // doesn't error
|
||||
}
|
||||
// we wait for everyone to start before exiting!
|
||||
obj.paused = false
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -290,22 +350,32 @@ func (obj *Engine) Start() error {
|
||||
// This is because once you've started a fast pause, some dependencies might
|
||||
// have been skipped when fast pausing, and future resources might have missed a
|
||||
// poke. In general this is only called when you're trying to hurry up the exit.
|
||||
// XXX: Not implemented
|
||||
func (obj *Engine) SetFastPause() {
|
||||
obj.fastPause = true
|
||||
}
|
||||
|
||||
// Pause the active, running graph. At the moment this cannot error.
|
||||
func (obj *Engine) Pause(fastPause bool) {
|
||||
// Pause the active, running graph.
|
||||
func (obj *Engine) Pause(fastPause bool) error {
|
||||
if obj.paused {
|
||||
return fmt.Errorf("already paused")
|
||||
}
|
||||
|
||||
obj.fastPause = fastPause
|
||||
topoSort, _ := obj.graph.TopologicalSort()
|
||||
for _, vertex := range topoSort { // squeeze out the events...
|
||||
// The Event is sent to an unbuffered channel, so this event is
|
||||
// synchronous, and as a result it blocks until it is received.
|
||||
obj.state[vertex].Event(event.EventPause)
|
||||
if err := obj.state[vertex].Pause(); err != nil && err != engine.ErrClosed {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
obj.paused = true
|
||||
|
||||
// we are now completely paused...
|
||||
obj.fastPause = false // reset
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close triggers a shutdown. Engine must be already paused before this is run.
|
||||
|
||||
37
engine/graph/graph_test.go
Normal file
@@ -0,0 +1,37 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build !root
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
multierr "github.com/hashicorp/go-multierror"
|
||||
)
|
||||
|
||||
func TestMultiErr(t *testing.T) {
|
||||
var err error
|
||||
e := fmt.Errorf("some error")
|
||||
err = multierr.Append(err, e) // build an error from a nil base
|
||||
// ensure that this lib allows us to append to a nil
|
||||
if err == nil {
|
||||
t.Errorf("missing error")
|
||||
}
|
||||
}
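
A follow-on sketch of the same idea, showing that appended errors accumulate; it assumes the go-multierror *Error type with its Errors field and ErrorOrNil helper:

func TestMultiErrAppendTwice(t *testing.T) {
	var err error
	err = multierr.Append(err, fmt.Errorf("first error"))
	err = multierr.Append(err, fmt.Errorf("second error"))

	merr, ok := err.(*multierr.Error)
	if !ok {
		t.Fatalf("expected a *multierror.Error, got %T", err)
	}
	if count := len(merr.Errors); count != 2 {
		t.Errorf("expected two wrapped errors, got %d", count)
	}
	if merr.ErrorOrNil() == nil {
		t.Errorf("expected a non-nil combined error")
	}
}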
|
||||
@@ -19,14 +19,11 @@ package graph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/converger"
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/event"
|
||||
"github.com/purpleidea/mgmt/pgraph"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
|
||||
@@ -51,7 +48,7 @@ type State struct {
|
||||
// created if needed.
|
||||
Prefix string
|
||||
|
||||
//Converger converger.Converger
|
||||
//Converger *converger.Coordinator
|
||||
|
||||
// Debug turns on additional output and behaviours.
|
||||
Debug bool
|
||||
@@ -61,48 +58,62 @@ type State struct {
|
||||
|
||||
timestamp int64 // last updated timestamp
|
||||
isStateOK bool // is state OK or do we need to run CheckApply ?
|
||||
workerErr error // did the Worker error?
|
||||
|
||||
// events is a channel of incoming events which is read by the Watch
|
||||
// loop for that resource. It receives events like pause, start, and
|
||||
// poke. The channel shuts down to signal for Watch to exit.
|
||||
eventsChan chan event.Kind // incoming to resource
|
||||
eventsLock *sync.Mutex // lock around sending and closing of events channel
|
||||
eventsDone bool // is channel closed?
|
||||
// doneChan closes when Watch should shut down. When any of the
|
||||
// following channels close, it causes this to close.
|
||||
doneChan chan struct{}
|
||||
|
||||
// outputChan is the channel that the engine listens on for events from
|
||||
// processDone is closed when the Process/CheckApply function fails
|
||||
// permanently, and wants to cause Watch to exit.
|
||||
processDone chan struct{}
|
||||
// watchDone is closed when the Watch function fails permanently, and we
|
||||
// close this to signal we should definitely exit. (Often redundant.)
|
||||
watchDone chan struct{} // could be shared with limitDone
|
||||
// limitDone is closed when the Watch function fails permanently, and we
|
||||
// close this to signal we should definitely exit. This happens inside
|
||||
// of the limit loop of the Process section of Worker.
|
||||
limitDone chan struct{} // could be shared with watchDone
|
||||
// removeDone is closed when the vertexRemoveFn method asks for an exit.
|
||||
// This happens when we're switching graphs. The switch to an "empty" graph is
|
||||
// the equivalent of asking for a final shutdown.
|
||||
removeDone chan struct{}
|
||||
// eventsDone is closed when we shut down the Process loop because we
|
||||
// closed without error. In theory this shouldn't happen, but it could
|
||||
// if Watch returns without error for some reason.
|
||||
eventsDone chan struct{}
|
||||
|
||||
// eventsChan is the channel that the engine listens on for events from
|
||||
// the Watch loop for that resource. The event is nil normally, except
|
||||
// when events are sent on this channel from the engine. This only
|
||||
// happens as a signaling mechanism when Watch has shut down and we want
|
||||
// to notify the Process loop which reads from this.
|
||||
outputChan chan error // outgoing from resource
|
||||
eventsChan chan error // outgoing from resource
|
||||
|
||||
wg *sync.WaitGroup
|
||||
exit *util.EasyExit
|
||||
// pokeChan is a separate channel that the Process loop listens on to
|
||||
// know when we might need to run Process. It never closes, and is safe
|
||||
// to send on since it is buffered.
|
||||
pokeChan chan struct{} // outgoing from resource
|
||||
|
||||
started chan struct{} // closes when it's started
|
||||
stopped chan struct{} // closes when it's stopped
|
||||
// paused represents if this particular res is paused or not.
|
||||
paused bool
|
||||
// pauseSignal closes to request a pause of this resource.
|
||||
pauseSignal chan struct{}
|
||||
// resumeSignal closes to request a resume of this resource.
|
||||
resumeSignal chan struct{}
|
||||
// pausedAck is used to send an ack message saying that we've paused.
|
||||
pausedAck *util.EasyAck
|
||||
|
||||
starter bool // do we have an indegree of 0 ?
|
||||
working bool // is the Main() loop running ?
|
||||
wg *sync.WaitGroup // used for all vertex specific processes
|
||||
|
||||
cuid converger.UID // primary converger
|
||||
cuid *converger.UID // primary converger
|
||||
tuid *converger.UID // secondary converger
|
||||
|
||||
init *engine.Init // a copy of the init struct passed to res Init
|
||||
}
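
The struct comments above describe several narrow shutdown channels that all funnel into doneChan. A minimal illustration of that fan-in, assuming each specific channel is closed by exactly one owner as documented; this is a sketch, not the actual Worker wiring:

func (obj *State) fanInDone() {
	go func() {
		select {
		case <-obj.processDone: // Process/CheckApply failed permanently
		case <-obj.watchDone: // Watch failed permanently
		case <-obj.limitDone: // failure inside the limit loop of Process
		case <-obj.removeDone: // vertexRemoveFn requested an exit
		case <-obj.eventsDone: // Process loop closed without error
		}
		close(obj.doneChan) // any of the above causes doneChan to close
	}()
}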
|
||||
|
||||
// Init initializes structures like channels.
|
||||
func (obj *State) Init() error {
|
||||
obj.eventsChan = make(chan event.Kind)
|
||||
obj.eventsLock = &sync.Mutex{}
|
||||
|
||||
obj.outputChan = make(chan error)
|
||||
|
||||
obj.wg = &sync.WaitGroup{}
|
||||
obj.exit = util.NewEasyExit()
|
||||
|
||||
obj.started = make(chan struct{})
|
||||
obj.stopped = make(chan struct{})
|
||||
|
||||
res, isRes := obj.Vertex.(engine.Res)
|
||||
if !isRes {
|
||||
return fmt.Errorf("vertex is not a Res")
|
||||
@@ -120,29 +131,36 @@ func (obj *State) Init() error {
|
||||
return fmt.Errorf("the Logf function is missing")
|
||||
}
|
||||
|
||||
obj.doneChan = make(chan struct{})
|
||||
|
||||
obj.processDone = make(chan struct{})
|
||||
obj.watchDone = make(chan struct{})
|
||||
obj.limitDone = make(chan struct{})
|
||||
obj.removeDone = make(chan struct{})
|
||||
obj.eventsDone = make(chan struct{})
|
||||
|
||||
obj.eventsChan = make(chan error)
|
||||
|
||||
obj.pokeChan = make(chan struct{}, 1) // must be buffered
|
||||
|
||||
//obj.paused = false // starts off as started
|
||||
obj.pauseSignal = make(chan struct{})
|
||||
//obj.resumeSignal = make(chan struct{}) // happens on pause
|
||||
//obj.pausedAck = util.NewEasyAck() // happens on pause
|
||||
|
||||
obj.wg = &sync.WaitGroup{}
|
||||
|
||||
//obj.cuid = obj.Converger.Register() // gets registered in Worker()
|
||||
//obj.tuid = obj.Converger.Register() // gets registered in Worker()
|
||||
|
||||
obj.init = &engine.Init{
|
||||
Program: obj.Program,
|
||||
Hostname: obj.Hostname,
|
||||
|
||||
// Watch:
|
||||
Running: func() error {
|
||||
close(obj.started) // this is reset in the reset func
|
||||
obj.isStateOK = false // assume we're initially dirty
|
||||
// optimization: skip the initial send if not a starter
|
||||
// because we'll get poked from a starter soon anyways!
|
||||
if !obj.starter {
|
||||
return nil
|
||||
}
|
||||
return obj.event()
|
||||
},
|
||||
Event: obj.event,
|
||||
Events: obj.eventsChan,
|
||||
Read: obj.read,
|
||||
Dirty: func() { // TODO: should we rename this SetDirty?
|
||||
obj.isStateOK = false
|
||||
},
|
||||
Running: obj.event,
|
||||
Event: obj.event,
|
||||
Done: obj.doneChan,
|
||||
|
||||
// CheckApply:
|
||||
Refresh: func() bool {
|
||||
@@ -208,6 +226,9 @@ func (obj *State) Close() error {
|
||||
//if obj.cuid != nil {
|
||||
// obj.cuid.Unregister() // gets unregistered in Worker()
|
||||
//}
|
||||
//if obj.tuid != nil {
|
||||
// obj.tuid.Unregister() // gets unregistered in Worker()
|
||||
//}
|
||||
|
||||
// redundant safety
|
||||
obj.wg.Wait() // wait until all poke's and events on me have exited
|
||||
@@ -224,177 +245,91 @@ func (obj *State) Close() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// reset is run to reset the state so that Watch can run a second time. This is
|
||||
// needed for the Watch retry in particular.
|
||||
func (obj *State) reset() {
|
||||
obj.started = make(chan struct{})
|
||||
obj.stopped = make(chan struct{})
|
||||
}
|
||||
|
||||
// Poke sends a nil message on the outputChan. This channel is used by the
|
||||
// resource to signal a possible change. This will cause the Process loop to
|
||||
// run if it can.
|
||||
// Poke sends a notification on the poke channel. This channel is used to notify
|
||||
// the Worker to run the Process/CheckApply when it can. This is used when there
|
||||
// is a need to schedule or reschedule some work which got postponed or dropped.
|
||||
// This doesn't contain any internal synchronization primitives or wait groups;
|
||||
// callers are expected to make sure that they don't leave any of these running
|
||||
// by the time the Worker() shuts down.
|
||||
func (obj *State) Poke() {
|
||||
// add a wait group on the vertex we're poking!
|
||||
obj.wg.Add(1)
|
||||
defer obj.wg.Done()
|
||||
// redundant
|
||||
//if len(obj.pokeChan) > 0 {
|
||||
// return
|
||||
//}
|
||||
|
||||
select {
|
||||
case obj.outputChan <- nil:
|
||||
|
||||
case <-obj.exit.Signal():
|
||||
case obj.pokeChan <- struct{}{}:
|
||||
default: // if chan is now full because more than one poke happened...
|
||||
}
|
||||
}
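
The select with a default works because pokeChan is buffered with a capacity of one: a pending poke absorbs any further pokes until the Worker drains it. A standalone sketch of the same coalescing pattern, outside the engine:

func pokeCoalescingDemo() {
	poke := make(chan struct{}, 1) // must be buffered, like pokeChan

	notify := func() {
		select {
		case poke <- struct{}{}: // queued one wake-up
		default: // a wake-up is already pending; coalesce
		}
	}

	notify()
	notify()
	notify() // all three collapse into a single pending poke
	<-poke   // the consumer wakes up exactly once
}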
|
||||
|
||||
// Event sends a Pause or Start event to the resource. It can also be used to
|
||||
// send Poke events, but it's much more efficient to send them directly instead
|
||||
// of passing them through the resource.
|
||||
func (obj *State) Event(kind event.Kind) {
|
||||
// TODO: should these happen after the lock?
|
||||
obj.wg.Add(1)
|
||||
defer obj.wg.Done()
|
||||
// Pause pauses this resource. It should not be called on any already paused
|
||||
// resource. It will block until the resource pauses with an acknowledgment, or
|
||||
// until an exit for that resource is seen. If the latter happens it will error.
|
||||
// It is NOT thread-safe with the Resume() method so only call either one at a
|
||||
// time.
|
||||
func (obj *State) Pause() error {
|
||||
if obj.paused {
|
||||
return fmt.Errorf("already paused")
|
||||
}
|
||||
|
||||
obj.eventsLock.Lock()
|
||||
defer obj.eventsLock.Unlock()
|
||||
obj.pausedAck = util.NewEasyAck()
|
||||
obj.resumeSignal = make(chan struct{}) // build the resume signal
|
||||
close(obj.pauseSignal)
|
||||
obj.Poke() // unblock and notice the pause if necessary
|
||||
|
||||
if obj.eventsDone { // closing, skip events...
|
||||
// wait for ack (or exit signal)
|
||||
select {
|
||||
case <-obj.pausedAck.Wait(): // we got it!
|
||||
// we're paused
|
||||
case <-obj.doneChan:
|
||||
return engine.ErrClosed
|
||||
}
|
||||
obj.paused = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resume unpauses this resource. It can be safely called on a brand-new
|
||||
// resource that has just started running without incident. It is NOT
|
||||
// thread-safe with the Pause() method, so only call either one at a time.
|
||||
func (obj *State) Resume() {
|
||||
// TODO: do we need a mutex around Resume?
|
||||
if !obj.paused { // no need to unpause brand-new resources
|
||||
return
|
||||
}
|
||||
|
||||
if kind == event.EventExit { // set this so future events don't deadlock
|
||||
obj.Logf("exit event...")
|
||||
obj.eventsDone = true
|
||||
close(obj.eventsChan) // causes resource Watch loop to close
|
||||
obj.exit.Done(nil) // trigger exit signal to unblock some cases
|
||||
return
|
||||
}
|
||||
obj.pauseSignal = make(chan struct{}) // rebuild for next pause
|
||||
close(obj.resumeSignal)
|
||||
//obj.Poke() // not needed, we're already waiting for resume
|
||||
|
||||
obj.paused = false
|
||||
|
||||
// no need to wait for it to resume
|
||||
//return // implied
|
||||
}
|
||||
|
||||
// event is a helper function to send an event to the CheckApply process loop.
|
||||
// It can be used for the initial `running` event, or any regular event. You
|
||||
// should instead use Poke() to "schedule" a new Process/CheckApply loop when
|
||||
// one might be needed. This method will block until we're unpaused and ready to
|
||||
// receive on the events channel.
|
||||
func (obj *State) event() {
|
||||
obj.setDirty() // assume we're initially dirty
|
||||
|
||||
select {
|
||||
case obj.eventsChan <- kind:
|
||||
|
||||
case <-obj.exit.Signal():
|
||||
case obj.eventsChan <- nil:
|
||||
// send!
|
||||
}
|
||||
|
||||
//return // implied
|
||||
}
|
||||
|
||||
// read is a helper function used inside the main select statement of resources.
|
||||
// If it returns an error, then this is a signal for the resource to exit.
|
||||
func (obj *State) read(kind event.Kind) error {
|
||||
switch kind {
|
||||
case event.EventPoke:
|
||||
return obj.event() // a poke needs to cause an event...
|
||||
case event.EventStart:
|
||||
return fmt.Errorf("unexpected start")
|
||||
case event.EventPause:
|
||||
// pass
|
||||
case event.EventExit:
|
||||
return engine.ErrSignalExit
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unhandled event: %+v", kind)
|
||||
}
|
||||
|
||||
// we're paused now
|
||||
select {
|
||||
case kind, ok := <-obj.eventsChan:
|
||||
if !ok {
|
||||
return engine.ErrWatchExit
|
||||
}
|
||||
switch kind {
|
||||
case event.EventPoke:
|
||||
return fmt.Errorf("unexpected poke")
|
||||
case event.EventPause:
|
||||
return fmt.Errorf("unexpected pause")
|
||||
case event.EventStart:
|
||||
// resumed
|
||||
return nil
|
||||
case event.EventExit:
|
||||
return engine.ErrSignalExit
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unhandled event: %+v", kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// event is a helper function to send an event from the resource Watch loop. It
|
||||
// can be used for the initial `running` event, or any regular event. If it
|
||||
// returns an error, then the Watch loop must return this error and shutdown.
|
||||
func (obj *State) event() error {
|
||||
// loop until we sent on obj.outputChan or exit with error
|
||||
for {
|
||||
select {
|
||||
// send "activity" event
|
||||
case obj.outputChan <- nil:
|
||||
return nil // sent event!
|
||||
|
||||
// make sure to keep handling incoming
|
||||
case kind, ok := <-obj.eventsChan:
|
||||
if !ok {
|
||||
return engine.ErrWatchExit
|
||||
}
|
||||
switch kind {
|
||||
case event.EventPoke:
|
||||
// we're trying to send an event, so swallow the
|
||||
// poke: it's what we wanted to have happen here
|
||||
continue
|
||||
case event.EventStart:
|
||||
return fmt.Errorf("unexpected start")
|
||||
case event.EventPause:
|
||||
// pass
|
||||
case event.EventExit:
|
||||
return engine.ErrSignalExit
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unhandled event: %+v", kind)
|
||||
}
|
||||
}
|
||||
|
||||
// we're paused now
|
||||
select {
|
||||
case kind, ok := <-obj.eventsChan:
|
||||
if !ok {
|
||||
return engine.ErrWatchExit
|
||||
}
|
||||
switch kind {
|
||||
case event.EventPoke:
|
||||
return fmt.Errorf("unexpected poke")
|
||||
case event.EventPause:
|
||||
return fmt.Errorf("unexpected pause")
|
||||
case event.EventStart:
|
||||
// resumed
|
||||
case event.EventExit:
|
||||
return engine.ErrSignalExit
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unhandled event: %+v", kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// varDir returns the path to a working directory for the resource. It will try
|
||||
// and create the directory first, and return an error if this failed. The dir
|
||||
// should be cleaned up by the resource on Close if it wishes to discard the
|
||||
// contents. If it does not, then a future resource with the same kind and name
|
||||
// may see those contents in that directory. The resource should clean up the
|
||||
// contents before use if it is important that nothing exist. It is always
|
||||
// possible that contents could remain after an abrupt crash, so do not store
|
||||
// overly sensitive data unless you're aware of the risks.
|
||||
func (obj *State) varDir(extra string) (string, error) {
|
||||
// Using extra adds additional dirs onto our namespace. An empty extra
|
||||
// adds no additional directories.
|
||||
if obj.Prefix == "" { // safety
|
||||
return "", fmt.Errorf("the VarDir prefix is empty")
|
||||
}
|
||||
|
||||
// an empty string at the end has no effect
|
||||
p := fmt.Sprintf("%s/", path.Join(obj.Prefix, extra))
|
||||
if err := os.MkdirAll(p, 0770); err != nil {
|
||||
return "", errwrap.Wrapf(err, "can't create prefix in: %s", p)
|
||||
}
|
||||
|
||||
// returns with a trailing slash as per the mgmt file res convention
|
||||
return p, nil
|
||||
// setDirty marks the resource state as dirty. This signals to the engine that
|
||||
// CheckApply will have some work to do in order to converge it.
|
||||
func (obj *State) setDirty() {
|
||||
obj.tuid.StopTimer()
|
||||
obj.isStateOK = false
|
||||
}
|
||||
|
||||
// poll is a replacement for Watch when the Poll metaparameter is used.
|
||||
@@ -403,34 +338,17 @@ func (obj *State) poll(interval uint32) error {
|
||||
ticker := time.NewTicker(time.Duration(interval) * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C: // received the timer event
|
||||
obj.init.Logf("polling...")
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // signal for shutdown request
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
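
A hedged sketch of how a Worker might choose between poll and the resource's own Watch, assuming the Poll metaparam is the interval in seconds and zero means "use Watch"; the real dispatch code is not part of this hunk:

func (obj *State) watchOrPoll() error {
	res := obj.Vertex.(engine.Res) // already validated in Init
	if m, ok := res.(engine.MetaRes); ok && m.MetaParams().Poll > 0 {
		return obj.poll(m.MetaParams().Poll) // timer-driven substitute
	}
	return res.Watch() // the resource's own event source
}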
|
||||
|
||||
51
engine/graph/vardir.go
Normal file
@@ -0,0 +1,51 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// varDir returns the path to a working directory for the resource. It will try
|
||||
// and create the directory first, and return an error if this failed. The dir
|
||||
// should be cleaned up by the resource on Close if it wishes to discard the
|
||||
// contents. If it does not, then a future resource with the same kind and name
|
||||
// may see those contents in that directory. The resource should clean up the
|
||||
// contents before use if it is important that nothing exist. It is always
|
||||
// possible that contents could remain after an abrupt crash, so do not store
|
||||
// overly sensitive data unless you're aware of the risks.
|
||||
func (obj *State) varDir(extra string) (string, error) {
|
||||
// Using extra adds additional dirs onto our namespace. An empty extra
|
||||
// adds no additional directories.
|
||||
if obj.Prefix == "" { // safety
|
||||
return "", fmt.Errorf("the VarDir prefix is empty")
|
||||
}
|
||||
|
||||
// an empty string at the end has no effect
|
||||
p := fmt.Sprintf("%s/", path.Join(obj.Prefix, extra))
|
||||
if err := os.MkdirAll(p, 0770); err != nil {
|
||||
return "", errwrap.Wrapf(err, "can't create prefix in: %s", p)
|
||||
}
|
||||
|
||||
// returns with a trailing slash as per the mgmt file res convention
|
||||
return p, nil
|
||||
}
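
A small usage sketch for varDir; the "cache" subdirectory name is made up for illustration:

func exampleVarDir(obj *State) (string, error) {
	dir, err := obj.varDir("cache") // e.g. <Prefix>/cache/ with a trailing slash
	if err != nil {
		return "", errwrap.Wrapf(err, "could not create the working dir")
	}
	return dir, nil // the caller stores resource-local files under dir
}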
|
||||
@@ -37,6 +37,8 @@ var DefaultMetaParams = &MetaParams{
|
||||
Limit: rate.Inf, // defaults to no limit
|
||||
Burst: 0, // no burst needed on an infinite rate
|
||||
//Sema: []string{},
|
||||
Rewatch: true,
|
||||
Realize: false, // true would be more awesome, but unexpected for users
|
||||
}
|
||||
|
||||
// MetaRes is the interface a resource must implement to support meta params.
|
||||
@@ -44,6 +46,10 @@ var DefaultMetaParams = &MetaParams{
|
||||
type MetaRes interface {
|
||||
// MetaParams lets you get or set meta params for the resource.
|
||||
MetaParams() *MetaParams
|
||||
|
||||
// SetMetaParams lets you set all of the meta params for the resource in
|
||||
// a single call.
|
||||
SetMetaParams(*MetaParams)
|
||||
}
|
||||
|
||||
// MetaParams provides some meta parameters that apply to every resource.
|
||||
@@ -77,6 +83,24 @@ type MetaParams struct {
|
||||
// has a count equal to 1, is different from a sema named `foo:1` which
|
||||
// also has a count equal to 1, but is a different semaphore.
|
||||
Sema []string `yaml:"sema"`
|
||||
|
||||
// Rewatch specifies whether we re-run the Watch worker during a swap
|
||||
// if it has errored. When doing a GraphCmp to swap the graphs, if this
|
||||
// is true, and this particular worker has errored, then we'll remove it
|
||||
// and add it back as a new vertex, thus causing it to run again. This
|
||||
// is different from the Retry metaparam which applies during the normal
|
||||
// execution. It is only when this is exhausted that we're in permanent
|
||||
// worker failure, and only then can we rely on this metaparam.
|
||||
Rewatch bool `yaml:"rewatch"`
|
||||
|
||||
// Realize ensures that the resource is guaranteed to converge at least
|
||||
// once before a potential graph swap removes or changes it. This
|
||||
// guarantee is useful for fast changing graphs, to ensure that the
|
||||
// brief creation of a resource is seen. This guarantee does not protect
|
||||
// against the engine quitting normally, and it can't guarantee it if
|
||||
// the resource is blocked because of a failed pre-requisite resource.
|
||||
// XXX: Not implemented!
|
||||
Realize bool `yaml:"realize"`
|
||||
}
|
||||
|
||||
// Cmp compares two AutoGroupMeta structs and determines if they're equivalent.
|
||||
@@ -114,6 +138,13 @@ func (obj *MetaParams) Cmp(meta *MetaParams) error {
|
||||
return errwrap.Wrapf(err, "values for Sema are different")
|
||||
}
|
||||
|
||||
if obj.Rewatch != meta.Rewatch {
|
||||
return fmt.Errorf("values for Rewatch are different")
|
||||
}
|
||||
if obj.Realize != meta.Realize {
|
||||
return fmt.Errorf("values for Realize are different")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -143,13 +174,15 @@ func (obj *MetaParams) Copy() *MetaParams {
|
||||
copy(sema, obj.Sema)
|
||||
}
|
||||
return &MetaParams{
|
||||
Noop: obj.Noop,
|
||||
Retry: obj.Retry,
|
||||
Delay: obj.Delay,
|
||||
Poll: obj.Poll,
|
||||
Limit: obj.Limit, // FIXME: can we copy this type like this? test me!
|
||||
Burst: obj.Burst,
|
||||
Sema: sema,
|
||||
Noop: obj.Noop,
|
||||
Retry: obj.Retry,
|
||||
Delay: obj.Delay,
|
||||
Poll: obj.Poll,
|
||||
Limit: obj.Limit, // FIXME: can we copy this type like this? test me!
|
||||
Burst: obj.Burst,
|
||||
Sema: sema,
|
||||
Rewatch: obj.Rewatch,
|
||||
Realize: obj.Realize,
|
||||
}
|
||||
}
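
Because Cmp now checks Rewatch and Realize, two metaparams that differ only in those fields compare as different, which is what lets the engine's vertexCmpFn force a swap. A quick sketch of that behaviour, using only the Copy and Cmp methods shown above:

func metaParamsCmpDemo() error {
	a := DefaultMetaParams.Copy()
	b := DefaultMetaParams.Copy()
	b.Rewatch = false // flip one of the newly compared fields

	if err := a.Cmp(b); err != nil {
		return err // expected: "values for Rewatch are different"
	}
	return fmt.Errorf("expected the metaparams to differ")
}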
|
||||
|
||||
|
||||
@@ -21,8 +21,6 @@ import (
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine/event"
|
||||
|
||||
errwrap "github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
@@ -93,22 +91,14 @@ type Init struct {
|
||||
// Called from within Watch:
|
||||
|
||||
// Running must be called after your watches are all started and ready.
|
||||
Running func() error
|
||||
Running func()
|
||||
|
||||
// Event sends an event notifying the engine of a possible state change.
|
||||
Event func() error
|
||||
Event func()
|
||||
|
||||
// Events returns a channel that we must watch for messages from the
|
||||
// engine. When it closes, this is a signal to shutdown.
|
||||
Events chan event.Kind
|
||||
|
||||
// Read processes messages that come in from the Events channel. It is a
|
||||
// helper method that knows how to handle the pause mechanism correctly.
|
||||
Read func(event.Kind) error
|
||||
|
||||
// Dirty marks the resource state as dirty. This signals to the engine
|
||||
// that CheckApply will have some work to do in order to converge it.
|
||||
Dirty func()
|
||||
// Done returns a channel that will close to signal to us that it's time
|
||||
// for us to shutdown.
|
||||
Done chan struct{}
|
||||
|
||||
// Called from within CheckApply:
|
||||
|
||||
@@ -192,12 +182,14 @@ type Res interface {
|
||||
// in response.
|
||||
Watch() error
|
||||
|
||||
// CheckApply determines if the state of the resource is connect and if
|
||||
// CheckApply determines if the state of the resource is correct and if
|
||||
// asked to with the `apply` variable, applies the requested state.
|
||||
CheckApply(apply bool) (checkOK bool, err error)
|
||||
|
||||
// Cmp compares itself to another resource and returns an error if they
|
||||
// are not equivalent.
|
||||
// are not equivalent. This is more strict than the Adapts method of the
|
||||
// CompatibleRes interface which allows for equivalent differences if
|
||||
// they have a compatible result in CheckApply.
|
||||
Cmp(Res) error
|
||||
}
|
||||
|
||||
@@ -246,15 +238,50 @@ type InterruptableRes interface {
|
||||
// is designed to unblock any long running operation that is occurring
|
||||
// in the CheckApply portion of the life cycle. If the resource has
|
||||
// already exited, running this method should not block. (That is to say
|
||||
// that you should not expect CheckApply or Watch to be able to alive
|
||||
// and able to read from a channel to satisfy your request.) It is best
|
||||
// to probably have this close a channel to multicast that signal around
|
||||
// to anyone who can detect it in a select. If you are in a situation
|
||||
// which cannot interrupt, then you can return an error.
|
||||
// that you should not expect CheckApply or Watch to be alive and be
|
||||
// able to read from a channel to satisfy your request.) It is best to
|
||||
// probably have this close a channel to multicast that signal around to
|
||||
// anyone who can detect it in a select. If you are in a situation which
|
||||
// cannot interrupt, then you can return an error.
|
||||
// FIXME: implement, and check the above description is what we expect!
|
||||
Interrupt() error
|
||||
}
|
||||
|
||||
// CopyableRes is an interface that a resource can implement if we want to be
|
||||
// able to copy the resource to build another one.
|
||||
type CopyableRes interface {
|
||||
Res
|
||||
|
||||
// Copy returns a new resource which has a copy of the public data.
|
||||
// Don't call this directly, use engine.ResCopy instead.
|
||||
// TODO: should we copy any private state or not?
|
||||
Copy() CopyableRes
|
||||
}
|
||||
|
||||
// CompatibleRes is an interface that a resource can implement to express if a
|
||||
// similar variant of itself is functionally equivalent. For example, two `pkg`
|
||||
// resources that install `cowsay` could be equivalent if one requests a state
|
||||
// of `installed` and the other requests `newest`, since they'll finish with a
|
||||
// compatible result. This doesn't need to be behind a metaparam flag or trait,
|
||||
// because it is never beneficial to turn it off, unless there is a bug to fix.
|
||||
type CompatibleRes interface {
|
||||
//Res // causes "duplicate method" error
|
||||
CopyableRes // we'll need to use the Copy method in the Merge function!
|
||||
|
||||
// Adapts compares itself to another resource and returns an error if
|
||||
// they are not compatibly equivalent. This is less strict than the
|
||||
// default `Cmp` method which should be used for most cases. Don't call
|
||||
// this directly, use engine.AdaptCmp instead.
|
||||
Adapts(CompatibleRes) error
|
||||
|
||||
// Merge returns the combined resource to use when two are equivalent.
|
||||
// This might get called multiple times for N different resources that
|
||||
// need to get merged, and so it should produce a consistent result no
|
||||
// matter which order it is called in. Don't call this directly, use
|
||||
// engine.ResMerge instead.
|
||||
Merge(CompatibleRes) (CompatibleRes, error)
|
||||
}
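
A toy sketch of what an implementation could look like, for a hypothetical ToyPkgRes resource with a State string field that already satisfies CopyableRes; none of these names exist in this change set:

func (obj *ToyPkgRes) Adapts(r engine.CompatibleRes) error {
	res, ok := r.(*ToyPkgRes)
	if !ok {
		return fmt.Errorf("res is not the same kind")
	}
	compatible := func(s string) bool { return s == "installed" || s == "newest" }
	if obj.State == res.State {
		return nil
	}
	if compatible(obj.State) && compatible(res.State) {
		return nil // both converge to a compatible CheckApply result
	}
	return fmt.Errorf("states %s and %s are not compatible", obj.State, res.State)
}

func (obj *ToyPkgRes) Merge(r engine.CompatibleRes) (engine.CompatibleRes, error) {
	res, ok := r.(*ToyPkgRes)
	if !ok {
		return nil, fmt.Errorf("res is not the same kind")
	}
	out, ok := obj.Copy().(*ToyPkgRes) // reuse the embedded CopyableRes
	if !ok {
		return nil, fmt.Errorf("copy returned an unexpected type")
	}
	if obj.State == "newest" || res.State == "newest" {
		out.State = "newest" // prefer the stricter of the two requests
	}
	return out, nil
}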
|
||||
|
||||
// CollectableRes is an interface for resources that support collection. It is
|
||||
// currently temporary until a proper API for all resources is invented.
|
||||
type CollectableRes interface {
|
||||
|
||||
@@ -135,10 +135,7 @@ func (obj *AugeasRes) Watch() error {
|
||||
}
|
||||
defer obj.recWatcher.Close()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -158,23 +155,15 @@ func (obj *AugeasRes) Watch() error {
|
||||
obj.init.Logf("Event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -423,9 +423,7 @@ func (obj *AwsEc2Res) longpollWatch() error {
|
||||
|
||||
// We tell the engine that we're running right away. This is not correct,
|
||||
// but the api doesn't have a way to signal when the waiters are ready.
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
// cancellable context used for exiting cleanly
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
@@ -488,14 +486,6 @@ func (obj *AwsEc2Res) longpollWatch() error {
|
||||
// process events from the goroutine
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case msg, ok := <-obj.awsChan:
|
||||
if !ok {
|
||||
return nil
|
||||
@@ -509,15 +499,16 @@ func (obj *AwsEc2Res) longpollWatch() error {
|
||||
continue
|
||||
default:
|
||||
obj.init.Logf("State: %v", msg.state)
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
}
|
||||
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -587,14 +578,6 @@ func (obj *AwsEc2Res) snsWatch() error {
|
||||
// process events
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case msg, ok := <-obj.awsChan:
|
||||
if !ok {
|
||||
return nil
|
||||
@@ -607,20 +590,19 @@ func (obj *AwsEc2Res) snsWatch() error {
|
||||
// is confirmed, we are ready to receive events, so we
|
||||
// can notify the engine that we're running.
|
||||
if msg.event == awsEc2EventWatchReady {
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
continue
|
||||
}
|
||||
obj.init.Logf("State: %v", msg.event)
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
558
engine/resources/cron.go
Normal file
@@ -0,0 +1,558 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os/user"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/traits"
|
||||
engineUtil "github.com/purpleidea/mgmt/engine/util"
|
||||
"github.com/purpleidea/mgmt/recwatch"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
|
||||
sdbus "github.com/coreos/go-systemd/dbus"
|
||||
"github.com/coreos/go-systemd/unit"
|
||||
systemdUtil "github.com/coreos/go-systemd/util"
|
||||
"github.com/godbus/dbus"
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// OnCalendar is a systemd-timer trigger, whose behaviour is defined in
|
||||
// 'man systemd-timer', and whose format is defined in the 'Calendar
|
||||
// Events' section of 'man systemd-time'.
|
||||
OnCalendar = "OnCalendar"
|
||||
// OnActiveSec is a systemd-timer trigger, whose behaviour is defined in
|
||||
// 'man systemd-timer', and whose format is a time span as defined in
|
||||
// 'man systemd-time'.
|
||||
OnActiveSec = "OnActiveSec"
|
||||
// OnBootSec is a systemd-timer trigger, whose behaviour is defined in
|
||||
// 'man systemd-timer', and whose format is a time span as defined in
|
||||
// 'man systemd-time'.
|
||||
OnBootSec = "OnBootSec"
|
||||
// OnStartupSec is a systemd-timer trigger, whose behaviour is defined in
|
||||
// 'man systemd-timer', and whose format is a time span as defined in
|
||||
// 'man systemd-time'.
|
||||
OnStartupSec = "OnStartupSec"
|
||||
// OnUnitActiveSec is a systemd-timer trigger, whose behaviour is defined
|
||||
// in 'man systemd-timer', and whose format is a time span as defined in
|
||||
// 'man systemd-time'.
|
||||
OnUnitActiveSec = "OnUnitActiveSec"
|
||||
// OnUnitInactiveSec is a systemd-timer trigger, whose behaviour is defined
|
||||
// in 'man systemd-timer', and whose format is a time span as defined in
|
||||
// 'man systemd-time'.
|
||||
OnUnitInactiveSec = "OnUnitInactiveSec"
|
||||
|
||||
// ctxTimeout is the delay, in seconds, before the calls to restart or stop
|
||||
// the systemd unit will error due to timeout.
|
||||
ctxTimeout = 30
|
||||
)
|
||||
|
||||
func init() {
|
||||
engine.RegisterResource("cron", func() engine.Res { return &CronRes{} })
|
||||
}
|
||||
|
||||
// CronRes is a systemd-timer cron resource.
|
||||
type CronRes struct {
|
||||
traits.Base
|
||||
traits.Edgeable
|
||||
traits.Recvable
|
||||
traits.Refreshable // needed because we embed a svc res
|
||||
|
||||
init *engine.Init
|
||||
|
||||
// Unit is the name of the systemd service unit. It is only necessary to
|
||||
// set if you want to specify a service with a different name than the
|
||||
// resource.
|
||||
Unit string `yaml:"unit"`
|
||||
// State must be 'exists' or 'absent'.
|
||||
State string `yaml:"state"`
|
||||
|
||||
// Session, if true, creates the timer as the current user, rather than
|
||||
// root. The service it points to must also be a user unit. It defaults to
|
||||
// false.
|
||||
Session bool `yaml:"session"`
|
||||
|
||||
// Trigger is the type of timer. Valid types are 'OnCalendar',
|
||||
// 'OnActiveSec', 'OnBootSec', 'OnStartupSec', 'OnUnitActiveSec', and
|
||||
// 'OnUnitInactiveSec'. For more information see 'man systemd.timer'.
|
||||
Trigger string `yaml:"trigger"`
|
||||
// Time must be used with all triggers. For 'OnCalendar', it must be in
|
||||
// the format defined in 'man systemd-time' under the heading 'Calendar
|
||||
// Events'. For all other triggers, time should be a valid time span as
|
||||
// defined in 'man systemd-time'.
|
||||
Time string `yaml:"time"`
|
||||
|
||||
// AccuracySec is the accuracy of the timer in systemd-time time span
|
||||
// format. It defaults to one minute.
|
||||
AccuracySec string `yaml:"accuracysec"`
|
||||
// RandomizedDelaySec delays the timer by a randomly selected, evenly
|
||||
// distributed amount of time between 0 and the specified time value. The
|
||||
// value must be a valid systemd-time time span.
|
||||
RandomizedDelaySec string `yaml:"randomizeddelaysec"`
|
||||
|
||||
// Persistent, if true, means the time when the service unit was last
|
||||
// triggered is stored on disk. When the timer is activated, the service
|
||||
// unit is triggered immediately if it would have been triggered at least
|
||||
// once during the time when the timer was inactive. It defaults to false.
|
||||
Persistent bool `yaml:"persistent"`
|
||||
// WakeSystem, if true, will cause the system to resume from suspend,
|
||||
// should it be suspended and if the system supports this. It defaults to
|
||||
// false.
|
||||
WakeSystem bool `yaml:"wakesystem"`
|
||||
// RemainAfterElapse, if true, means an elapsed timer will stay loaded, and
|
||||
// its state remains queryable. If false, an elapsed timer unit that cannot
|
||||
// elapse anymore is unloaded. It defaults to true.
|
||||
RemainAfterElapse bool `yaml:"remainafterelapse"`
|
||||
|
||||
file *FileRes // nested file resource
|
||||
recWatcher *recwatch.RecWatcher // recwatcher for nested file
|
||||
}
|
||||
|
||||
// Default returns some sensible defaults for this resource.
|
||||
func (obj *CronRes) Default() engine.Res {
|
||||
return &CronRes{
|
||||
State: "exists",
|
||||
RemainAfterElapse: true,
|
||||
}
|
||||
}
|
||||
|
||||
// makeComposite creates a pointer to a FileRes. The pointer is used to
|
||||
// validate and initialize the nested file resource and to apply the file state
|
||||
// in CheckApply.
|
||||
func (obj *CronRes) makeComposite() (*FileRes, error) {
|
||||
p, err := obj.UnitFilePath()
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error generating unit file path")
|
||||
}
|
||||
res, err := engine.NewNamedResource("file", p)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error creating nested file resource")
|
||||
}
|
||||
file, ok := res.(*FileRes)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error casting fileres")
|
||||
}
|
||||
file.State = obj.State
|
||||
if obj.State != "absent" {
|
||||
s := obj.unitFileContents()
|
||||
file.Content = &s
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Validate if the params passed in are valid data.
|
||||
func (obj *CronRes) Validate() error {
|
||||
// validate state
|
||||
if obj.State != "absent" && obj.State != "exists" {
|
||||
return fmt.Errorf("state must be 'absent' or 'exists'")
|
||||
}
|
||||
|
||||
// validate trigger
|
||||
if obj.State == "absent" && obj.Trigger == "" {
|
||||
return nil // if trigger is undefined we can't make a unit file
|
||||
}
|
||||
if obj.Trigger == "" || obj.Time == "" {
|
||||
return fmt.Errorf("trigger and must be set together")
|
||||
}
|
||||
if obj.Trigger != OnCalendar &&
|
||||
obj.Trigger != OnActiveSec &&
|
||||
obj.Trigger != OnBootSec &&
|
||||
obj.Trigger != OnStartupSec &&
|
||||
obj.Trigger != OnUnitActiveSec &&
|
||||
obj.Trigger != OnUnitInactiveSec {
|
||||
|
||||
return fmt.Errorf("invalid trigger")
|
||||
}
|
||||
|
||||
// TODO: Validate time (regex?)
|
||||
|
||||
// validate nested file
|
||||
file, err := obj.makeComposite()
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "makeComposite failed in validate")
|
||||
}
|
||||
if err := file.Validate(); err != nil { // composite resource
|
||||
return errwrap.Wrapf(err, "validate failed for embedded file: %s", obj.file)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init runs some startup code for this resource.
|
||||
func (obj *CronRes) Init(init *engine.Init) error {
|
||||
var err error
|
||||
obj.init = init // save for later
|
||||
|
||||
obj.file, err = obj.makeComposite()
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "makeComposite failed in init")
|
||||
}
|
||||
return obj.file.Init(init)
|
||||
}
|
||||
|
||||
// Close is run by the engine to clean up after the resource is done.
|
||||
func (obj *CronRes) Close() error {
|
||||
if obj.file != nil {
|
||||
return obj.file.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch watches for state changes and sends a message to the bus if there is a change.
|
||||
func (obj *CronRes) Watch() error {
|
||||
var bus *dbus.Conn
|
||||
var err error
|
||||
|
||||
// this resource depends on systemd
|
||||
if !systemdUtil.IsRunningSystemd() {
|
||||
return fmt.Errorf("systemd is not running")
|
||||
}
|
||||
|
||||
// create a private message bus
|
||||
if obj.Session {
|
||||
bus, err = util.SessionBusPrivateUsable()
|
||||
} else {
|
||||
bus, err = util.SystemBusPrivateUsable()
|
||||
}
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "failed to connect to bus")
|
||||
}
|
||||
defer bus.Close()
|
||||
|
||||
// dbus addmatch arguments for the timer unit
|
||||
args := []string{}
|
||||
args = append(args, "type='signal'")
|
||||
args = append(args, "interface='org.freedesktop.systemd1.Manager'")
|
||||
args = append(args, "eavesdrop='true'")
|
||||
args = append(args, fmt.Sprintf("arg2='%s.timer'", obj.Name()))
|
||||
|
||||
// match dbus messages
|
||||
if call := bus.BusObject().Call(engineUtil.DBusAddMatch, 0, strings.Join(args, ",")); call.Err != nil {
|
||||
return err
|
||||
}
|
||||
defer bus.BusObject().Call(engineUtil.DBusRemoveMatch, 0, args) // ignore the error
|
||||
|
||||
// channels for dbus signal
|
||||
dbusChan := make(chan *dbus.Signal)
|
||||
defer close(dbusChan)
|
||||
bus.Signal(dbusChan)
|
||||
defer bus.RemoveSignal(dbusChan) // not needed here, but nice for symmetry
|
||||
|
||||
p, err := obj.UnitFilePath()
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "error generating unit file path")
|
||||
}
|
||||
// recwatcher for the systemd-timer unit file
|
||||
obj.recWatcher, err = recwatch.NewRecWatcher(p, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer obj.recWatcher.Close()
|
||||
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case event := <-dbusChan:
|
||||
// process dbus events
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("%+v", event)
|
||||
}
|
||||
send = true
|
||||
|
||||
case event, ok := <-obj.recWatcher.Events():
|
||||
// process unit file recwatch events
|
||||
if !ok { // channel shutdown
|
||||
return nil
|
||||
}
|
||||
if err := event.Error; err != nil {
|
||||
return errwrap.Wrapf(err, "Unknown %s watcher error", obj)
|
||||
}
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("Event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
send = true
|
||||
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CheckApply is run to check the state and, if apply is true, to apply the
|
||||
// necessary changes to reach the desired state. This is run before Watch and
|
||||
// again if Watch finds a change occurring to the state.
|
||||
func (obj *CronRes) CheckApply(apply bool) (checkOK bool, err error) {
|
||||
ok := true
|
||||
// use the embedded file resource to apply the correct state
|
||||
if c, err := obj.file.CheckApply(apply); err != nil {
|
||||
return false, errwrap.Wrapf(err, "nested file failed")
|
||||
} else if !c {
|
||||
ok = false
|
||||
}
|
||||
// check timer state and apply the defined state if needed
|
||||
if c, err := obj.unitCheckApply(apply); err != nil {
|
||||
return false, errwrap.Wrapf(err, "unitCheckApply error")
|
||||
} else if !c {
|
||||
ok = false
|
||||
}
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
// unitCheckApply checks the state of the systemd-timer unit and, if apply is
|
||||
// true, applies the defined state.
|
||||
func (obj *CronRes) unitCheckApply(apply bool) (checkOK bool, err error) {
|
||||
var conn *sdbus.Conn
|
||||
var godbusConn *dbus.Conn
|
||||
|
||||
// this resource depends on systemd to ensure that it's running
|
||||
if !systemdUtil.IsRunningSystemd() {
|
||||
return false, fmt.Errorf("systemd is not running")
|
||||
}
|
||||
// go-systemd connection
|
||||
if obj.Session {
|
||||
conn, err = sdbus.NewUserConnection()
|
||||
} else {
|
||||
conn, err = sdbus.New() // system bus
|
||||
}
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "error making go-systemd dbus connection")
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// get the load state and active state of the timer unit
|
||||
loadState, err := conn.GetUnitProperty(fmt.Sprintf("%s.timer", obj.Name()), "LoadState")
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "failed to get load state")
|
||||
}
|
||||
activeState, err := conn.GetUnitProperty(fmt.Sprintf("%s.timer", obj.Name()), "ActiveState")
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "failed to get active state")
|
||||
}
|
||||
// check the timer unit state
|
||||
if obj.State == "absent" && loadState.Value == dbus.MakeVariant("not-found") {
|
||||
return true, nil
|
||||
}
|
||||
if obj.State == "exists" && activeState.Value == dbus.MakeVariant("active") {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if !apply {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// systemctl daemon-reload
|
||||
if err := conn.Reload(); err != nil {
|
||||
return false, errwrap.Wrapf(err, "error reloading daemon")
|
||||
}
|
||||
|
||||
// context for stopping/restarting the unit
|
||||
ctx, cancel := context.WithTimeout(context.Background(), ctxTimeout*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// godbus connection for stopping/restarting the unit
|
||||
if obj.Session {
|
||||
godbusConn, err = util.SessionBusPrivateUsable()
|
||||
} else {
|
||||
godbusConn, err = util.SystemBusPrivateUsable()
|
||||
}
|
||||
if err != nil {
|
||||
return false, errwrap.Wrapf(err, "error making godbus connection")
|
||||
}
|
||||
defer godbusConn.Close()
|
||||
|
||||
// stop or restart the unit
|
||||
if obj.State == "absent" {
|
||||
return false, engineUtil.StopUnit(ctx, godbusConn, fmt.Sprintf("%s.timer", obj.Name()))
|
||||
}
|
||||
return false, engineUtil.RestartUnit(ctx, godbusConn, fmt.Sprintf("%s.timer", obj.Name()))
|
||||
}
|
||||
|
||||
// Cmp compares two resources and returns an error if they are not equivalent.
|
||||
func (obj *CronRes) Cmp(r engine.Res) error {
|
||||
res, ok := r.(*CronRes)
|
||||
if !ok {
|
||||
return fmt.Errorf("res is not the same kind")
|
||||
}
|
||||
|
||||
if obj.State != res.State {
|
||||
return fmt.Errorf("state differs: %s vs %s", obj.State, res.State)
|
||||
}
|
||||
if obj.Trigger != res.Trigger {
|
||||
return fmt.Errorf("trigger differs: %s vs %s", obj.Trigger, res.Trigger)
|
||||
}
|
||||
if obj.Time != res.Time {
|
||||
return fmt.Errorf("time differs: %s vs %s", obj.Time, res.Time)
|
||||
}
|
||||
if obj.AccuracySec != res.AccuracySec {
|
||||
return fmt.Errorf("accuracysec differs: %s vs %s", obj.AccuracySec, res.AccuracySec)
|
||||
}
|
||||
if obj.RandomizedDelaySec != res.RandomizedDelaySec {
|
||||
return fmt.Errorf("randomizeddelaysec differs: %s vs %s", obj.RandomizedDelaySec, res.RandomizedDelaySec)
|
||||
}
|
||||
if obj.Unit != res.Unit {
|
||||
return fmt.Errorf("unit differs: %s vs %s", obj.Unit, res.Unit)
|
||||
}
|
||||
if obj.Persistent != res.Persistent {
|
||||
return fmt.Errorf("persistent differs: %t vs %t", obj.Persistent, res.Persistent)
|
||||
}
|
||||
if obj.WakeSystem != res.WakeSystem {
|
||||
return fmt.Errorf("wakesystem differs: %t vs %t", obj.WakeSystem, res.WakeSystem)
|
||||
}
|
||||
if obj.RemainAfterElapse != res.RemainAfterElapse {
|
||||
return fmt.Errorf("remainafterelapse differs: %t vs %t", obj.RemainAfterElapse, res.RemainAfterElapse)
|
||||
}
|
||||
return obj.file.Cmp(r)
|
||||
}
|
||||
|
||||
// CronUID is a unique resource identifier.
|
||||
type CronUID struct {
|
||||
// NOTE: There is also a name variable in the BaseUID struct; this is
|
||||
// information about where this UID came from, and is unrelated to the
|
||||
// information about the resource we're matching. That data which is
|
||||
// used in the IFF function, is what you see in the struct fields here.
// (That data is distinct from the UID's own name.)
|
||||
engine.BaseUID
|
||||
|
||||
unit string // name of target unit
|
||||
session bool // user session
|
||||
}
|
||||
|
||||
// IFF aka if and only if they are equivalent, return true. If not, false.
|
||||
func (obj *CronUID) IFF(uid engine.ResUID) bool {
|
||||
res, ok := uid.(*CronUID)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if obj.unit != res.unit {
|
||||
return false
|
||||
}
|
||||
if obj.session != res.session {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// AutoEdges returns the AutoEdge interface.
|
||||
func (obj *CronRes) AutoEdges() (engine.AutoEdge, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// UIDs includes all params to make a unique identification of this object.
|
||||
// Most resources only return one although some resources can return multiple.
|
||||
func (obj *CronRes) UIDs() []engine.ResUID {
|
||||
unit := fmt.Sprintf("%s.service", obj.Name())
|
||||
if obj.Unit != "" {
|
||||
unit = obj.Unit
|
||||
}
|
||||
uids := []engine.ResUID{
|
||||
&CronUID{
|
||||
BaseUID: engine.BaseUID{Name: obj.Name(), Kind: obj.Kind()},
|
||||
unit: unit, // name of target unit
|
||||
session: obj.Session, // user session
|
||||
},
|
||||
}
|
||||
if file, err := obj.makeComposite(); err == nil {
|
||||
uids = append(uids, file.UIDs()...) // add the file uid if we can
|
||||
}
|
||||
return uids
|
||||
}
|
||||
|
||||
// UnmarshalYAML is the custom unmarshal handler for this struct.
|
||||
// It is primarily useful for setting the defaults.
|
||||
func (obj *CronRes) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
type rawRes CronRes // indirection to avoid infinite recursion
|
||||
|
||||
def := obj.Default() // get the default
|
||||
res, ok := def.(*CronRes) // put in the right format
|
||||
if !ok {
|
||||
return fmt.Errorf("could not convert to CronRes")
|
||||
}
|
||||
raw := rawRes(*res) // convert; the defaults go here
|
||||
|
||||
if err := unmarshal(&raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*obj = CronRes(raw) // restore from indirection with type conversion!
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnitFilePath returns the path to the systemd-timer unit file.
|
||||
func (obj *CronRes) UnitFilePath() (string, error) {
|
||||
// root timer
|
||||
if !obj.Session {
|
||||
return fmt.Sprintf("/etc/systemd/system/%s.timer", obj.Name()), nil
|
||||
}
|
||||
// user timer
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return "", errwrap.Wrapf(err, "error getting current user")
|
||||
}
|
||||
if u.HomeDir == "" {
|
||||
return "", fmt.Errorf("user has no home directory")
|
||||
}
|
||||
return path.Join(u.HomeDir, "/.config/systemd/user/", fmt.Sprintf("%s.timer", obj.Name())), nil
|
||||
}
|
||||
|
||||
// unitFileContents returns the contents of the unit file representing the
|
||||
// CronRes struct.
|
||||
func (obj *CronRes) unitFileContents() string {
|
||||
u := []*unit.UnitOption{}
|
||||
|
||||
// [Unit]
|
||||
u = append(u, &unit.UnitOption{Section: "Unit", Name: "Description", Value: "timer generated by mgmt"})
|
||||
// [Timer]
|
||||
u = append(u, &unit.UnitOption{Section: "Timer", Name: obj.Trigger, Value: obj.Time})
|
||||
if obj.AccuracySec != "" {
|
||||
u = append(u, &unit.UnitOption{Section: "Timer", Name: "AccuracySec", Value: obj.AccuracySec})
|
||||
}
|
||||
if obj.RandomizedDelaySec != "" {
|
||||
u = append(u, &unit.UnitOption{Section: "Timer", Name: "RandomizedDelaySec", Value: obj.RandomizedDelaySec})
|
||||
}
|
||||
if obj.Unit != "" {
|
||||
u = append(u, &unit.UnitOption{Section: "Timer", Name: "Unit", Value: obj.Unit})
|
||||
}
|
||||
if obj.Persistent != false { // defaults to false
|
||||
u = append(u, &unit.UnitOption{Section: "Timer", Name: "Persistent", Value: "true"})
|
||||
}
|
||||
if obj.WakeSystem != false { // defaults to false
|
||||
u = append(u, &unit.UnitOption{Section: "Timer", Name: "WakeSystem", Value: "true"})
|
||||
}
|
||||
if obj.RemainAfterElapse != true { // defaults to true
|
||||
u = append(u, &unit.UnitOption{Section: "Timer", Name: "RemainAfterElapse", Value: "false"})
|
||||
}
|
||||
// [Install]
|
||||
u = append(u, &unit.UnitOption{Section: "Install", Name: "WantedBy", Value: "timers.target"})
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(unit.Serialize(u))
|
||||
return buf.String()
|
||||
}
|
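The unitFileContents helper above only serializes a list of unit.UnitOption entries, so the easiest way to see what it produces is a small helper in the same package. The following is a sketch only: it assumes it lives alongside CronRes in package resources, the resource name and timer values are hypothetical, and the commented output is approximate.

```go
// exampleCronTimerUnit is illustrative only; field values are hypothetical.
func exampleCronTimerUnit() string {
	res := &CronRes{
		Trigger:           "OnCalendar",
		Time:              "*-*-* 00:00:00",
		Persistent:        true,
		RemainAfterElapse: true, // the default
	}
	res.SetName("backup") // hypothetical timer name
	return res.unitFileContents()
	// Roughly:
	// [Unit]
	// Description=timer generated by mgmt
	// [Timer]
	// OnCalendar=*-*-* 00:00:00
	// Persistent=true
	// [Install]
	// WantedBy=timers.target
}
```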
||||
@@ -168,10 +168,7 @@ func (obj *DockerContainerRes) Watch() error {
|
||||
|
||||
eventChan, errChan := obj.client.Events(ctx, types.EventsOptions{})
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -184,27 +181,21 @@ func (obj *DockerContainerRes) Watch() error {
|
||||
obj.init.Logf("%+v", event)
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case err, ok := <-errChan:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
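This hunk is the first of many in this set that apply the same Watch() refactor: init.Running() is no longer an error-returning call to check, and shutdown arrives via the engine closing init.Done instead of a message on init.Events. A self-contained sketch of the resulting loop shape is below; fakeInit and the changes channel are placeholders, not the real engine.Init API.

```go
package main

import (
	"fmt"
	"time"
)

// fakeInit stands in for the parts of the engine init handle that the
// refactored Watch() loops rely on: a Done channel closed on shutdown and an
// Event callback. It is a placeholder, not the real engine.Init.
type fakeInit struct {
	Done  chan struct{}
	Event func()
}

// watch mirrors the post-refactor loop: wait on resource events or Done, and
// coalesce sends into a single Event() per wakeup. (The real code also calls
// obj.init.Running() first; omitted here.)
func watch(ini *fakeInit, changes <-chan struct{}) error {
	send := false
	for {
		select {
		case <-changes:
			send = true
		case <-ini.Done: // closed by the engine to signal shutdown
			return nil
		}
		if send {
			send = false
			ini.Event() // notify engine of an event (this can block)
		}
	}
}

func main() {
	ini := &fakeInit{Done: make(chan struct{}), Event: func() { fmt.Println("event") }}
	changes := make(chan struct{}, 1)
	changes <- struct{}{}
	go func() { time.Sleep(10 * time.Millisecond); close(ini.Done) }()
	fmt.Println(watch(ini, changes)) // prints "event" then "<nil>"
}
```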
||||
@@ -20,6 +20,7 @@ package resources
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
@@ -47,11 +48,14 @@ type ExecRes struct {
|
||||
init *engine.Init
|
||||
|
||||
Cmd string `yaml:"cmd"` // the command to run
|
||||
Cwd string `yaml:"cwd"` // the dir to run the command in (empty means use `pwd` of command)
|
||||
Shell string `yaml:"shell"` // the (optional) shell to use to run the cmd
|
||||
Timeout int `yaml:"timeout"` // the cmd timeout in seconds
|
||||
WatchCmd string `yaml:"watchcmd"` // the watch command to run
|
||||
WatchCwd string `yaml:"watchcwd"` // the dir to run the watch command in (empty means use `pwd` of command)
|
||||
WatchShell string `yaml:"watchshell"` // the (optional) shell to use to run the watch cmd
|
||||
IfCmd string `yaml:"ifcmd"` // the if command to run
|
||||
IfCwd string `yaml:"ifcwd"` // the dir to run the if command in (empty means use `pwd` of command)
|
||||
IfShell string `yaml:"ifshell"` // the (optional) shell to use to run the if cmd
|
||||
User string `yaml:"user"` // the (optional) user to use to execute the command
|
||||
Group string `yaml:"group"` // the (optional) group to use to execute the command
|
||||
@@ -118,11 +122,11 @@ func (obj *ExecRes) Watch() error {
|
||||
//cmdName = path.Join(d, cmdName)
|
||||
cmdArgs = split[1:]
|
||||
} else {
|
||||
cmdName = obj.Shell // usually bash, or sh
|
||||
cmdName = obj.WatchShell // usually bash, or sh
|
||||
cmdArgs = []string{"-c", obj.WatchCmd}
|
||||
}
|
||||
cmd := exec.Command(cmdName, cmdArgs...)
|
||||
//cmd.Dir = "" // look for program in pwd ?
|
||||
cmd.Dir = obj.WatchCwd // run program in pwd if ""
|
||||
// ignore signals sent to parent process (we're in our own group)
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
@@ -151,13 +155,12 @@ func (obj *ExecRes) Watch() error {
|
||||
return errwrap.Wrapf(err, "error starting Cmd")
|
||||
}
|
||||
|
||||
ioChan = obj.bufioChanScanner(scanner)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel() // unblock and cleanup
|
||||
ioChan = obj.bufioChanScanner(ctx, scanner)
|
||||
}
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -177,24 +180,16 @@ func (obj *ExecRes) Watch() error {
|
||||
obj.init.Logf("watch output: %s", data.text)
|
||||
if data.text != "" {
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
}
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -208,7 +203,6 @@ func (obj *ExecRes) CheckApply(apply bool) (bool, error) {
|
||||
// have a chance to execute, and all without the check of obj.Refresh()!
|
||||
|
||||
if obj.IfCmd != "" { // if there is no onlyif check, we should just run
|
||||
|
||||
var cmdName string
|
||||
var cmdArgs []string
|
||||
if obj.IfShell == "" {
|
||||
@@ -224,6 +218,7 @@ func (obj *ExecRes) CheckApply(apply bool) (bool, error) {
|
||||
cmdArgs = []string{"-c", obj.IfCmd}
|
||||
}
|
||||
cmd := exec.Command(cmdName, cmdArgs...)
|
||||
cmd.Dir = obj.IfCwd // run program in pwd if ""
|
||||
// ignore signals sent to parent process (we're in our own group)
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
@@ -266,7 +261,7 @@ func (obj *ExecRes) CheckApply(apply bool) (bool, error) {
|
||||
cmdArgs = []string{"-c", obj.Cmd}
|
||||
}
|
||||
cmd := exec.Command(cmdName, cmdArgs...)
|
||||
//cmd.Dir = "" // look for program in pwd ?
|
||||
cmd.Dir = obj.Cwd // run program in pwd if ""
|
||||
// ignore signals sent to parent process (we're in our own group)
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
@@ -373,6 +368,9 @@ func (obj *ExecRes) Compare(r engine.Res) bool {
|
||||
if obj.Cmd != res.Cmd {
|
||||
return false
|
||||
}
|
||||
if obj.Cwd != res.Cwd {
|
||||
return false
|
||||
}
|
||||
if obj.Shell != res.Shell {
|
||||
return false
|
||||
}
|
||||
@@ -382,12 +380,18 @@ func (obj *ExecRes) Compare(r engine.Res) bool {
|
||||
if obj.WatchCmd != res.WatchCmd {
|
||||
return false
|
||||
}
|
||||
if obj.WatchCwd != res.WatchCwd {
|
||||
return false
|
||||
}
|
||||
if obj.WatchShell != res.WatchShell {
|
||||
return false
|
||||
}
|
||||
if obj.IfCmd != res.IfCmd {
|
||||
return false
|
||||
}
|
||||
if obj.IfCwd != res.IfCwd {
|
||||
return false
|
||||
}
|
||||
if obj.IfShell != res.IfShell {
|
||||
return false
|
||||
}
|
||||
@@ -535,18 +539,26 @@ type bufioOutput struct {
|
||||
}
|
||||
|
||||
// bufioChanScanner wraps the scanner output in a channel.
|
||||
func (obj *ExecRes) bufioChanScanner(scanner *bufio.Scanner) chan *bufioOutput {
|
||||
func (obj *ExecRes) bufioChanScanner(ctx context.Context, scanner *bufio.Scanner) chan *bufioOutput {
|
||||
ch := make(chan *bufioOutput)
|
||||
obj.wg.Add(1)
|
||||
go func() {
|
||||
defer obj.wg.Done()
|
||||
defer close(ch)
|
||||
for scanner.Scan() {
|
||||
ch <- &bufioOutput{text: scanner.Text()} // blocks here ?
|
||||
select {
|
||||
case ch <- &bufioOutput{text: scanner.Text()}: // blocks here ?
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
// on EOF, scanner.Err() will be nil
|
||||
if err := scanner.Err(); err != nil {
|
||||
ch <- &bufioOutput{err: err} // send any misc errors we encounter
|
||||
select {
|
||||
case ch <- &bufioOutput{err: err}: // send any misc errors we encounter
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
|
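The change above threads a context into bufioChanScanner so the producer goroutine cannot leak if Watch() returns while the scanner still has output pending: every send is paired with a ctx.Done() case, so cancel() always unblocks it. The same pattern in isolation, as a runnable sketch with generic names (not the mgmt API):

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"strings"
)

// lines streams scanner tokens until EOF or ctx cancellation; mirrors the
// select-on-send pattern introduced in bufioChanScanner above.
func lines(ctx context.Context, scanner *bufio.Scanner) <-chan string {
	ch := make(chan string)
	go func() {
		defer close(ch)
		for scanner.Scan() {
			select {
			case ch <- scanner.Text():
			case <-ctx.Done(): // reader gave up; stop instead of blocking forever
				return
			}
		}
	}()
	return ch
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := bufio.NewScanner(strings.NewReader("a\nb\nc\n"))
	for line := range lines(ctx, s) {
		fmt.Println(line)
	}
}
```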
||||
@@ -31,9 +31,6 @@ func fakeInit(t *testing.T) *engine.Init {
|
||||
t.Logf("test: "+format, v...)
|
||||
}
|
||||
return &engine.Init{
|
||||
Running: func() error {
|
||||
return nil
|
||||
},
|
||||
Debug: debug,
|
||||
Logf: logf,
|
||||
}
|
||||
|
||||
@@ -54,7 +54,10 @@ type FileRes struct {
|
||||
|
||||
init *engine.Init
|
||||
|
||||
Path string `yaml:"path"` // path variable (usually defaults to name)
|
||||
// Path variable, which usually defaults to the name, represents the
|
||||
// destination path for the file or directory being managed. It must be
|
||||
// an absolute path, and as a result must start with a slash.
|
||||
Path string `yaml:"path"`
|
||||
Dirname string `yaml:"dirname"` // override the path dirname
|
||||
Basename string `yaml:"basename"` // override the path basename
|
||||
Content *string `yaml:"content"` // nil to mark as undefined
|
||||
@@ -93,6 +96,10 @@ func (obj *FileRes) Validate() error {
|
||||
return fmt.Errorf("basename must not start with a slash")
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(obj.GetPath(), "/") {
|
||||
return fmt.Errorf("resultant path must be absolute")
|
||||
}
|
||||
|
||||
if obj.Content != nil && obj.Source != "" {
|
||||
return fmt.Errorf("can't specify both Content and Source")
|
||||
}
|
||||
@@ -187,10 +194,7 @@ func (obj *FileRes) Watch() error {
|
||||
}
|
||||
defer obj.recWatcher.Close()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -210,23 +214,15 @@ func (obj *FileRes) Watch() error {
|
||||
obj.init.Logf("Event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -241,7 +237,7 @@ func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sh
|
||||
// TODO: does it make sense to switch dst to an io.Writer ?
|
||||
// TODO: use obj.Force when dealing with symlinks and other file types!
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("fileCheckApply: %s -> %s", src, dst)
|
||||
obj.init.Logf("fileCheckApply: %v -> %s", src, dst)
|
||||
}
|
||||
|
||||
srcFile, isFile := src.(*os.File)
|
||||
@@ -338,7 +334,7 @@ func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sh
|
||||
return sha256sum, false, nil
|
||||
}
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("fileCheckApply: Apply: %s -> %s", src, dst)
|
||||
obj.init.Logf("fileCheckApply: Apply: %v -> %s", src, dst)
|
||||
}
|
||||
|
||||
dstClose() // unlock file usage so we can write to it
|
||||
@@ -359,7 +355,7 @@ func (obj *FileRes) fileCheckApply(apply bool, src io.ReadSeeker, dst string, sh
|
||||
|
||||
// TODO: should we offer a way to cancel the copy on ^C ?
|
||||
if obj.init.Debug {
|
||||
obj.init.Logf("fileCheckApply: Copy: %s -> %s", src, dst)
|
||||
obj.init.Logf("fileCheckApply: Copy: %v -> %s", src, dst)
|
||||
}
|
||||
if n, err := io.Copy(dstFile, src); err != nil {
|
||||
return sha256sum, false, err
|
||||
@@ -608,7 +604,7 @@ func (obj *FileRes) contentCheckApply(apply bool) (checkOK bool, _ error) {
|
||||
}
|
||||
|
||||
// content is not defined, leave it alone...
|
||||
if obj.Content == nil {
|
||||
if obj.Content == nil && obj.Source == "" {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -146,3 +146,19 @@ func TestMiscEncodeDecode2(t *testing.T) {
|
||||
t.Errorf("The input and output Res values do not match: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileAbsolute1(t *testing.T) {
|
||||
// file resource paths should be absolute
|
||||
f1 := &FileRes{
|
||||
Path: "tmp/a/b", // some relative file
|
||||
}
|
||||
f2 := &FileRes{
|
||||
Path: "tmp/a/b/", // some relative dir
|
||||
}
|
||||
f3 := &FileRes{
|
||||
Path: "tmp", // some short relative file
|
||||
}
|
||||
if f1.Validate() == nil || f2.Validate() == nil || f3.Validate() == nil {
|
||||
t.Errorf("file res should have failed validate")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,6 +41,7 @@ const groupFile = "/etc/group"
|
||||
// GroupRes is a user group resource.
|
||||
type GroupRes struct {
|
||||
traits.Base // add the base methods without re-implementation
|
||||
traits.Edgeable
|
||||
|
||||
init *engine.Init
|
||||
|
||||
@@ -84,10 +85,7 @@ func (obj *GroupRes) Watch() error {
|
||||
}
|
||||
defer obj.recWatcher.Close()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -107,23 +105,15 @@ func (obj *GroupRes) Watch() error {
|
||||
obj.init.Logf("Event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -266,6 +256,11 @@ type GroupUID struct {
|
||||
gid *uint32
|
||||
}
|
||||
|
||||
// AutoEdges returns the AutoEdge interface.
|
||||
func (obj *GroupRes) AutoEdges() (engine.AutoEdge, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// IFF aka if and only if they are equivalent, return true. If not, false.
|
||||
func (obj *GroupUID) IFF(uid engine.ResUID) bool {
|
||||
res, ok := uid.(*GroupUID)
|
||||
|
||||
@@ -127,33 +127,22 @@ func (obj *HostnameRes) Watch() error {
|
||||
signals := make(chan *dbus.Signal, 10) // closed by dbus package
|
||||
bus.Signal(signals)
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case <-signals:
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,11 +102,7 @@ func (obj *KVRes) Close() error {
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *KVRes) Watch() error {
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
ch := obj.init.World.StrMapWatch(obj.Key) // get possible events!
|
||||
|
||||
@@ -125,23 +121,15 @@ func (obj *KVRes) Watch() error {
|
||||
obj.init.Logf("Event!")
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -224,10 +224,7 @@ func (obj *MountRes) Watch() error {
|
||||
// close the recwatcher when we're done
|
||||
defer recWatcher.Close()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // bubble up a NACK...
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send bool
|
||||
var done bool
|
||||
@@ -248,7 +245,6 @@ func (obj *MountRes) Watch() error {
|
||||
obj.init.Logf("event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
|
||||
obj.init.Dirty()
|
||||
send = true
|
||||
|
||||
case event, ok := <-ch:
|
||||
@@ -263,24 +259,16 @@ func (obj *MountRes) Watch() error {
|
||||
obj.init.Logf("event: %+v", event)
|
||||
}
|
||||
|
||||
obj.init.Dirty()
|
||||
send = true
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
76
engine/resources/mount_linux_test.go
Normal file
@@ -0,0 +1,76 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build !root,!darwin
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
fstab "github.com/deniswernert/go-fstab"
|
||||
)
|
||||
|
||||
func TestMountExists(t *testing.T) {
|
||||
const procMock1 = `/tmp/mount0 /mnt/proctest ext4 rw,seclabel,relatime,data=ordered 0 0` + "\n"
|
||||
|
||||
var mountExistsTests = []struct {
|
||||
procMock []byte
|
||||
in *fstab.Mount
|
||||
out bool
|
||||
}{
|
||||
{
|
||||
[]byte(procMock1),
|
||||
&fstab.Mount{
|
||||
Spec: "/tmp/mount0",
|
||||
File: "/mnt/proctest",
|
||||
VfsType: "ext4",
|
||||
MntOps: map[string]string{"defaults": ""},
|
||||
Freq: 1,
|
||||
PassNo: 1,
|
||||
},
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
file, err := ioutil.TempFile("", "proc")
|
||||
if err != nil {
|
||||
t.Errorf("error creating temp file: %v", err)
|
||||
return
|
||||
}
|
||||
defer os.Remove(file.Name())
|
||||
for _, test := range mountExistsTests {
|
||||
if err := ioutil.WriteFile(file.Name(), test.procMock, 0664); err != nil {
|
||||
t.Errorf("error writing proc file: %s: %v", file.Name(), err)
|
||||
return
|
||||
}
|
||||
if err := ioutil.WriteFile(test.in.Spec, []byte{}, 0664); err != nil {
|
||||
t.Errorf("error writing fstab file: %s: %v", file.Name(), err)
|
||||
return
|
||||
}
|
||||
result, err := mountExists(file.Name(), test.in)
|
||||
if err != nil {
|
||||
t.Errorf("error checking if fstab entry %s exists: %v", test.in.String(), err)
|
||||
return
|
||||
}
|
||||
if result != test.out {
|
||||
t.Errorf("mountExistsTests test wanted: %t, got: %t", test.out, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -29,8 +29,6 @@ import (
|
||||
|
||||
const fstabMock1 = `UUID=ef5726f2-615c-4350-b0ab-f106e5fc90ad / ext4 defaults 1 1` + "\n"
|
||||
|
||||
const procMock1 = `/tmp/mount0 /mnt/proctest ext4 rw,seclabel,relatime,data=ordered 0 0` + "\n"
|
||||
|
||||
var fstabWriteTests = []struct {
|
||||
in fstab.Mounts
|
||||
}{
|
||||
@@ -295,49 +293,3 @@ func TestMountCompare(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var mountExistsTests = []struct {
|
||||
procMock []byte
|
||||
in *fstab.Mount
|
||||
out bool
|
||||
}{
|
||||
{
|
||||
[]byte(procMock1),
|
||||
&fstab.Mount{
|
||||
Spec: "/tmp/mount0",
|
||||
File: "/mnt/proctest",
|
||||
VfsType: "ext4",
|
||||
MntOps: map[string]string{"defaults": ""},
|
||||
Freq: 1,
|
||||
PassNo: 1,
|
||||
},
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
func TestMountExists(t *testing.T) {
|
||||
file, err := ioutil.TempFile("", "proc")
|
||||
if err != nil {
|
||||
t.Errorf("error creating temp file: %v", err)
|
||||
return
|
||||
}
|
||||
defer os.Remove(file.Name())
|
||||
for _, test := range mountExistsTests {
|
||||
if err := ioutil.WriteFile(file.Name(), test.procMock, 0664); err != nil {
|
||||
t.Errorf("error writing proc file: %s: %v", file.Name(), err)
|
||||
return
|
||||
}
|
||||
if err := ioutil.WriteFile(test.in.Spec, []byte{}, 0664); err != nil {
|
||||
t.Errorf("error writing fstab file: %s: %v", file.Name(), err)
|
||||
return
|
||||
}
|
||||
result, err := mountExists(file.Name(), test.in)
|
||||
if err != nil {
|
||||
t.Errorf("error checking if fstab entry %s exists: %v", test.in.String(), err)
|
||||
return
|
||||
}
|
||||
if result != test.out {
|
||||
t.Errorf("mountExistsTests test wanted: %t, got: %t", test.out, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,30 +94,20 @@ func (obj *MsgRes) Close() error {
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *MsgRes) Watch() error {
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
//var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
}
|
||||
//if send {
|
||||
// send = false
|
||||
// obj.init.Event() // notify engine of an event (this can block)
|
||||
//}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -137,7 +127,7 @@ func (obj *MsgRes) isAllStateOK() bool {
|
||||
func (obj *MsgRes) updateStateOK() {
|
||||
// XXX: this resource doesn't entirely make sense to me at the moment.
|
||||
if !obj.isAllStateOK() {
|
||||
obj.init.Dirty()
|
||||
//obj.init.Dirty() // XXX: removed with API cleanup
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -34,6 +34,7 @@ import (
|
||||
"github.com/purpleidea/mgmt/engine/traits"
|
||||
"github.com/purpleidea/mgmt/recwatch"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
"github.com/purpleidea/mgmt/util/socketset"
|
||||
|
||||
multierr "github.com/hashicorp/go-multierror"
|
||||
errwrap "github.com/pkg/errors"
|
||||
@@ -119,9 +120,6 @@ func (obj *NetRes) Validate() error {
|
||||
}
|
||||
|
||||
// validate network address input
|
||||
if (obj.Addrs == nil) != (obj.Gateway == "") {
|
||||
return fmt.Errorf("addrs and gateway must both be set or both be empty")
|
||||
}
|
||||
if obj.Addrs != nil {
|
||||
for _, addr := range obj.Addrs {
|
||||
if _, _, err := net.ParseCIDR(addr); err != nil {
|
||||
@@ -193,16 +191,20 @@ func (obj *NetRes) Close() error {
|
||||
// TODO: currently gets events from ALL interfaces, would be nice to reject
|
||||
// events from other interfaces.
|
||||
func (obj *NetRes) Watch() error {
|
||||
// waitgroup for netlink receive goroutine
|
||||
wg := &sync.WaitGroup{}
|
||||
defer wg.Wait()
|
||||
|
||||
// create a netlink socket for receiving network interface events
|
||||
conn, err := newSocketSet(rtmGrps, obj.socketFile)
|
||||
conn, err := socketset.NewSocketSet(rtmGrps, obj.socketFile, unix.NETLINK_ROUTE)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "error creating socket set")
|
||||
}
|
||||
defer conn.shutdown() // close the netlink socket and unblock conn.receive()
|
||||
|
||||
// waitgroup for netlink receive goroutine
|
||||
wg := &sync.WaitGroup{}
|
||||
defer conn.Close()
|
||||
// We must wait for the Shutdown() AND the select inside of SocketSet to
|
||||
// complete before we Close, since the unblocking in SocketSet is not a
|
||||
// synchronous operation.
|
||||
defer wg.Wait()
|
||||
defer conn.Shutdown() // close the netlink socket and unblock conn.receive()
|
||||
|
||||
// watch the systemd-networkd configuration file
|
||||
recWatcher, err := recwatch.NewRecWatcher(obj.unitFilePath, false)
|
||||
@@ -222,11 +224,10 @@ func (obj *NetRes) Watch() error {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer conn.close() // close the pipe when we're done with it
|
||||
defer close(nlChan)
|
||||
for {
|
||||
// receive messages from the socket set
|
||||
msgs, err := conn.receive()
|
||||
msgs, err := conn.ReceiveNetlinkMessages()
|
||||
if err != nil {
|
||||
select {
|
||||
case nlChan <- &nlChanStruct{
|
||||
@@ -246,10 +247,7 @@ func (obj *NetRes) Watch() error {
|
||||
}
|
||||
}()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
var done bool
|
||||
@@ -271,7 +269,6 @@ func (obj *NetRes) Watch() error {
|
||||
}
|
||||
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-recWatcher.Events():
|
||||
if !ok {
|
||||
@@ -289,23 +286,15 @@ func (obj *NetRes) Watch() error {
|
||||
}
|
||||
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -771,118 +760,3 @@ func (obj *iface) addrApplyAdd(objAddrs []string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// socketSet is used to receive events from a socket and shut it down cleanly
|
||||
// when asked. It contains a socket for events and a pipe socket to unblock
|
||||
// receive on shutdown.
|
||||
type socketSet struct {
|
||||
fdEvents int
|
||||
fdPipe int
|
||||
pipeFile string
|
||||
}
|
||||
|
||||
// newSocketSet returns a socketSet, initialized with the given parameters.
|
||||
func newSocketSet(groups uint32, file string) (*socketSet, error) {
|
||||
// make a netlink socket file descriptor
|
||||
fdEvents, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, unix.NETLINK_ROUTE)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error creating netlink socket")
|
||||
}
|
||||
// bind to the socket and add the netlink groups we need to get events
|
||||
if err := unix.Bind(fdEvents, &unix.SockaddrNetlink{
|
||||
Family: unix.AF_NETLINK,
|
||||
Groups: groups,
|
||||
}); err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error binding netlink socket")
|
||||
}
|
||||
|
||||
// create a pipe socket to unblock unix.Select when we close
|
||||
fdPipe, err := unix.Socket(unix.AF_UNIX, unix.SOCK_RAW, unix.PROT_NONE)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error creating pipe socket")
|
||||
}
|
||||
// bind the pipe to a file
|
||||
if err = unix.Bind(fdPipe, &unix.SockaddrUnix{
|
||||
Name: file,
|
||||
}); err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error binding pipe socket")
|
||||
}
|
||||
return &socketSet{
|
||||
fdEvents: fdEvents,
|
||||
fdPipe: fdPipe,
|
||||
pipeFile: file,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// shutdown closes the event file descriptor and unblocks receive by sending
|
||||
// a message to the pipe file descriptor. It must be called before close, and
|
||||
// should only be called once.
|
||||
func (obj *socketSet) shutdown() error {
|
||||
// close the event socket so no more events are produced
|
||||
if err := unix.Close(obj.fdEvents); err != nil {
|
||||
return err
|
||||
}
|
||||
// send a message to the pipe to unblock select
|
||||
return unix.Sendto(obj.fdPipe, nil, 0, &unix.SockaddrUnix{
|
||||
Name: path.Join(obj.pipeFile),
|
||||
})
|
||||
}
|
||||
|
||||
// close closes the pipe file descriptor. It must only be called after
|
||||
// shutdown has closed fdEvents, and unblocked receive. It should only be
|
||||
// called once.
|
||||
func (obj *socketSet) close() error {
|
||||
return unix.Close(obj.fdPipe)
|
||||
}
|
||||
|
||||
// receive waits for bytes from fdEvents and parses them into a slice of
|
||||
// netlink messages. It will block until an event is produced, or shutdown
|
||||
// is called.
|
||||
func (obj *socketSet) receive() ([]syscall.NetlinkMessage, error) {
|
||||
// Select will return when any fd in fdSet (fdEvents and fdPipe) is ready
|
||||
// to read.
|
||||
_, err := unix.Select(obj.nfd(), obj.fdSet(), nil, nil, nil)
|
||||
if err != nil {
|
||||
// if a system interrupt is caught
|
||||
if err == unix.EINTR { // signal interrupt
|
||||
return nil, nil
|
||||
}
|
||||
return nil, errwrap.Wrapf(err, "error selecting on fd")
|
||||
}
|
||||
// receive the message from the netlink socket into b
|
||||
b := make([]byte, os.Getpagesize())
|
||||
n, _, err := unix.Recvfrom(obj.fdEvents, b, unix.MSG_DONTWAIT) // non-blocking receive
|
||||
if err != nil {
|
||||
// if fdEvents is closed
|
||||
if err == unix.EBADF { // bad file descriptor
|
||||
return nil, nil
|
||||
}
|
||||
return nil, errwrap.Wrapf(err, "error receiving messages")
|
||||
}
|
||||
// if we didn't get enough bytes for a header, something went wrong
|
||||
if n < unix.NLMSG_HDRLEN {
|
||||
return nil, fmt.Errorf("received short header")
|
||||
}
|
||||
b = b[:n] // truncate b to message length
|
||||
// use syscall to parse, as func does not exist in x/sys/unix
|
||||
return syscall.ParseNetlinkMessage(b)
|
||||
}
|
||||
|
||||
// nfd returns one more than the highest fd value in the struct, for use as
|
||||
// the nfds parameter in select. It represents the file descriptor set maximum
|
||||
// size. See man select for more info.
|
||||
func (obj *socketSet) nfd() int {
|
||||
if obj.fdEvents > obj.fdPipe {
|
||||
return obj.fdEvents + 1
|
||||
}
|
||||
return obj.fdPipe + 1
|
||||
}
|
||||
|
||||
// fdSet returns a bitmask representation of the integer values of fdEvents
|
||||
// and fdPipe. See man select for more info.
|
||||
func (obj *socketSet) fdSet() *unix.FdSet {
|
||||
fdSet := &unix.FdSet{}
|
||||
fdSet.Bits[obj.fdEvents/64] |= 1 << uint(obj.fdEvents)
|
||||
fdSet.Bits[obj.fdPipe/64] |= 1 << uint(obj.fdPipe) // e.g. fd=2 sets bit 100; adding fd=4 gives 10100
|
||||
return fdSet
|
||||
}
|
||||
|
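The removed socketSet code above (now provided by util/socketset, per the import change earlier in this diff) builds a select(2) fd set by hand. The bit bookkeeping is easy to get wrong, so here is the arithmetic in isolation; this is a standalone sketch using a plain map rather than the real unix.FdSet type:

```go
package main

import "fmt"

// fdBits shows the bookkeeping used by fdSet above: each fd sets one bit in
// a 64-bit word; fd 2 -> 0b100, adding fd 4 -> 0b10100.
func fdBits(fds ...int) map[int]int64 {
	words := map[int]int64{}
	for _, fd := range fds {
		words[fd/64] |= 1 << uint(fd%64) // word index, then bit within word
	}
	return words
}

func main() {
	fmt.Printf("%b\n", fdBits(2, 4)[0]) // 10100
}
```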
||||
84
engine/resources/net_test.go
Normal file
@@ -0,0 +1,84 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build !darwin
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// test cases for NetRes.unitFileContents()
|
||||
var unitFileContentsTests = []struct {
|
||||
dev string
|
||||
in *NetRes
|
||||
out []byte
|
||||
}{
|
||||
{
|
||||
"eth0",
|
||||
&NetRes{
|
||||
State: "up",
|
||||
Addrs: []string{"192.168.42.13/24"},
|
||||
Gateway: "192.168.42.1",
|
||||
},
|
||||
[]byte(
|
||||
strings.Join(
|
||||
[]string{
|
||||
"[Match]",
|
||||
"Name=eth0",
|
||||
"[Network]",
|
||||
"Address=192.168.42.13/24",
|
||||
"Gateway=192.168.42.1",
|
||||
},
|
||||
"\n"),
|
||||
),
|
||||
},
|
||||
{
|
||||
"wlp5s0",
|
||||
&NetRes{
|
||||
State: "up",
|
||||
Addrs: []string{"10.0.2.13/24", "10.0.2.42/24"},
|
||||
Gateway: "10.0.2.1",
|
||||
},
|
||||
[]byte(
|
||||
strings.Join(
|
||||
[]string{
|
||||
"[Match]",
|
||||
"Name=wlp5s0",
|
||||
"[Network]",
|
||||
"Address=10.0.2.13/24",
|
||||
"Address=10.0.2.42/24",
|
||||
"Gateway=10.0.2.1",
|
||||
},
|
||||
"\n"),
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
// test NetRes.unitFileContents()
|
||||
func TestUnitFileContents(t *testing.T) {
|
||||
for _, test := range unitFileContentsTests {
|
||||
test.in.SetName(test.dev)
|
||||
result := test.in.unitFileContents()
|
||||
if !bytes.Equal(test.out, result) {
|
||||
t.Errorf("nfd test wanted:\n %s, got:\n %s", test.out, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -63,31 +63,15 @@ func (obj *NoopRes) Close() error {
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *NoopRes) Watch() error {
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
select {
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
}
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
//obj.init.Event() // notify engine of an event (this can block)
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckApply method for Noop resource. Does nothing, returns happy!
|
||||
|
||||
@@ -52,6 +52,7 @@ func init() {
|
||||
type NspawnRes struct {
|
||||
traits.Base // add the base methods without re-implementation
|
||||
//traits.Groupable // TODO: this would be quite useful for this resource
|
||||
traits.Refreshable // needed because we embed a svc res
|
||||
|
||||
init *engine.Init
|
||||
|
||||
@@ -166,10 +167,7 @@ func (obj *NspawnRes) Watch() error {
|
||||
bus.Signal(busChan)
|
||||
defer bus.RemoveSignal(busChan) // not needed here, but nice for symmetry
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -186,24 +184,16 @@ func (obj *NspawnRes) Watch() error {
|
||||
return fmt.Errorf("unknown event: %s", event.Name)
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
}
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,7 +214,7 @@ func (obj *Conn) matchSignal(ch chan *dbus.Signal, path dbus.ObjectPath, iface s
|
||||
call = bus.Call(engineUtil.DBusAddMatch, 0, args)
|
||||
} else {
|
||||
for _, signal := range signals {
|
||||
args := fmt.Sprintf("type='signal', path='%s', interface='%s', member'%s'", pathStr, iface, signal)
|
||||
args := fmt.Sprintf("type='signal', path='%s', interface='%s', member='%s'", pathStr, iface, signal)
|
||||
argsList = append(argsList, args)
|
||||
if call = bus.Call(engineUtil.DBusAddMatch, 0, args); call.Err != nil {
|
||||
break // fail if any one fails
|
||||
|
||||
@@ -182,10 +182,7 @@ func (obj *PasswordRes) Watch() error {
|
||||
}
|
||||
defer obj.recWatcher.Close()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -199,23 +196,15 @@ func (obj *PasswordRes) Watch() error {
|
||||
return errwrap.Wrapf(err, "unknown %s watcher error", obj)
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,20 @@ func init() {
|
||||
engine.RegisterResource("pkg", func() engine.Res { return &PkgRes{} })
|
||||
}
|
||||
|
||||
const (
|
||||
// PkgStateInstalled is the string that represents that the package
|
||||
// should be installed.
|
||||
PkgStateInstalled = "installed"
|
||||
|
||||
// PkgStateUninstalled is the string that represents that the package
|
||||
// should be uninstalled.
|
||||
PkgStateUninstalled = "uninstalled"
|
||||
|
||||
// PkgStateNewest is the string that represents that the package should
|
||||
// be installed in the newest available version.
|
||||
PkgStateNewest = "newest"
|
||||
)
|
||||
|
||||
// PkgRes is a package resource for packagekit.
|
||||
type PkgRes struct {
|
||||
traits.Base // add the base methods without re-implementation
|
||||
@@ -53,7 +67,7 @@ type PkgRes struct {
|
||||
// Default returns some sensible defaults for this resource.
|
||||
func (obj *PkgRes) Default() engine.Res {
|
||||
return &PkgRes{
|
||||
State: "installed", // i think this is preferable to "latest"
|
||||
State: PkgStateInstalled, // this *is* preferable to "newest"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -62,6 +76,9 @@ func (obj *PkgRes) Validate() error {
|
||||
if obj.State == "" {
|
||||
return fmt.Errorf("state cannot be empty")
|
||||
}
|
||||
if obj.State == "latest" {
|
||||
return fmt.Errorf("state is invalid, did you mean `newest` ?")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -104,10 +121,7 @@ func (obj *PkgRes) Watch() error {
|
||||
return errwrap.Wrapf(err, "error adding signal match")
|
||||
}
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -129,20 +143,15 @@ func (obj *PkgRes) Watch() error {
|
||||
}
|
||||
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event := <-obj.init.Events:
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -189,8 +198,8 @@ func (obj *PkgRes) pkgMappingHelper(bus *packagekit.Conn) (map[string]*packageki
|
||||
packageMap[obj.Name()] = obj.State // key is pkg name, value is pkg state
|
||||
var filter uint64 // initializes at the "zero" value of 0
|
||||
filter += packagekit.PkFilterEnumArch // always search in our arch (optional!)
|
||||
// we're requesting latest version, or to narrow down install choices!
|
||||
if obj.State == "newest" || obj.State == "installed" {
|
||||
// we're requesting newest version, or to narrow down install choices!
|
||||
if obj.State == PkgStateNewest || obj.State == PkgStateInstalled {
|
||||
// if we add this, we'll still see older packages if installed
|
||||
// this is an optimization, and is *optional*, this logic is
|
||||
// handled inside of PackagesToPackageIDs now automatically!
|
||||
@@ -204,7 +213,7 @@ func (obj *PkgRes) pkgMappingHelper(bus *packagekit.Conn) (map[string]*packageki
|
||||
}
|
||||
result, err := bus.PackagesToPackageIDs(packageMap, filter)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "Can't run PackagesToPackageIDs")
|
||||
return nil, errwrap.Wrapf(err, "can't run PackagesToPackageIDs")
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@@ -235,6 +244,10 @@ func (obj *PkgRes) populateFileList() error {
|
||||
if !ok || !data.Found {
|
||||
return fmt.Errorf("can't find package named '%s'", obj.Name())
|
||||
}
|
||||
if data.PackageID == "" {
|
||||
// this can happen if you specify a bad version like "latest"
|
||||
return fmt.Errorf("empty PackageID found for '%s'", obj.Name())
|
||||
}
|
||||
|
||||
packageIDs := []string{data.PackageID} // just one for now
|
||||
filesMap, err := bus.GetFilesByPackageID(packageIDs)
|
||||
@@ -283,13 +296,13 @@ func (obj *PkgRes) CheckApply(apply bool) (checkOK bool, err error) {
|
||||
data, _ := result[obj.Name()] // if above didn't error, we won't either!
|
||||
validState := util.BoolMapTrue(util.BoolMapValues(states))
|
||||
|
||||
// obj.State == "installed" || "uninstalled" || "newest" || "4.2-1.fc23"
|
||||
// obj.State == PkgStateInstalled || PkgStateUninstalled || PkgStateNewest || "4.2-1.fc23"
|
||||
switch obj.State {
|
||||
case "installed":
|
||||
case PkgStateInstalled:
|
||||
fallthrough
|
||||
case "uninstalled":
|
||||
case PkgStateUninstalled:
|
||||
fallthrough
|
||||
case "newest":
|
||||
case PkgStateNewest:
|
||||
if validState {
|
||||
return true, nil // state is correct, exit!
|
||||
}
|
||||
@@ -321,15 +334,15 @@ func (obj *PkgRes) CheckApply(apply bool) (checkOK bool, err error) {
|
||||
// apply correct state!
|
||||
obj.init.Logf("Set(%s): %s...", obj.State, obj.fmtNames(util.StrListIntersection(applyPackages, obj.getNames())))
|
||||
switch obj.State {
|
||||
case "uninstalled": // run remove
|
||||
case PkgStateUninstalled: // run remove
|
||||
// NOTE: packageID is different than when installed, because now
|
||||
// it has the "installed" flag added to the data portion if it!!
|
||||
// it has the "installed" flag added to the data portion of it!!
|
||||
err = bus.RemovePackages(packageIDs, transactionFlags)
|
||||
|
||||
case "newest": // TODO: isn't this the same operation as install, below?
|
||||
case PkgStateNewest: // TODO: isn't this the same operation as install, below?
|
||||
err = bus.UpdatePackages(packageIDs, transactionFlags)
|
||||
|
||||
case "installed":
|
||||
case PkgStateInstalled:
|
||||
fallthrough // same method as for "set specific version", below
|
||||
default: // version string
|
||||
err = bus.InstallPackages(packageIDs, transactionFlags)
|
||||
@@ -343,38 +356,93 @@ func (obj *PkgRes) CheckApply(apply bool) (checkOK bool, err error) {
|
||||
|
||||
// Cmp compares two resources and returns an error if they are not equivalent.
|
||||
func (obj *PkgRes) Cmp(r engine.Res) error {
|
||||
if !obj.Compare(r) {
|
||||
return fmt.Errorf("did not compare")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compare two resources and return if they are equivalent.
|
||||
func (obj *PkgRes) Compare(r engine.Res) bool {
|
||||
// we can only compare PkgRes to others of the same resource kind
|
||||
res, ok := r.(*PkgRes)
|
||||
if !ok {
|
||||
return false
|
||||
return fmt.Errorf("res is not the same kind")
|
||||
}
|
||||
|
||||
// if obj.Name != res.Name {
|
||||
// return false
|
||||
// }
|
||||
|
||||
if obj.State != res.State {
|
||||
return false
|
||||
}
|
||||
if obj.AllowUntrusted != res.AllowUntrusted {
|
||||
return false
|
||||
}
|
||||
if obj.AllowNonFree != res.AllowNonFree {
|
||||
return false
|
||||
}
|
||||
if obj.AllowUnsupported != res.AllowUnsupported {
|
||||
return false
|
||||
return fmt.Errorf("state differs: %s vs %s", obj.State, res.State)
|
||||
}
|
||||
|
||||
return true
|
||||
return obj.Adapts(res)
|
||||
}
|
||||
|
||||
// Adapts compares two resources and returns an error if they are not able to be
|
||||
// equivalently output compatible.
|
||||
func (obj *PkgRes) Adapts(r engine.CompatibleRes) error {
|
||||
res, ok := r.(*PkgRes)
|
||||
if !ok {
|
||||
return fmt.Errorf("res is not the same kind")
|
||||
}
|
||||
|
||||
if obj.State != res.State {
|
||||
e := fmt.Errorf("state differs in an incompatible way: %s vs %s", obj.State, res.State)
|
||||
if obj.State == PkgStateUninstalled || res.State == PkgStateUninstalled {
|
||||
return e
|
||||
}
|
||||
if stateIsVersion(obj.State) || stateIsVersion(res.State) {
|
||||
return e
|
||||
}
|
||||
// one must be installed, and the other must be "newest"
|
||||
}
|
||||
|
||||
if obj.AllowUntrusted != res.AllowUntrusted {
|
||||
return fmt.Errorf("allowuntrusted differs: %t vs %t", obj.AllowUntrusted, res.AllowUntrusted)
|
||||
}
|
||||
if obj.AllowNonFree != res.AllowNonFree {
|
||||
return fmt.Errorf("allownonfree differs: %t vs %t", obj.AllowNonFree, res.AllowNonFree)
|
||||
}
|
||||
if obj.AllowUnsupported != res.AllowUnsupported {
|
||||
return fmt.Errorf("allowunsupported differs: %t vs %t", obj.AllowUnsupported, res.AllowUnsupported)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Merge returns the best equivalent of the two resources. They must satisfy the
|
||||
// Adapts test for this to work.
|
||||
func (obj *PkgRes) Merge(r engine.CompatibleRes) (engine.CompatibleRes, error) {
|
||||
res, ok := r.(*PkgRes)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("res is not the same kind")
|
||||
}
|
||||
|
||||
if err := obj.Adapts(r); err != nil {
|
||||
return nil, errwrap.Wrapf(err, "can't merge resources that aren't compatible")
|
||||
}
|
||||
|
||||
// modify the copy, not the original
|
||||
x, err := engine.ResCopy(obj) // don't call our .Copy() directly!
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result, ok := x.(*PkgRes)
|
||||
if !ok {
|
||||
// bug!
|
||||
return nil, fmt.Errorf("res is not the same kind")
|
||||
}
|
||||
|
||||
// if these two were compatible but not identical, then one must be
// PkgStateNewest and the other PkgStateInstalled, so we upgrade to the
// best common denominator
|
||||
if obj.State != res.State {
|
||||
result.State = PkgStateNewest
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Copy copies the resource. Don't call it directly, use engine.ResCopy instead.
|
||||
// TODO: should this copy internal state?
|
||||
func (obj *PkgRes) Copy() engine.CopyableRes {
|
||||
return &PkgRes{
|
||||
State: obj.State,
|
||||
AllowUntrusted: obj.AllowUntrusted,
|
||||
AllowNonFree: obj.AllowNonFree,
|
||||
AllowUnsupported: obj.AllowUnsupported,
|
||||
}
|
||||
}
|
||||
|
||||
// PkgUID is the main UID struct for PkgRes.
|
||||
@@ -552,9 +620,8 @@ func (obj *PkgRes) GroupCmp(r engine.GroupableRes) error {
|
||||
if !ok {
|
||||
return fmt.Errorf("resource is not the same kind")
|
||||
}
|
||||
objStateIsVersion := (obj.State != "installed" && obj.State != "uninstalled" && obj.State != "newest") // must be a ver. string
|
||||
resStateIsVersion := (res.State != "installed" && res.State != "uninstalled" && res.State != "newest") // must be a ver. string
|
||||
if objStateIsVersion || resStateIsVersion {
|
||||
// TODO: what should we do about the empty string?
|
||||
if stateIsVersion(obj.State) || stateIsVersion(res.State) {
|
||||
// can't merge specific version checks atm
|
||||
return fmt.Errorf("resource uses a version string")
|
||||
}
|
||||
@@ -603,3 +670,10 @@ func ReturnSvcInFileList(fileList []string) []string {
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// stateIsVersion is a simple test to see if the state string is an existing
|
||||
// well-known flag.
|
||||
// TODO: what should we do about the empty string?
|
||||
func stateIsVersion(state string) bool {
|
||||
return (state != PkgStateInstalled && state != PkgStateUninstalled && state != PkgStateNewest) // must be a ver. string
|
||||
}
|
||||
|
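The Adapts/Merge pair above encodes a small compatibility lattice for pkg states: installed and newest can merge (upgrading to newest), while uninstalled and explicit version strings never merge. A standalone sketch of that rule follows; it mirrors the logic rather than calling the real methods, and the constants and main function are local to the example:

```go
package main

import "fmt"

const (
	installed   = "installed"
	uninstalled = "uninstalled"
	newest      = "newest"
)

// mergeable reports whether two pkg states could be grouped, following the
// same rules as Adapts above.
func mergeable(a, b string) bool {
	if a == b {
		return true
	}
	if a == uninstalled || b == uninstalled {
		return false // removal can't reconcile with anything else
	}
	isVer := func(s string) bool { return s != installed && s != uninstalled && s != newest }
	if isVer(a) || isVer(b) {
		return false // specific version strings never merge
	}
	return true // one is installed, the other newest -> merge to newest
}

func main() {
	fmt.Println(mergeable(installed, newest))   // true
	fmt.Println(mergeable(installed, "4.2-1"))  // false
	fmt.Println(mergeable(uninstalled, newest)) // false
}
```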
||||
@@ -66,31 +66,15 @@ func (obj *PrintRes) Close() error {
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *PrintRes) Watch() error {
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
select {
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
}
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
//obj.init.Event() // notify engine of an event (this can block)
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckApply method for Print resource. Does nothing, returns happy!
|
||||
|
||||
457
engine/resources/resources_test.go
Normal file
@@ -0,0 +1,457 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build !root
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
)
|
||||
|
||||
// TODO: consider providing this as a lib so that we can add tests into the
|
||||
// specific _test.go file of each resource.
|
||||
|
||||
// makeRes is a helper function to build a res. It should only be called in
|
||||
// tests, because it panics if something goes wrong.
|
||||
func makeRes(kind, name string) engine.Res {
|
||||
res, err := engine.NewNamedResource(kind, name)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("could not create resource: %+v", err))
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Step is used for the timeline in tests.
|
||||
type Step interface {
|
||||
Action() error
|
||||
Expect() error
|
||||
}
|
||||
|
||||
type manualStep struct {
|
||||
action func() error
|
||||
expect func() error
|
||||
}
|
||||
|
||||
func (obj *manualStep) Action() error {
|
||||
return obj.action()
|
||||
}
|
||||
func (obj *manualStep) Expect() error {
|
||||
return obj.expect()
|
||||
}
|
||||
|
||||
// NewManualStep creates a new manual step with an action and an expect test.
|
||||
func NewManualStep(action, expect func() error) Step {
|
||||
return &manualStep{
|
||||
action: action,
|
||||
expect: expect,
|
||||
}
|
||||
}
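A minimal sketch of how NewManualStep might be used to build the kind of "run command w/ timeout" helper hinted at in the TODO further below; the command, timeout, and placement alongside these helpers are assumptions for illustration, not part of this change:

// Sketch only: a manual step that runs a command bounded by a timeout as its
// action, with a no-op expect. Assumes it sits in this same test package next
// to the Step helpers above; the command and timeout are hypothetical.
package resources

import (
	"context"
	"os/exec"
	"time"
)

func newCmdStep(timeout time.Duration, name string, args ...string) Step {
	return NewManualStep(
		func() error { // action: run the command, but don't hang forever
			ctx, cancel := context.WithTimeout(context.Background(), timeout)
			defer cancel()
			return exec.CommandContext(ctx, name, args...).Run()
		},
		func() error { return nil }, // expect: nothing extra to verify
	)
}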
|
||||
|
||||
type startupStep struct {
|
||||
ms uint
|
||||
ch chan struct{} // set by test harness
|
||||
}
|
||||
|
||||
func (obj *startupStep) Action() error {
|
||||
select {
|
||||
case <-obj.ch: // called by Running() in Watch
|
||||
case <-time.After(time.Duration(obj.ms) * time.Millisecond):
|
||||
return fmt.Errorf("took too long to startup")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (obj *startupStep) Expect() error { return nil }
|
||||
|
||||
// NewStartupStep waits up to this many ms for the Watch function to start up.
|
||||
func NewStartupStep(ms uint) Step {
|
||||
return &startupStep{
|
||||
ms: ms,
|
||||
}
|
||||
}
|
||||
|
||||
type changedStep struct {
|
||||
ms uint
|
||||
expect bool // what checkOK value we're expecting
|
||||
ch chan bool // set by test harness, filled with checkOK values
|
||||
}
|
||||
|
||||
func (obj *changedStep) Action() error {
|
||||
select {
|
||||
case checkOK, ok := <-obj.ch: // from CheckApply() in test Process loop
|
||||
if !ok {
|
||||
return fmt.Errorf("channel closed unexpectedly")
|
||||
}
|
||||
if checkOK != obj.expect {
|
||||
return fmt.Errorf("got unexpected checkOK value of: %t", checkOK)
|
||||
}
|
||||
case <-time.After(time.Duration(obj.ms) * time.Millisecond):
|
||||
return fmt.Errorf("took too long to startup")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (obj *changedStep) Expect() error { return nil }
|
||||
|
||||
// NewChangedStep waits up to this many ms for a CheckApply action to occur.
|
||||
func NewChangedStep(ms uint, expect bool) Step {
|
||||
return &changedStep{
|
||||
ms: ms,
|
||||
expect: expect,
|
||||
}
|
||||
}
|
||||
|
||||
type clearChangedStep struct {
|
||||
ms uint
|
||||
ch chan bool // set by test harness, filled with checkOK values
|
||||
}
|
||||
|
||||
func (obj *clearChangedStep) Action() error {
|
||||
// read all pending events...
|
||||
for {
|
||||
select {
|
||||
case _, ok := <-obj.ch: // from CheckApply() in test Process loop
|
||||
if !ok {
|
||||
return fmt.Errorf("channel closed unexpectedly")
|
||||
}
|
||||
case <-time.After(time.Duration(obj.ms) * time.Millisecond):
|
||||
return nil // done waiting
|
||||
}
|
||||
}
|
||||
}
|
||||
func (obj *clearChangedStep) Expect() error { return nil }
|
||||
|
||||
// NewClearChangedStep waits up to this many ms for additional CheckApply
|
||||
// actions to occur, and flushes them all so that a future NewChangedStep won't
|
||||
// see unwanted events.
|
||||
func NewClearChangedStep(ms uint) Step {
|
||||
return &clearChangedStep{
|
||||
ms: ms,
|
||||
}
|
||||
}
|
||||
|
||||
func TestResources1(t *testing.T) {
|
||||
type test struct { // an individual test
|
||||
name string
|
||||
res engine.Res // a resource
|
||||
fail bool
|
||||
experr error // expected error if fail == true (nil ignores it)
|
||||
experrstr string // expected error prefix
|
||||
timeline []Step // TODO: this could be a generator that keeps pushing out steps until it's done!
|
||||
expect func() error // function to check for expected state
|
||||
cleanup func() error // function to run as cleanup
|
||||
}
|
||||
|
||||
// helpers
|
||||
// TODO: make a series of helps to orchestrate the resources (eg: edit
|
||||
// file, wait for event w/ timeout, run command w/ timeout, etc...)
|
||||
sleep := func(ms uint) Step {
|
||||
return &manualStep{
|
||||
action: func() error {
|
||||
time.Sleep(time.Duration(ms) * time.Millisecond)
|
||||
return nil
|
||||
},
|
||||
expect: func() error { return nil },
|
||||
}
|
||||
}
|
||||
fileExpect := func(p, s string) Step { // path & string
|
||||
return &manualStep{
|
||||
action: func() error { return nil },
|
||||
expect: func() error {
|
||||
content, err := ioutil.ReadFile(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if string(content) != s {
|
||||
return fmt.Errorf("contents did not match in %s", p)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
fileWrite := func(p, s string) Step { // path & string
|
||||
return &manualStep{
|
||||
action: func() error {
|
||||
// TODO: apparently using 0666 is equivalent to respecting the current umask
|
||||
const umask = 0666
|
||||
return ioutil.WriteFile(p, []byte(s), umask)
|
||||
},
|
||||
expect: func() error { return nil },
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []test{}
|
||||
{
|
||||
res := makeRes("file", "r1")
|
||||
r := res.(*FileRes) // if this panics, the test will panic
|
||||
p := "/tmp/whatever"
|
||||
s := "hello, world\n"
|
||||
r.Path = p
|
||||
contents := s
|
||||
r.Content = &contents
|
||||
|
||||
timeline := []Step{
|
||||
NewStartupStep(1000 * 60), // startup
|
||||
NewChangedStep(1000*60, false), // did we do something?
|
||||
fileExpect(p, s), // check initial state
|
||||
NewClearChangedStep(1000 * 15), // did we do something?
|
||||
fileWrite(p, "this is whatever\n"), // change state
|
||||
NewChangedStep(1000*60, false), // did we do something?
|
||||
fileExpect(p, s), // check again
|
||||
sleep(1), // we can sleep too!
|
||||
}
|
||||
|
||||
testCases = append(testCases, test{
|
||||
name: "simple res",
|
||||
res: res,
|
||||
fail: false,
|
||||
timeline: timeline,
|
||||
expect: func() error { return nil },
|
||||
cleanup: func() error { return os.Remove(p) },
|
||||
})
|
||||
}
|
||||
|
||||
names := []string{}
|
||||
for index, tc := range testCases { // run all the tests
|
||||
if tc.name == "" {
|
||||
t.Errorf("test #%d: not named", index)
|
||||
continue
|
||||
}
|
||||
if util.StrInList(tc.name, names) {
|
||||
t.Errorf("test #%d: duplicate sub test name of: %s", index, tc.name)
|
||||
continue
|
||||
}
|
||||
names = append(names, tc.name)
|
||||
t.Run(fmt.Sprintf("test #%d (%s)", index, tc.name), func(t *testing.T) {
|
||||
res, fail, experr, experrstr, timeline, expect, cleanup := tc.res, tc.fail, tc.experr, tc.experrstr, tc.timeline, tc.expect, tc.cleanup
|
||||
|
||||
t.Logf("\n\ntest #%d: Res: %+v\n", index, res)
|
||||
defer t.Logf("test #%d: done!", index)
|
||||
|
||||
// run validate!
|
||||
err := res.Validate()
|
||||
|
||||
if !fail && err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: could not validate Res: %+v", index, err)
|
||||
return
|
||||
}
|
||||
if fail && err == nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: validate passed, expected fail", index)
|
||||
return
|
||||
}
|
||||
if fail && experr != nil && err != experr { // test for specific error!
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: expected validate fail, got wrong error", index)
|
||||
t.Errorf("test #%d: got error: %+v", index, err)
|
||||
t.Errorf("test #%d: exp error: %+v", index, experr)
|
||||
return
|
||||
}
|
||||
// test for specific error string!
|
||||
if fail && experrstr != "" && !strings.HasPrefix(err.Error(), experrstr) {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: expected validate fail, got wrong error", index)
|
||||
t.Errorf("test #%d: got error: %s", index, err.Error())
|
||||
t.Errorf("test #%d: exp error: %s", index, experrstr)
|
||||
return
|
||||
}
|
||||
if fail && err != nil {
|
||||
t.Logf("test #%d: err: %+v", index, err)
|
||||
}
|
||||
|
||||
changedChan := make(chan bool, 1) // buffered!
|
||||
readyChan := make(chan struct{})
|
||||
eventChan := make(chan struct{})
|
||||
doneChan := make(chan struct{})
|
||||
debug := testing.Verbose() // set via the -test.v flag to `go test`
|
||||
logf := func(format string, v ...interface{}) {
|
||||
t.Logf(fmt.Sprintf("test #%d: Res: ", index)+format, v...)
|
||||
}
|
||||
init := &engine.Init{
|
||||
Running: func() {
|
||||
close(readyChan)
|
||||
select { // this always sends one!
|
||||
case eventChan <- struct{}{}:
|
||||
|
||||
}
|
||||
},
|
||||
// Watch runs this to send a changed event.
|
||||
Event: func() {
|
||||
select {
|
||||
case eventChan <- struct{}{}:
|
||||
|
||||
}
|
||||
},
|
||||
|
||||
// Watch listens on this for close/pause events.
|
||||
Done: doneChan,
|
||||
Debug: debug,
|
||||
Logf: logf,
|
||||
|
||||
// unused
|
||||
Recv: func() map[string]*engine.Send {
|
||||
return map[string]*engine.Send{}
|
||||
},
|
||||
}
|
||||
|
||||
// run init
|
||||
t.Logf("test #%d: running Init", index)
|
||||
err = res.Init(init)
|
||||
defer func() {
|
||||
t.Logf("test #%d: running cleanup()", index)
|
||||
if err := cleanup(); err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: could not cleanup: %+v", index, err)
|
||||
}
|
||||
}()
|
||||
closeFn := func() {
|
||||
// run close (we don't ever expect an error on close!)
|
||||
t.Logf("test #%d: running Close", index)
|
||||
if err := res.Close(); err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: could not close Res: %+v", index, err)
|
||||
//return
|
||||
}
|
||||
}
|
||||
|
||||
if !fail && err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: could not init Res: %+v", index, err)
|
||||
return
|
||||
}
|
||||
if fail && err == nil {
|
||||
closeFn() // close if Init didn't fail
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: init passed, expected fail", index)
|
||||
return
|
||||
}
|
||||
if fail && experr != nil && err != experr { // test for specific error!
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: expected init fail, got wrong error", index)
|
||||
t.Errorf("test #%d: got error: %+v", index, err)
|
||||
t.Errorf("test #%d: exp error: %+v", index, experr)
|
||||
return
|
||||
}
|
||||
// test for specific error string!
|
||||
if fail && experrstr != "" && !strings.HasPrefix(err.Error(), experrstr) {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: expected init fail, got wrong error", index)
|
||||
t.Errorf("test #%d: got error: %s", index, err.Error())
|
||||
t.Errorf("test #%d: exp error: %s", index, experrstr)
|
||||
return
|
||||
}
|
||||
if fail && err != nil {
|
||||
t.Logf("test #%d: err: %+v", index, err)
|
||||
}
|
||||
defer closeFn()
|
||||
|
||||
// run watch
|
||||
wg := &sync.WaitGroup{}
|
||||
defer wg.Wait() // if we return early
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
t.Logf("test #%d: running Watch", index)
|
||||
if err := res.Watch(); err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: Watch failed: %s", index, err.Error())
|
||||
}
|
||||
close(eventChan) // done with this part
|
||||
}()
|
||||
|
||||
// TODO: can we block here if the test fails early?
|
||||
select {
|
||||
case <-readyChan: // called by Running() in Watch
|
||||
}
|
||||
wg.Add(1)
|
||||
go func() { // run timeline
|
||||
t.Logf("test #%d: executing timeline", index)
|
||||
defer wg.Done()
|
||||
for ix, step := range timeline {
|
||||
|
||||
// magic setting of important values...
|
||||
if s, ok := step.(*startupStep); ok {
|
||||
s.ch = readyChan
|
||||
}
|
||||
if s, ok := step.(*changedStep); ok {
|
||||
s.ch = changedChan
|
||||
}
|
||||
if s, ok := step.(*clearChangedStep); ok {
|
||||
s.ch = changedChan
|
||||
}
|
||||
|
||||
t.Logf("test #%d: step(%d)...", index, ix)
|
||||
if err := step.Action(); err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: step(%d) action failed: %s", index, ix, err.Error())
|
||||
break
|
||||
}
|
||||
if err := step.Expect(); err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: step(%d) expect failed: %s", index, ix, err.Error())
|
||||
break
|
||||
}
|
||||
}
|
||||
t.Logf("test #%d: shutting down Watch", index)
|
||||
close(doneChan) // send Watch shutdown command
|
||||
}()
|
||||
Loop:
|
||||
for {
|
||||
select {
|
||||
case _, ok := <-eventChan: // from Watch()
|
||||
if !ok {
|
||||
//t.Logf("test #%d: break!", index)
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("test #%d: running CheckApply", index)
|
||||
checkOK, err := res.CheckApply(true) // no noop!
|
||||
if err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: CheckApply failed: %s", index, err.Error())
|
||||
return
|
||||
}
|
||||
select {
|
||||
// send a msg if we can, but never block
|
||||
case changedChan <- checkOK:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("test #%d: waiting for shutdown", index)
|
||||
wg.Wait()
|
||||
|
||||
if err := expect(); err != nil {
|
||||
t.Errorf("test #%d: FAIL", index)
|
||||
t.Errorf("test #%d: expect failed: %s", index, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// all done!
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -21,9 +21,12 @@ package resources
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/user"
|
||||
"path"
|
||||
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/engine/traits"
|
||||
engineUtil "github.com/purpleidea/mgmt/engine/util"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
|
||||
systemd "github.com/coreos/go-systemd/dbus" // change namespace
|
||||
@@ -69,7 +72,6 @@ func (obj *SvcRes) Validate() error {
|
||||
// Init runs some startup code for this resource.
|
||||
func (obj *SvcRes) Init(init *engine.Init) error {
|
||||
obj.init = init // save for later
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -86,6 +88,7 @@ func (obj *SvcRes) Watch() error {
|
||||
}
|
||||
|
||||
var conn *systemd.Conn
|
||||
var bus *dbus.Conn
|
||||
var err error
|
||||
if obj.Session {
|
||||
conn, err = systemd.NewUserConnection() // user session
|
||||
@@ -99,28 +102,36 @@ func (obj *SvcRes) Watch() error {
|
||||
defer conn.Close()
|
||||
|
||||
// if we share the bus with others, we will get each others messages!!
|
||||
bus, err := util.SystemBusPrivateUsable() // don't share the bus connection!
|
||||
if obj.Session {
|
||||
bus, err = util.SessionBusPrivateUsable()
|
||||
} else {
|
||||
bus, err = util.SystemBusPrivateUsable()
|
||||
}
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "failed to connect to bus")
|
||||
}
|
||||
defer bus.Close()
|
||||
|
||||
// XXX: will this detect new units?
|
||||
bus.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
|
||||
"type='signal',interface='org.freedesktop.systemd1.Manager',member='Reloading'")
|
||||
buschan := make(chan *dbus.Signal, 10)
|
||||
defer close(buschan) // NOTE: closing a chan that contains a value is ok
|
||||
bus.Signal(buschan)
|
||||
defer bus.RemoveSignal(buschan) // not needed here, but nice for symmetry
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var svc = fmt.Sprintf("%s.service", obj.Name()) // systemd name
|
||||
var send = false // send event?
|
||||
var invalid = false // does the svc exist or not?
|
||||
var previous bool // previous invalid value
|
||||
set := conn.NewSubscriptionSet() // no error should be returned
|
||||
|
||||
// TODO: do we first need to call conn.Subscribe() ?
|
||||
set := conn.NewSubscriptionSet() // no error should be returned
|
||||
subChannel, subErrors := set.Subscribe()
|
||||
//defer close(subChannel) // cannot close receive-only channel
|
||||
//defer close(subErrors) // cannot close receive-only channel
|
||||
var activeSet = false
|
||||
|
||||
for {
|
||||
@@ -147,7 +158,6 @@ func (obj *SvcRes) Watch() error {
|
||||
|
||||
if previous != invalid { // if invalid changed, send signal
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
}
|
||||
|
||||
if invalid {
|
||||
@@ -162,10 +172,8 @@ func (obj *SvcRes) Watch() error {
|
||||
// loop so that we can see the changed invalid signal
|
||||
obj.init.Logf("daemon reload")
|
||||
|
||||
case event := <-obj.init.Events:
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if !activeSet {
|
||||
@@ -203,23 +211,18 @@ func (obj *SvcRes) Watch() error {
|
||||
obj.init.Logf("stopped")
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case err := <-subErrors:
|
||||
return errwrap.Wrapf(err, "unknown %s error", obj)
|
||||
|
||||
case event := <-obj.init.Events:
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -266,7 +269,17 @@ func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
|
||||
|
||||
var running = (activestate.Value == dbus.MakeVariant("active"))
|
||||
var stateOK = ((obj.State == "") || (obj.State == "running" && running) || (obj.State == "stopped" && !running))
|
||||
var startupOK = true // XXX: DETECT AND SET
|
||||
var startupOK = true // XXX: DETECT AND SET
|
||||
|
||||
// NOTE: if this svc resource is embedded as a composite resource inside
|
||||
// of another resource using a technique such as `makeComposite()`, then
|
||||
// the Init of the embedded resource is traditionally passed through and
|
||||
// identical to the parent's Init. As a result, the data matches what is
|
||||
// expected from the parent. (This fortunately turns out to help, although
// it is important to add the Refreshable trait to the parent resource, or
// we'll panic when we call this line.) Don't use the Watch method this way
// without thinking through what actually happens when we run Send() and
// the other methods.
|
||||
var refresh = obj.init.Refresh() // do we have a pending reload to apply?
|
||||
|
||||
if stateOK && startupOK && !refresh {
|
||||
@@ -319,7 +332,12 @@ func (obj *SvcRes) CheckApply(apply bool) (checkOK bool, err error) {
|
||||
if &status == nil {
|
||||
return false, fmt.Errorf("systemd service action result is nil")
|
||||
}
|
||||
if status != "done" {
|
||||
switch status {
|
||||
case "done":
|
||||
// pass
|
||||
case "failed":
|
||||
return false, fmt.Errorf("svc failed (selinux?)")
|
||||
default:
|
||||
return false, fmt.Errorf("unknown systemd return string: %v", status)
|
||||
}
|
||||
|
||||
@@ -369,7 +387,8 @@ type SvcUID struct {
|
||||
// information about the resource we're matching. The data that is used
// in the IFF function is what you see in the struct fields here.
|
||||
engine.BaseUID
|
||||
name string // the svc name
|
||||
name string // the svc name
|
||||
session bool // user session
|
||||
}
|
||||
|
||||
// IFF aka if and only if they are equivalent, return true. If not, false.
|
||||
@@ -378,7 +397,13 @@ func (obj *SvcUID) IFF(uid engine.ResUID) bool {
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return obj.name == res.name
|
||||
if obj.name != res.name {
|
||||
return false
|
||||
}
|
||||
if obj.session != res.session {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SvcResAutoEdges holds the state of the auto edge generator.
|
||||
@@ -420,13 +445,56 @@ func (obj *SvcResAutoEdges) Test(input []bool) bool {
|
||||
return true // keep going
|
||||
}
|
||||
|
||||
// AutoEdges returns the AutoEdge interface. In this case the systemd units.
|
||||
// SvcResAutoEdgesCron holds the state of the svc -> cron auto edge generator.
|
||||
type SvcResAutoEdgesCron struct {
|
||||
unit string // target unit
|
||||
session bool // user session
|
||||
}
|
||||
|
||||
// Next returns the next automatic edge.
|
||||
func (obj *SvcResAutoEdgesCron) Next() []engine.ResUID {
|
||||
// XXX: should this be true if SvcRes State == "stopped"?
|
||||
reversed := false
|
||||
value := &CronUID{
|
||||
BaseUID: engine.BaseUID{
|
||||
Kind: "CronRes",
|
||||
Reversed: &reversed,
|
||||
},
|
||||
unit: obj.unit, // target unit
|
||||
session: obj.session, // user session
|
||||
}
|
||||
return []engine.ResUID{value} // we return one, even though api supports N
|
||||
}
|
||||
|
||||
// Test takes the output of the last call to Next() and outputs true if we
|
||||
// should continue.
|
||||
func (obj *SvcResAutoEdgesCron) Test([]bool) bool {
|
||||
return false // only get one svc -> cron edge
|
||||
}
|
||||
|
||||
// AutoEdges returns the AutoEdge interface. In this case, systemd unit file
|
||||
// resources and cron (systemd-timer) resources.
|
||||
func (obj *SvcRes) AutoEdges() (engine.AutoEdge, error) {
|
||||
var data []engine.ResUID
|
||||
svcFiles := []string{
|
||||
var svcFiles []string
|
||||
svcFiles = []string{
|
||||
// root svc
|
||||
fmt.Sprintf("/etc/systemd/system/%s.service", obj.Name()), // takes precedence
|
||||
fmt.Sprintf("/usr/lib/systemd/system/%s.service", obj.Name()), // pkg default
|
||||
}
|
||||
if obj.Session {
|
||||
// user svc
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error getting current user")
|
||||
}
|
||||
if u.HomeDir == "" {
|
||||
return nil, fmt.Errorf("user has no home directory")
|
||||
}
|
||||
svcFiles = []string{
|
||||
path.Join(u.HomeDir, "/.config/systemd/user/", fmt.Sprintf("%s.service", obj.Name())),
|
||||
}
|
||||
}
|
||||
for _, x := range svcFiles {
|
||||
var reversed = true
|
||||
data = append(data, &FileUID{
|
||||
@@ -438,11 +506,18 @@ func (obj *SvcRes) AutoEdges() (engine.AutoEdge, error) {
|
||||
path: x, // what matters
|
||||
})
|
||||
}
|
||||
return &FileResAutoEdges{
|
||||
|
||||
fileEdge := &FileResAutoEdges{
|
||||
data: data,
|
||||
pointer: 0,
|
||||
found: false,
|
||||
}, nil
|
||||
}
|
||||
cronEdge := &SvcResAutoEdgesCron{
|
||||
session: obj.Session,
|
||||
unit: fmt.Sprintf("%s.service", obj.Name()),
|
||||
}
|
||||
|
||||
return engineUtil.AutoEdgeCombiner(fileEdge, cronEdge)
|
||||
}
|
||||
|
||||
// UIDs includes all params to make a unique identification of this object.
|
||||
@@ -450,7 +525,8 @@ func (obj *SvcRes) AutoEdges() (engine.AutoEdge, error) {
|
||||
func (obj *SvcRes) UIDs() []engine.ResUID {
|
||||
x := &SvcUID{
|
||||
BaseUID: engine.BaseUID{Name: obj.Name(), Kind: obj.Kind()},
|
||||
name: obj.Name(), // svc name
|
||||
name: obj.Name(), // svc name
|
||||
session: obj.Session, // user session
|
||||
}
|
||||
return []engine.ResUID{x}
|
||||
}
|
||||
|
||||
@@ -125,31 +125,15 @@ func (obj *TestRes) Close() error {
|
||||
|
||||
// Watch is the primary listener for this resource and it outputs events.
|
||||
func (obj *TestRes) Watch() error {
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
select {
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
}
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
//obj.init.Event() // notify engine of an event (this can block)
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckApply method for Test resource. Does nothing, returns happy!
|
||||
|
||||
@@ -75,10 +75,7 @@ func (obj *TimerRes) Watch() error {
|
||||
obj.ticker = obj.newTicker()
|
||||
defer obj.ticker.Stop()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -87,20 +84,13 @@ func (obj *TimerRes) Watch() error {
|
||||
send = true
|
||||
obj.init.Logf("received tick")
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,10 +119,7 @@ func (obj *UserRes) Watch() error {
|
||||
}
|
||||
defer obj.recWatcher.Close()
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -142,23 +139,15 @@ func (obj *UserRes) Watch() error {
|
||||
obj.init.Logf("Event(%s): %v", event.Body.Name, event.Body.Op)
|
||||
}
|
||||
send = true
|
||||
obj.init.Dirty() // dirty
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,24 +71,24 @@ type VirtRes struct {
|
||||
|
||||
init *engine.Init
|
||||
|
||||
URI string `yaml:"uri"` // connection uri, eg: qemu:///session
|
||||
State string `yaml:"state"` // running, paused, shutoff
|
||||
Transient bool `yaml:"transient"` // defined (false) or undefined (true)
|
||||
CPUs uint `yaml:"cpus"`
|
||||
MaxCPUs uint `yaml:"maxcpus"`
|
||||
Memory uint64 `yaml:"memory"` // in KBytes
|
||||
OSInit string `yaml:"osinit"` // init used by lxc
|
||||
Boot []string `yaml:"boot"` // boot order. values: fd, hd, cdrom, network
|
||||
Disk []diskDevice `yaml:"disk"`
|
||||
CDRom []cdRomDevice `yaml:"cdrom"`
|
||||
Network []networkDevice `yaml:"network"`
|
||||
Filesystem []filesystemDevice `yaml:"filesystem"`
|
||||
Auth *VirtAuth `yaml:"auth"`
|
||||
URI string `lang:"uri" yaml:"uri"` // connection uri, eg: qemu:///session
|
||||
State string `lang:"state" yaml:"state"` // running, paused, shutoff
|
||||
Transient bool `lang:"transient" yaml:"transient"` // defined (false) or undefined (true)
|
||||
CPUs uint `lang:"cpus" yaml:"cpus"`
|
||||
MaxCPUs uint `lang:"maxcpus" yaml:"maxcpus"`
|
||||
Memory uint64 `lang:"memory" yaml:"memory"` // in KBytes
|
||||
OSInit string `lang:"osinit" yaml:"osinit"` // init used by lxc
|
||||
Boot []string `lang:"boot" yaml:"boot"` // boot order. values: fd, hd, cdrom, network
|
||||
Disk []DiskDevice `lang:"disk" yaml:"disk"`
|
||||
CDRom []CDRomDevice `lang:"cdrom" yaml:"cdrom"`
|
||||
Network []NetworkDevice `lang:"network" yaml:"network"`
|
||||
Filesystem []FilesystemDevice `lang:"filesystem" yaml:"filesystem"`
|
||||
Auth *VirtAuth `lang:"auth" yaml:"auth"`
|
||||
|
||||
HotCPUs bool `yaml:"hotcpus"` // allow hotplug of cpus?
|
||||
HotCPUs bool `lang:"hotcpus" yaml:"hotcpus"` // allow hotplug of cpus?
|
||||
// FIXME: values here should be enum's!
|
||||
RestartOnDiverge string `yaml:"restartondiverge"` // restart policy: "ignore", "ifneeded", "error"
|
||||
RestartOnRefresh bool `yaml:"restartonrefresh"` // restart on refresh?
|
||||
RestartOnDiverge string `lang:"restartondiverge" yaml:"restartondiverge"` // restart policy: "ignore", "ifneeded", "error"
|
||||
RestartOnRefresh bool `lang:"restartonrefresh" yaml:"restartonrefresh"` // restart on refresh?
|
||||
|
||||
wg *sync.WaitGroup
|
||||
conn *libvirt.Connect
|
||||
@@ -103,8 +103,8 @@ type VirtRes struct {
|
||||
|
||||
// VirtAuth is used to pass credentials to libvirt.
|
||||
type VirtAuth struct {
|
||||
Username string `yaml:"username"`
|
||||
Password string `yaml:"password"`
|
||||
Username string `lang:"username" yaml:"username"`
|
||||
Password string `lang:"password" yaml:"password"`
|
||||
}
|
||||
|
||||
// Default returns some sensible defaults for this resource.
|
||||
@@ -326,10 +326,7 @@ func (obj *VirtRes) Watch() error {
|
||||
}
|
||||
defer obj.conn.DomainEventDeregister(gaCallbackID)
|
||||
|
||||
// notify engine that we're running
|
||||
if err := obj.init.Running(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Running() // when started, notify engine that we're running
|
||||
|
||||
var send = false // send event?
|
||||
for {
|
||||
@@ -340,31 +337,26 @@ func (obj *VirtRes) Watch() error {
|
||||
switch event {
|
||||
case libvirt.DOMAIN_EVENT_DEFINED:
|
||||
if obj.Transient {
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
}
|
||||
case libvirt.DOMAIN_EVENT_UNDEFINED:
|
||||
if !obj.Transient {
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
}
|
||||
case libvirt.DOMAIN_EVENT_STARTED:
|
||||
fallthrough
|
||||
case libvirt.DOMAIN_EVENT_RESUMED:
|
||||
if obj.State != "running" {
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
}
|
||||
case libvirt.DOMAIN_EVENT_SUSPENDED:
|
||||
if obj.State != "paused" {
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
}
|
||||
case libvirt.DOMAIN_EVENT_STOPPED:
|
||||
fallthrough
|
||||
case libvirt.DOMAIN_EVENT_SHUTDOWN:
|
||||
if obj.State != "shutoff" {
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
}
|
||||
processExited = true
|
||||
@@ -375,7 +367,6 @@ func (obj *VirtRes) Watch() error {
|
||||
// verify, detect and patch appropriately!
|
||||
fallthrough
|
||||
case libvirt.DOMAIN_EVENT_CRASHED:
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
processExited = true // FIXME: is this okay for PMSUSPENDED ?
|
||||
}
|
||||
@@ -390,7 +381,6 @@ func (obj *VirtRes) Watch() error {
|
||||
|
||||
if state == libvirt.CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_CONNECTED {
|
||||
obj.guestAgentConnected = true
|
||||
obj.init.Dirty() // dirty
|
||||
send = true
|
||||
obj.init.Logf("Guest agent connected")
|
||||
|
||||
@@ -409,21 +399,14 @@ func (obj *VirtRes) Watch() error {
|
||||
case err := <-errorChan:
|
||||
return fmt.Errorf("unknown %s libvirt error: %s", obj, err)
|
||||
|
||||
case event, ok := <-obj.init.Events:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := obj.init.Read(event); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-obj.init.Done: // closed by the engine to signal shutdown
|
||||
return nil
|
||||
}
|
||||
|
||||
// do all our event sending all together to avoid duplicate msgs
|
||||
if send {
|
||||
send = false
|
||||
if err := obj.init.Event(); err != nil {
|
||||
return err // exit if requested
|
||||
}
|
||||
obj.init.Event() // notify engine of an event (this can block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -970,44 +953,51 @@ type virtDevice interface {
|
||||
GetXML(idx int) string
|
||||
}
|
||||
|
||||
type diskDevice struct {
|
||||
Source string `yaml:"source"`
|
||||
Type string `yaml:"type"`
|
||||
// DiskDevice represents a disk that is attached to the virt machine.
|
||||
type DiskDevice struct {
|
||||
Source string `lang:"source" yaml:"source"`
|
||||
Type string `lang:"type" yaml:"type"`
|
||||
}
|
||||
|
||||
type cdRomDevice struct {
|
||||
Source string `yaml:"source"`
|
||||
Type string `yaml:"type"`
|
||||
// CDRomDevice represents a CDRom device that is attached to the virt machine.
|
||||
type CDRomDevice struct {
|
||||
Source string `lang:"source" yaml:"source"`
|
||||
Type string `lang:"type" yaml:"type"`
|
||||
}
|
||||
|
||||
type networkDevice struct {
|
||||
Name string `yaml:"name"`
|
||||
MAC string `yaml:"mac"`
|
||||
// NetworkDevice represents a network card that is attached to the virt machine.
|
||||
type NetworkDevice struct {
|
||||
Name string `lang:"name" yaml:"name"`
|
||||
MAC string `lang:"mac" yaml:"mac"`
|
||||
}
|
||||
|
||||
type filesystemDevice struct {
|
||||
Access string `yaml:"access"`
|
||||
Source string `yaml:"source"`
|
||||
Target string `yaml:"target"`
|
||||
ReadOnly bool `yaml:"read_only"`
|
||||
// FilesystemDevice represents a filesystem that is attached to the virt
|
||||
// machine.
|
||||
type FilesystemDevice struct {
|
||||
Access string `lang:"access" yaml:"access"`
|
||||
Source string `lang:"source" yaml:"source"`
|
||||
Target string `lang:"target" yaml:"target"`
|
||||
ReadOnly bool `lang:"read_only" yaml:"read_only"`
|
||||
}
|
||||
|
||||
func (d *diskDevice) GetXML(idx int) string {
|
||||
source, _ := util.ExpandHome(d.Source) // TODO: should we handle errors?
|
||||
// GetXML returns the XML representation of this device.
|
||||
func (obj *DiskDevice) GetXML(idx int) string {
|
||||
source, _ := util.ExpandHome(obj.Source) // TODO: should we handle errors?
|
||||
var b string
|
||||
b += "<disk type='file' device='disk'>"
|
||||
b += fmt.Sprintf("<driver name='qemu' type='%s'/>", d.Type)
|
||||
b += fmt.Sprintf("<driver name='qemu' type='%s'/>", obj.Type)
|
||||
b += fmt.Sprintf("<source file='%s'/>", source)
|
||||
b += fmt.Sprintf("<target dev='vd%s' bus='virtio'/>", util.NumToAlpha(idx))
|
||||
b += "</disk>"
|
||||
return b
|
||||
}
|
||||
|
||||
func (d *cdRomDevice) GetXML(idx int) string {
|
||||
source, _ := util.ExpandHome(d.Source) // TODO: should we handle errors?
|
||||
// GetXML returns the XML representation of this device.
|
||||
func (obj *CDRomDevice) GetXML(idx int) string {
|
||||
source, _ := util.ExpandHome(obj.Source) // TODO: should we handle errors?
|
||||
var b string
|
||||
b += "<disk type='file' device='cdrom'>"
|
||||
b += fmt.Sprintf("<driver name='qemu' type='%s'/>", d.Type)
|
||||
b += fmt.Sprintf("<driver name='qemu' type='%s'/>", obj.Type)
|
||||
b += fmt.Sprintf("<source file='%s'/>", source)
|
||||
b += fmt.Sprintf("<target dev='hd%s' bus='ide'/>", util.NumToAlpha(idx))
|
||||
b += "<readonly/>"
|
||||
@@ -1015,29 +1005,31 @@ func (d *cdRomDevice) GetXML(idx int) string {
|
||||
return b
|
||||
}
|
||||
|
||||
func (d *networkDevice) GetXML(idx int) string {
|
||||
if d.MAC == "" {
|
||||
d.MAC = randMAC()
|
||||
// GetXML returns the XML representation of this device.
|
||||
func (obj *NetworkDevice) GetXML(idx int) string {
|
||||
if obj.MAC == "" {
|
||||
obj.MAC = randMAC()
|
||||
}
|
||||
var b string
|
||||
b += "<interface type='network'>"
|
||||
b += fmt.Sprintf("<mac address='%s'/>", d.MAC)
|
||||
b += fmt.Sprintf("<source network='%s'/>", d.Name)
|
||||
b += fmt.Sprintf("<mac address='%s'/>", obj.MAC)
|
||||
b += fmt.Sprintf("<source network='%s'/>", obj.Name)
|
||||
b += "</interface>"
|
||||
return b
|
||||
}
|
||||
|
||||
func (d *filesystemDevice) GetXML(idx int) string {
|
||||
source, _ := util.ExpandHome(d.Source) // TODO: should we handle errors?
|
||||
// GetXML returns the XML representation of this device.
|
||||
func (obj *FilesystemDevice) GetXML(idx int) string {
|
||||
source, _ := util.ExpandHome(obj.Source) // TODO: should we handle errors?
|
||||
var b string
|
||||
b += "<filesystem" // open
|
||||
if d.Access != "" {
|
||||
b += fmt.Sprintf(" accessmode='%s'", d.Access)
|
||||
if obj.Access != "" {
|
||||
b += fmt.Sprintf(" accessmode='%s'", obj.Access)
|
||||
}
|
||||
b += ">" // close
|
||||
b += fmt.Sprintf("<source dir='%s'/>", source)
|
||||
b += fmt.Sprintf("<target dir='%s'/>", d.Target)
|
||||
if d.ReadOnly {
|
||||
b += fmt.Sprintf("<target dir='%s'/>", obj.Target)
|
||||
if obj.ReadOnly {
|
||||
b += "<readonly/>"
|
||||
}
|
||||
b += "</filesystem>"
|
||||
|
||||
65
engine/reverse.go
Normal file
@@ -0,0 +1,65 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package engine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ReversibleRes is an interface that a resource can implement if it wants to
|
||||
// have some resource run when it disappears. A disappearance happens when a
|
||||
// resource is defined in one instance of the graph, and is gone in the
|
||||
// subsequent one. This is helpful for building robust programs with the engine.
|
||||
// Default implementations for most of the methods declared in this interface
|
||||
// can be obtained for your resource by anonymously adding the traits.Reversible
|
||||
// struct to your resource implementation.
|
||||
type ReversibleRes interface {
|
||||
Res
|
||||
|
||||
// ReversibleMeta lets you get or set meta params for the reversible
|
||||
// trait.
|
||||
ReversibleMeta() *ReversibleMeta
|
||||
|
||||
// SetReversibleMeta lets you set all of the meta params for the
|
||||
// reversible trait in a single call.
|
||||
SetReversibleMeta(*ReversibleMeta)
|
||||
|
||||
// Reversed returns the "reverse" or "reciprocal" resource. This is used
|
||||
// to "clean" up after a previously defined resource has been removed.
|
||||
// Interestingly, this returns the core Res interface instead of a
|
||||
// ReversibleRes, because there is no requirement that the reverse of a
|
||||
// Res be the same kind of Res, and the reverse might not be reversible!
|
||||
Reversed() (Res, error)
|
||||
}
|
||||
|
||||
// ReversibleMeta provides some parameters specific to reversible resources.
|
||||
type ReversibleMeta struct {
|
||||
// Disabled specifies that reversing should be disabled for this
|
||||
// resource.
|
||||
Disabled bool
|
||||
|
||||
// TODO: add options here, including whether to reverse edges, etc...
|
||||
}
|
||||
|
||||
// Cmp compares two ReversibleMeta structs and determines if they're equivalent.
|
||||
func (obj *ReversibleMeta) Cmp(rm *ReversibleMeta) error {
|
||||
if obj.Disabled != rm.Disabled {
|
||||
return fmt.Errorf("values for Disabled are different")
|
||||
}
|
||||
return nil
|
||||
}
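A rough sketch of how a resource might opt in to reversing by embedding the traits.Reversible struct mentioned above; ExampleRes, its Content field, and the claim that the rest of the engine.Res methods exist elsewhere are all assumptions for illustration:

// Sketch only: ExampleRes is hypothetical, and the remaining engine.Res
// methods (Default, Validate, Init, Close, CheckApply, Cmp, ...) are assumed
// to be implemented elsewhere in its package.
package resources

import (
	"github.com/purpleidea/mgmt/engine"
	"github.com/purpleidea/mgmt/engine/traits"
)

type ExampleRes struct {
	traits.Reversible // provides ReversibleMeta/SetReversibleMeta

	init *engine.Init

	Content string `yaml:"content"` // hypothetical param
}

// Reversed returns the resource to run if this one disappears from the graph.
// Here it returns a copy with "cleaned up" params, and with reversing disabled
// on the copy so that the reverse itself is never reversed again.
func (obj *ExampleRes) Reversed() (engine.Res, error) {
	res := &ExampleRes{
		Content: "", // hypothetical reverse of the original param
	}
	rm := res.ReversibleMeta() // returns the defaults (Disabled: true) if unset
	rm.Disabled = true
	res.SetReversibleMeta(rm)
	return res, nil
}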
|
||||
@@ -40,3 +40,9 @@ func (obj *Edgeable) AutoEdgeMeta() *engine.AutoEdgeMeta {
|
||||
}
|
||||
return obj.meta
|
||||
}
|
||||
|
||||
// SetAutoEdgeMeta lets you set all of the meta params for the automatic edges
|
||||
// trait in a single call.
|
||||
func (obj *Edgeable) SetAutoEdgeMeta(meta *engine.AutoEdgeMeta) {
|
||||
obj.meta = meta
|
||||
}
|
||||
|
||||
@@ -47,6 +47,12 @@ func (obj *Groupable) AutoGroupMeta() *engine.AutoGroupMeta {
|
||||
return obj.meta
|
||||
}
|
||||
|
||||
// SetAutoGroupMeta lets you set all of the meta params for the automatic
|
||||
// grouping trait in a single call.
|
||||
func (obj *Groupable) SetAutoGroupMeta(meta *engine.AutoGroupMeta) {
|
||||
obj.meta = meta
|
||||
}
|
||||
|
||||
// GroupCmp compares two resources and decides if they're suitable for grouping.
|
||||
// You'll probably want to override this method when implementing a resource...
|
||||
// This base implementation assumes not, so override me!
|
||||
|
||||
@@ -38,3 +38,9 @@ func (obj *Meta) MetaParams() *engine.MetaParams {
|
||||
}
|
||||
return obj.meta
|
||||
}
|
||||
|
||||
// SetMetaParams lets you set all of the meta params for the resource in a
|
||||
// single call.
|
||||
func (obj *Meta) SetMetaParams(meta *engine.MetaParams) {
|
||||
obj.meta = meta
|
||||
}
|
||||
|
||||
48
engine/traits/reverse.go
Normal file
@@ -0,0 +1,48 @@
|
||||
// Mgmt
|
||||
// Copyright (C) 2013-2018+ James Shubin and the project contributors
|
||||
// Written by James Shubin <james@shubin.ca> and the project contributors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package traits
|
||||
|
||||
import (
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
)
|
||||
|
||||
// Reversible contains a general implementation with most of the properties and
|
||||
// methods needed to support reversing resources. It may be used as a starting
|
||||
// point to avoid re-implementing the straightforward methods.
|
||||
type Reversible struct {
|
||||
meta *engine.ReversibleMeta
|
||||
|
||||
// Bug5819 works around issue https://github.com/golang/go/issues/5819
|
||||
Bug5819 interface{} // XXX: workaround
|
||||
}
|
||||
|
||||
// ReversibleMeta lets you get or set meta params for the reversing trait.
|
||||
func (obj *Reversible) ReversibleMeta() *engine.ReversibleMeta {
|
||||
if obj.meta == nil { // set the defaults if previously empty
|
||||
obj.meta = &engine.ReversibleMeta{
|
||||
Disabled: true, // by default we're disabled
|
||||
}
|
||||
}
|
||||
return obj.meta
|
||||
}
|
||||
|
||||
// SetReversibleMeta lets you set all of the meta params for the reversing trait
|
||||
// in a single call.
|
||||
func (obj *Reversible) SetReversibleMeta(meta *engine.ReversibleMeta) {
|
||||
obj.meta = meta
|
||||
}
|
||||
@@ -19,6 +19,7 @@ package util
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
@@ -30,6 +31,7 @@ import (
|
||||
"github.com/purpleidea/mgmt/engine"
|
||||
"github.com/purpleidea/mgmt/lang/types"
|
||||
|
||||
"github.com/godbus/dbus"
|
||||
errwrap "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -44,6 +46,20 @@ const (
|
||||
// DBusRemoveMatch is the dbus method to remove a previously defined
|
||||
// AddMatch rule.
|
||||
DBusRemoveMatch = DBusInterface + ".RemoveMatch"
|
||||
// DBusSystemd1Path is the base systemd1 path.
|
||||
DBusSystemd1Path = "/org/freedesktop/systemd1"
|
||||
// DBusSystemd1Iface is the base systemd1 interface.
|
||||
DBusSystemd1Iface = "org.freedesktop.systemd1"
|
||||
// DBusSystemd1ManagerIface is the systemd manager interface used for
|
||||
// interfacing with systemd units.
|
||||
DBusSystemd1ManagerIface = DBusSystemd1Iface + ".Manager"
|
||||
// DBusRestartUnit is the dbus method for restarting systemd units.
|
||||
DBusRestartUnit = DBusSystemd1ManagerIface + ".RestartUnit"
|
||||
// DBusStopUnit is the dbus method for stopping systemd units.
|
||||
DBusStopUnit = DBusSystemd1ManagerIface + ".StopUnit"
|
||||
// DBusSignalJobRemoved is the name of the dbus signal that produces a
|
||||
// message when a dbus job is done (or has errored.)
|
||||
DBusSignalJobRemoved = "JobRemoved"
|
||||
)
|
||||
|
||||
// ResToB64 encodes a resource to a base64 encoded string (after serialization).
|
||||
@@ -259,3 +275,94 @@ func GetGID(group string) (int, error) {
|
||||
|
||||
return -1, errwrap.Wrapf(err, "group lookup error (%s)", group)
|
||||
}
|
||||
|
||||
// RestartUnit restarts the given dbus unit and waits for it to finish starting.
|
||||
func RestartUnit(ctx context.Context, conn *dbus.Conn, unit string) error {
|
||||
return unitStateAction(ctx, conn, unit, DBusRestartUnit)
|
||||
}
|
||||
|
||||
// StopUnit stops the given dbus unit and waits for it to finish stopping.
|
||||
func StopUnit(ctx context.Context, conn *dbus.Conn, unit string) error {
|
||||
return unitStateAction(ctx, conn, unit, DBusStopUnit)
|
||||
}
|
||||
|
||||
// unitStateAction is a helper function to perform state actions on systemd
|
||||
// units. It waits for the requested job to be complete before it returns.
|
||||
func unitStateAction(ctx context.Context, conn *dbus.Conn, unit, action string) error {
|
||||
// Add a dbus rule to watch the systemd1 JobRemoved signal, used to wait
|
||||
// until the job completes.
|
||||
args := []string{
|
||||
"type='signal'",
|
||||
fmt.Sprintf("path='%s'", DBusSystemd1Path),
|
||||
fmt.Sprintf("interface='%s'", DBusSystemd1ManagerIface),
|
||||
fmt.Sprintf("member='%s'", DBusSignalJobRemoved),
|
||||
fmt.Sprintf("arg2='%s'", unit),
|
||||
}
|
||||
// match dbus messages
|
||||
if call := conn.BusObject().Call(DBusAddMatch, 0, strings.Join(args, ",")); call.Err != nil {
|
||||
return errwrap.Wrapf(call.Err, "error creating dbus call")
|
||||
}
|
||||
defer conn.BusObject().Call(DBusRemoveMatch, 0, args) // ignore the error
|
||||
|
||||
// channel for godbus signal
|
||||
ch := make(chan *dbus.Signal)
|
||||
defer close(ch)
|
||||
// subscribe the channel to the signal
|
||||
conn.Signal(ch)
|
||||
defer conn.RemoveSignal(ch)
|
||||
|
||||
// perform requested action on specified unit
|
||||
sd1 := conn.Object(DBusSystemd1Iface, dbus.ObjectPath(DBusSystemd1Path))
|
||||
if call := sd1.Call(action, 0, unit, "fail"); call.Err != nil {
|
||||
return errwrap.Wrapf(call.Err, "error stopping unit: %s", unit)
|
||||
}
|
||||
|
||||
// wait for the job to be removed, indicating completion
|
||||
select {
|
||||
case event, ok := <-ch:
|
||||
if !ok {
|
||||
return fmt.Errorf("channel closed unexpectedly")
|
||||
}
|
||||
if event.Body[3] != "done" {
|
||||
return fmt.Errorf("unexpected job status: %s", event.Body[3])
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("action %s on %s failed due to context timeout", action, unit)
|
||||
}
|
||||
return nil
|
||||
}
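As a usage sketch, RestartUnit can be driven from a private bus connection with a deadline; the unit name, timeout, and helper placement here are assumptions:

// Sketch only: restart a unit over a private system bus, bounded by a
// context timeout. The unit name and timeout are hypothetical.
package util

import (
	"context"
	"time"

	mgmtUtil "github.com/purpleidea/mgmt/util"
)

func exampleRestart() error {
	conn, err := mgmtUtil.SystemBusPrivateUsable() // don't share the bus connection!
	if err != nil {
		return err
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return RestartUnit(ctx, conn, "nginx.service") // hypothetical unit
}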
|
||||
|
||||
// autoEdgeCombiner holds the state of the auto edge generator.
|
||||
type autoEdgeCombiner struct {
|
||||
ae []engine.AutoEdge
|
||||
ptr int
|
||||
}
|
||||
|
||||
// Next returns the next automatic edge.
|
||||
func (obj *autoEdgeCombiner) Next() []engine.ResUID {
|
||||
if len(obj.ae) <= obj.ptr {
|
||||
panic("shouldn't be called anymore!")
|
||||
}
|
||||
return obj.ae[obj.ptr].Next() // return the next edge
|
||||
}
|
||||
|
||||
// Test takes the output of the last call to Next() and outputs true if we
|
||||
// should continue.
|
||||
func (obj *autoEdgeCombiner) Test(input []bool) bool {
|
||||
if !obj.ae[obj.ptr].Test(input) {
|
||||
obj.ptr++ // match found, on to the next
|
||||
}
|
||||
return len(obj.ae) > obj.ptr // are there any auto edges left?
|
||||
}
|
||||
|
||||
// AutoEdgeCombiner takes any number of AutoEdge structs, and combines them
|
||||
// into a single one, so that the logic from each one can be built separately,
|
||||
// and then combined using this utility. This makes implementing different
|
||||
// AutoEdge generators much easier. This respects the Next() and Test() API,
|
||||
// and ratchets through each AutoEdge entry until they have all run their
|
||||
// course.
|
||||
func AutoEdgeCombiner(ae ...engine.AutoEdge) (engine.AutoEdge, error) {
|
||||
return &autoEdgeCombiner{
|
||||
ae: ae,
|
||||
}, nil
|
||||
}
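For context, a sketch of the consuming side of this API: the driver below and its uidMatches callback are hypothetical stand-ins for the engine's real auto-edge matcher, shown only to illustrate the Next()/Test() ratchet that AutoEdgeCombiner respects:

// Sketch only: drive any engine.AutoEdge (including a combined one) until
// its Test() says to stop. The uidMatches callback is a hypothetical proxy
// for "did this UID match something in the graph?".
package util

import (
	"github.com/purpleidea/mgmt/engine"
)

func driveAutoEdges(ae engine.AutoEdge, uidMatches func(engine.ResUID) bool) {
	for {
		uids := ae.Next() // candidate UIDs from the current generator
		input := []bool{}
		for _, uid := range uids {
			input = append(input, uidMatches(uid)) // matched or not?
		}
		if !ae.Test(input) { // false means every generator has run its course
			return
		}
	}
}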
|
||||
|
||||
@@ -91,32 +91,53 @@ func GetDeploys(obj Client) (map[uint64]string, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetDeploy gets the latest deploy if id == 0, otherwise it returns the deploy
|
||||
// with the specified id if it exists.
|
||||
// calculateMax is a helper function that returns the largest deploy id seen.
|
||||
func calculateMax(deploys map[uint64]string) uint64 {
|
||||
var max uint64
|
||||
for i := range deploys {
|
||||
if i > max {
|
||||
max = i
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
// GetDeploy returns the deploy with the specified id if it exists. If you input
|
||||
// an id of 0, you'll get back an empty deploy without error. This is useful so
|
||||
// that you can pass through this function easily.
|
||||
// FIXME: implement this more efficiently so that it doesn't have to download *all* the old deploys from etcd!
|
||||
func GetDeploy(obj Client, id uint64) (string, error) {
|
||||
result, err := GetDeploys(obj)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if id != 0 {
|
||||
str, exists := result[id]
|
||||
if !exists {
|
||||
return "", fmt.Errorf("can't find id `%d`", id)
|
||||
}
|
||||
return str, nil
|
||||
}
|
||||
// find the latest id
|
||||
var max uint64
|
||||
for i := range result {
|
||||
if i > max {
|
||||
max = i
|
||||
}
|
||||
}
|
||||
if max == 0 {
|
||||
|
||||
// don't optimize this test to the top, because it's better to catch an
|
||||
// etcd failure early if we can, rather than fail later when we deploy!
|
||||
if id == 0 {
|
||||
return "", nil // no results yet
|
||||
}
|
||||
return result[max], nil
|
||||
|
||||
str, exists := result[id]
|
||||
if !exists {
|
||||
return "", fmt.Errorf("can't find id `%d`", id)
|
||||
}
|
||||
return str, nil
|
||||
}
|
||||
|
||||
// GetMaxDeployID returns the maximum deploy id. If none are found, this returns
|
||||
// zero. You must increment the returned value by one when you add a deploy. If
|
||||
// two or more clients race for this deploy id, then the loser is not committed,
|
||||
// and must repeat this GetMaxDeployID process until it succeeds with a commit!
|
||||
func GetMaxDeployID(obj Client) (uint64, error) {
|
||||
// TODO: this was all implemented super inefficiently, fix up for perf!
|
||||
deploys, err := GetDeploys(obj) // get previous deploys
|
||||
if err != nil {
|
||||
return 0, errwrap.Wrapf(err, "error getting previous deploys")
|
||||
}
|
||||
// find the latest id
|
||||
max := calculateMax(deploys)
|
||||
return max, nil // found! (or zero)
|
||||
}
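A sketch of the increment-and-retry pattern this comment describes; the retry count, hash values, and the package placement alongside GetMaxDeployID/AddDeploy are assumptions:

// Sketch only: get the max id, increment it, and retry AddDeploy if another
// client commits the same id first. Hash arguments are placeholders, and the
// package clause stands in for wherever GetMaxDeployID/AddDeploy live.
package deployer

import (
	"fmt"
)

func deployWithRetry(obj Client, data string, tries int) error {
	for i := 0; i < tries; i++ {
		max, err := GetMaxDeployID(obj) // largest committed id, or zero
		if err != nil {
			return err
		}
		id := max + 1 // we must increment before adding our deploy
		if err := AddDeploy(obj, id, "hash", "parent-hash", &data); err == nil {
			return nil // committed!
		}
		// most likely another client raced us to this id; loop and try again
	}
	return fmt.Errorf("could not commit a deploy after %d tries", tries)
}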
|
||||
|
||||
// AddDeploy adds a new deploy. It takes an id and ensures it's sequential. If
|
||||
@@ -162,7 +183,7 @@ func AddDeploy(obj Client, id uint64, hash, pHash string, data *string) error {
|
||||
// this way, we only generate one watch event, and only when it's needed
|
||||
result, err := obj.Txn(ifs, ops, nil)
|
||||
if err != nil {
|
||||
return errwrap.Wrapf(err, "error creating deploy id %d: %s", id)
|
||||
return errwrap.Wrapf(err, "error creating deploy id %d", id)
|
||||
}
|
||||
if !result.Succeeded {
|
||||
return fmt.Errorf("could not create deploy id %d", id)
|
||||
|
||||
48
etcd/etcd.go
@@ -37,12 +37,12 @@
|
||||
//
|
||||
// Smoke testing:
|
||||
// mkdir /tmp/mgmt{A..E}
|
||||
// ./mgmt run --yaml examples/etcd1a.yaml --hostname h1 --tmp-prefix --no-pgp
|
||||
// ./mgmt run --yaml examples/etcd1b.yaml --hostname h2 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382
|
||||
// ./mgmt run --yaml examples/etcd1c.yaml --hostname h3 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2383 --server-urls http://127.0.0.1:2384
|
||||
// ./mgmt run --hostname h1 --tmp-prefix --no-pgp yaml --yaml examples/yaml/etcd1a.yaml
|
||||
// ./mgmt run --hostname h2 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382 yaml --yaml examples/yaml/etcd1b.yaml
|
||||
// ./mgmt run --hostname h3 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2383 --server-urls http://127.0.0.1:2384 yaml --yaml examples/yaml/etcd1c.yaml
|
||||
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 put /_mgmt/idealClusterSize 3
|
||||
// ./mgmt run --yaml examples/etcd1d.yaml --hostname h4 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2385 --server-urls http://127.0.0.1:2386
|
||||
// ./mgmt run --yaml examples/etcd1e.yaml --hostname h5 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2387 --server-urls http://127.0.0.1:2388
|
||||
// ./mgmt run --hostname h4 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2385 --server-urls http://127.0.0.1:2386 yaml --yaml examples/yaml/etcd1d.yaml
|
||||
// ./mgmt run --hostname h5 --tmp-prefix --no-pgp --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2387 --server-urls http://127.0.0.1:2388 yaml --yaml examples/yaml/etcd1e.yaml
|
||||
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 member list
|
||||
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2381 put /_mgmt/idealClusterSize 5
|
||||
// ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2381 member list
|
||||
@@ -194,6 +194,7 @@ type EmbdEtcd struct { // EMBeddeD etcd
|
||||
advertiseClientURLs etcdtypes.URLs // client urls to advertise
|
||||
advertiseServerURLs etcdtypes.URLs // server urls to advertise
|
||||
noServer bool // disable all server peering if true
|
||||
noNetwork bool // use unix:// sockets instead of TCP for clients/servers
|
||||
|
||||
// local tracked state
|
||||
nominated etcdtypes.URLsMap // copy of who's nominated to locally track state
|
||||
@@ -209,8 +210,8 @@ type EmbdEtcd struct { // EMBeddeD etcd
|
||||
txnq chan *TN // txn queue
|
||||
|
||||
flags Flags
|
||||
prefix string // folder prefix to use for misc storage
|
||||
converger converger.Converger // converged tracking
|
||||
prefix string // folder prefix to use for misc storage
|
||||
converger *converger.Coordinator // converged tracking
|
||||
|
||||
// etcd server related
|
||||
serverwg sync.WaitGroup // wait for server to shutdown
|
||||
@@ -220,7 +221,7 @@ type EmbdEtcd struct { // EMBeddeD etcd
|
||||
}
|
||||
|
||||
// NewEmbdEtcd creates the top level embedded etcd struct client and server obj.
|
||||
func NewEmbdEtcd(hostname string, seeds, clientURLs, serverURLs, advertiseClientURLs, advertiseServerURLs etcdtypes.URLs, noServer bool, idealClusterSize uint16, flags Flags, prefix string, converger converger.Converger) *EmbdEtcd {
|
||||
func NewEmbdEtcd(hostname string, seeds, clientURLs, serverURLs, advertiseClientURLs, advertiseServerURLs etcdtypes.URLs, noServer bool, noNetwork bool, idealClusterSize uint16, flags Flags, prefix string, converger *converger.Coordinator) *EmbdEtcd {
|
||||
endpoints := make(etcdtypes.URLsMap)
|
||||
if hostname == seedSentinel { // safety
|
||||
return nil
|
||||
@@ -229,6 +230,15 @@ func NewEmbdEtcd(hostname string, seeds, clientURLs, serverURLs, advertiseClient
|
||||
log.Printf("Etcd: need at least one seed if running with --no-server!")
|
||||
return nil
|
||||
}
|
||||
if noNetwork {
|
||||
if len(clientURLs) != 0 || len(serverURLs) != 0 || len(seeds) != 0 {
|
||||
log.Printf("--no-network is mutual exclusive with --seeds, --client-urls and --server-urls")
|
||||
return nil
|
||||
}
|
||||
clientURLs, _ = etcdtypes.NewURLs([]string{"unix://clients.sock:0"})
|
||||
serverURLs, _ = etcdtypes.NewURLs([]string{"unix://servers.sock:0"})
|
||||
}
|
||||
|
||||
if len(seeds) > 0 {
|
||||
endpoints[seedSentinel] = seeds
|
||||
idealClusterSize = 0 // unset, get from running cluster
|
||||
@@ -253,6 +263,7 @@ func NewEmbdEtcd(hostname string, seeds, clientURLs, serverURLs, advertiseClient
|
||||
advertiseClientURLs: advertiseClientURLs,
|
||||
advertiseServerURLs: advertiseServerURLs,
|
||||
noServer: noServer,
|
||||
noNetwork: noNetwork,
|
||||
|
||||
idealClusterSize: idealClusterSize,
|
||||
converger: converger,
|
||||
@@ -304,7 +315,7 @@ func (obj *EmbdEtcd) GetConfig() etcd.Config {
|
||||
// XXX: filter out any urls which wouldn't resolve here ?
|
||||
for _, eps := range obj.endpoints { // flatten map
|
||||
for _, u := range eps {
|
||||
endpoints = append(endpoints, u.Host) // remove http:// prefix
|
||||
endpoints = append(endpoints, u.String()) // use full url including scheme
|
||||
}
|
||||
}
|
||||
sort.Strings(endpoints) // sort for determinism
|
||||
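The endpoint list handed to the etcd client now keeps the whole URL, scheme included, instead of trimming it down to host:port; presumably this is what lets non-http endpoints such as the unix:// sockets from --no-network survive the flattening. A sketch of the resulting shape, with endpointsMap standing in for the struct's endpoints field:

var endpointsMap etcdtypes.URLsMap // stand-in for obj.endpoints
endpoints := []string{}
for _, urls := range endpointsMap { // flatten map
	for _, u := range urls {
		endpoints = append(endpoints, u.String()) // e.g. "http://127.0.0.1:2379" or "unix://clients.sock:0"
	}
}
sort.Strings(endpoints) // sort for determinism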
@@ -753,7 +764,6 @@ func (obj *EmbdEtcd) CbLoop() {
|
||||
obj.exitwg.Add(1)
|
||||
defer obj.exitwg.Done()
|
||||
cuid := obj.converger.Register()
|
||||
cuid.SetName("Etcd: CbLoop")
|
||||
defer cuid.Unregister()
|
||||
if e := obj.Connect(false); e != nil {
|
||||
return // fatal
|
||||
@@ -822,7 +832,6 @@ func (obj *EmbdEtcd) Loop() {
|
||||
obj.exitwg.Add(1) // TODO: add these to other go routines?
|
||||
defer obj.exitwg.Done()
|
||||
cuid := obj.converger.Register()
|
||||
cuid.SetName("Etcd: Loop")
|
||||
defer cuid.Unregister()
|
||||
if e := obj.Connect(false); e != nil {
|
||||
return // fatal
|
||||
@@ -1049,6 +1058,15 @@ func (obj *EmbdEtcd) rawGet(ctx context.Context, gq *GQ) (result map[string]stri
|
||||
log.Printf("Trace: Etcd: rawGet()")
|
||||
}
|
||||
obj.rLock.RLock()
|
||||
// TODO: we're checking if this is nil to workaround a nil ptr bug...
|
||||
if obj.client == nil { // bug?
|
||||
obj.rLock.RUnlock()
|
||||
return nil, fmt.Errorf("client is nil")
|
||||
}
|
||||
if obj.client.KV == nil { // bug?
|
||||
obj.rLock.RUnlock()
|
||||
return nil, fmt.Errorf("client.KV is nil")
|
||||
}
|
||||
response, err := obj.client.KV.Get(ctx, gq.path, gq.opts...)
|
||||
obj.rLock.RUnlock()
|
||||
if err != nil || response == nil {
|
||||
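Both new checks in rawGet bail out under the read lock before touching the KV client; the TODO above describes them as a workaround for a nil pointer bug when the client goes away mid-request. A hypothetical standalone version of the same guard (the function name, parameters and import path are assumptions, following the usual clientv3 API):

import (
	"context"
	"fmt"
	"sync"

	etcd "github.com/coreos/etcd/clientv3" // path depends on the vendored etcd
)

// guardedGet takes the read lock, makes sure the client is usable, and only
// then issues the KV Get, mirroring the checks added above.
func guardedGet(ctx context.Context, rLock *sync.RWMutex, client *etcd.Client, path string, opts ...etcd.OpOption) (*etcd.GetResponse, error) {
	rLock.RLock()
	defer rLock.RUnlock()
	if client == nil || client.KV == nil { // the nil ptr workaround
		return nil, fmt.Errorf("client is nil")
	}
	return client.KV.Get(ctx, path, opts...)
}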
@@ -1683,8 +1701,12 @@ func (obj *EmbdEtcd) LocalhostClientURLs() etcdtypes.URLs {
|
||||
// look through obj.clientURLs and return the localhost ones
|
||||
urls := etcdtypes.URLs{}
|
||||
for _, x := range obj.clientURLs {
|
||||
// "localhost" or anything in 127.0.0.0/8 is valid!
|
||||
if s := x.Host; strings.HasPrefix(s, "localhost") || strings.HasPrefix(s, "127.") {
|
||||
// "localhost", ::1 or anything in 127.0.0.0/8 is valid!
|
||||
if s := x.Host; strings.HasPrefix(s, "localhost") || strings.HasPrefix(s, "127.") || strings.HasPrefix(s, "[::1]") {
|
||||
urls = append(urls, x)
|
||||
}
|
||||
// or local unix domain socket
|
||||
if x.Scheme == "unix" {
|
||||
urls = append(urls, x)
|
||||
}
|
||||
}
|
||||
|
||||
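Spelled out as a predicate, the filter that LocalhostClientURLs now applies accepts the IPv6 loopback and unix domain sockets in addition to localhost names and 127.0.0.0/8 addresses. A standalone sketch (the helper name is invented):

import (
	"net/url"
	"strings"
)

// isLocalURL reports whether a client URL points at the local machine.
func isLocalURL(u url.URL) bool {
	if u.Scheme == "unix" { // local unix domain socket
		return true
	}
	s := u.Host
	return strings.HasPrefix(s, "localhost") ||
		strings.HasPrefix(s, "127.") || // anything in 127.0.0.0/8
		strings.HasPrefix(s, "[::1]") // IPv6 loopback
}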
@@ -31,7 +31,7 @@ func TestNewEmbdEtcd(t *testing.T) {
|
||||
noServer := false
|
||||
var flags Flags
|
||||
|
||||
obj := NewEmbdEtcd("", nil, nil, nil, nil, nil, noServer, 0, flags, "", nil)
|
||||
obj := NewEmbdEtcd("", nil, nil, nil, nil, nil, noServer, false, 0, flags, "", nil)
|
||||
if obj == nil {
|
||||
t.Fatal("failed to create server object")
|
||||
}
|
||||
@@ -44,7 +44,7 @@ func TestNewEmbdEtcdConfigValidation(t *testing.T) {
|
||||
noServer := true
|
||||
var flags Flags
|
||||
|
||||
obj := NewEmbdEtcd("", seeds, nil, nil, nil, nil, noServer, 0, flags, "", nil)
|
||||
obj := NewEmbdEtcd("", seeds, nil, nil, nil, nil, noServer, false, 0, flags, "", nil)
|
||||
if obj != nil {
|
||||
t.Fatal("server initialization should fail on invalid configuration")
|
||||
}
|
||||
|
||||
@@ -341,6 +341,18 @@ func (obj *Fs) Create(name string) (afero.File, error) {
|
||||
return fileCreate(obj, name)
|
||||
}
|
||||
|
||||
// Chown is the equivalent of os.Chown. It returns ErrNotImplemented.
|
||||
func (obj *Fs) Chown(name string, uid, gid int) error {
|
||||
// FIXME: Implement Chown
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
// Lchown is the equivalent of os.Lchown. It returns ErrNotImplemented.
|
||||
func (obj *Fs) Lchown(name string, uid, gid int) error {
|
||||
// FIXME: Implement Lchown
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
// Mkdir makes a new directory.
|
||||
func (obj *Fs) Mkdir(name string, perm os.FileMode) error {
|
||||
if err := obj.mount(); err != nil {
|
||||
|
||||
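Since the new Chown and Lchown methods are stubs that always return ErrNotImplemented, callers can treat that error as "not supported" rather than as a failure. A caller-side sketch, assuming ErrNotImplemented is exported from the etcd/fs package as the doc comments suggest; the helper name is invented:

import etcdfs "github.com/purpleidea/mgmt/etcd/fs"

// chownIfSupported ignores the not-implemented case so the same code can run
// against backends that do implement Chown.
func chownIfSupported(fs *etcdfs.Fs, name string, uid, gid int) error {
	err := fs.Chown(name, uid, gid)
	if err == etcdfs.ErrNotImplemented {
		return nil // Chown is stubbed out on this filesystem for now
	}
	return err
}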
@@ -20,17 +20,21 @@
|
||||
package fs_test // named this way to make it easier for examples
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/purpleidea/mgmt/etcd"
|
||||
etcdfs "github.com/purpleidea/mgmt/etcd/fs"
|
||||
"github.com/purpleidea/mgmt/integration"
|
||||
"github.com/purpleidea/mgmt/util"
|
||||
|
||||
errwrap "github.com/pkg/errors"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// XXX: spawn etcd for this test, like `cdtmpmkdir && etcd` and then kill it...
|
||||
// XXX: write a bunch more tests to test this
|
||||
|
||||
// TODO: apparently using 0666 is equivalent to respecting the current umask
|
||||
@@ -39,13 +43,48 @@ const (
|
||||
superblock = "/some/superblock" // TODO: generate randomly per test?
|
||||
)
|
||||
|
||||
// Ensure that etcdfs.Fs implements afero.Fs.
|
||||
var _ afero.Fs = &etcdfs.Fs{}
|
||||
|
||||
// runEtcd starts etcd locally via the mgmt binary. It returns a function to
|
||||
// kill the process which the caller must use to clean up.
|
||||
func runEtcd() (func() error, error) {
|
||||
// Run mgmt as etcd backend to ensure that we are testing against the
|
||||
// appropriate vendored version of etcd rather than some unknown version.
|
||||
cmdName, err := integration.BinaryPath()
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error getting binary path")
|
||||
}
|
||||
cmd := exec.Command(cmdName, "run", "--tmp-prefix", "empty") // empty GAPI
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, errwrap.Wrapf(err, "error starting command %v", cmd)
|
||||
}
|
||||
|
||||
return func() error {
|
||||
// cleanup when we're done
|
||||
if err := cmd.Process.Signal(syscall.SIGQUIT); err != nil {
|
||||
fmt.Printf("error sending quit signal: %+v\n", err)
|
||||
}
|
||||
if err := cmd.Process.Kill(); err != nil {
|
||||
return errwrap.Wrapf(err, "error killing process")
|
||||
}
|
||||
return nil
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestFs1(t *testing.T) {
|
||||
stopEtcd, err := runEtcd()
|
||||
if err != nil {
|
||||
t.Errorf("setup error: %+v", err)
|
||||
}
|
||||
defer stopEtcd() // ignore the error
|
||||
|
||||
etcdClient := &etcd.ClientEtcd{
|
||||
Seeds: []string{"localhost:2379"}, // endpoints
|
||||
}
|
||||
|
||||
if err := etcdClient.Connect(); err != nil {
|
||||
t.Logf("client connection error: %+v", err)
|
||||
t.Errorf("client connection error: %+v", err)
|
||||
return
|
||||
}
|
||||
defer etcdClient.Destroy()
|
||||
@@ -58,22 +97,21 @@ func TestFs1(t *testing.T) {
|
||||
//var etcdFs afero.Fs = NewEtcdFs()
|
||||
|
||||
if err := etcdFs.Mkdir("/", umask); err != nil {
|
||||
t.Logf("error: %+v", err)
|
||||
t.Logf("mkdir error: %+v", err)
|
||||
if err != etcdfs.ErrExist {
|
||||
t.Errorf("mkdir error: %+v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := etcdFs.Mkdir("/tmp", umask); err != nil {
|
||||
t.Logf("error: %+v", err)
|
||||
if err != etcdfs.ErrExist {
|
||||
return
|
||||
}
|
||||
t.Errorf("mkdir2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fi, err := etcdFs.Stat("/tmp")
|
||||
if err != nil {
|
||||
t.Logf("stat error: %+v", err)
|
||||
t.Errorf("stat error: %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -82,7 +120,7 @@ func TestFs1(t *testing.T) {
|
||||
|
||||
f, err := etcdFs.Create("/tmp/foo")
|
||||
if err != nil {
|
||||
t.Logf("error: %+v", err)
|
||||
t.Errorf("create error: %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -90,104 +128,77 @@ func TestFs1(t *testing.T) {
|
||||
|
||||
i, err := f.WriteString("hello world!\n")
|
||||
if err != nil {
|
||||
t.Logf("error: %+v", err)
|
||||
t.Errorf("writestring error: %+v", err)
|
||||
return
|
||||
}
|
||||
t.Logf("wrote: %d", i)
|
||||
|
||||
if err := etcdFs.Mkdir("/tmp/d1", umask); err != nil {
|
||||
t.Logf("error: %+v", err)
|
||||
if err != etcdfs.ErrExist {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := etcdFs.Rename("/tmp/foo", "/tmp/bar"); err != nil {
|
||||
t.Logf("rename error: %+v", err)
|
||||
t.Errorf("mkdir3 error: %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
//f2, err := etcdFs.Create("/tmp/bar")
|
||||
//if err != nil {
|
||||
// t.Logf("error: %+v", err)
|
||||
// return
|
||||
//}
|
||||
if err := etcdFs.Rename("/tmp/foo", "/tmp/bar"); err != nil {
|
||||
t.Errorf("rename error: %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
//i2, err := f2.WriteString("hello bar!\n")
|
||||
//if err != nil {
|
||||
// t.Logf("error: %+v", err)
|
||||
// return
|
||||
//}
|
||||
//t.Logf("wrote: %d", i2)
|
||||
f2, err := etcdFs.Create("/tmp/bar")
|
||||
if err != nil {
|
||||
t.Errorf("create2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
i2, err := f2.WriteString("hello bar!\n")
|
||||
if err != nil {
|
||||
t.Errorf("writestring2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
t.Logf("wrote: %d", i2)
|
||||
|
||||
dir, err := etcdFs.Open("/tmp")
|
||||
if err != nil {
|
||||
t.Logf("error: %+v", err)
|
||||
t.Errorf("open error: %+v", err)
|
||||
return
|
||||
}
|
||||
names, err := dir.Readdirnames(-1)
|
||||
if err != nil && err != io.EOF {
|
||||
t.Logf("error: %+v", err)
|
||||
t.Errorf("readdirnames error: %+v", err)
|
||||
return
|
||||
}
|
||||
for _, name := range names {
|
||||
t.Logf("name in /tmp: %+v", name)
|
||||
return
|
||||
}
|
||||
|
||||
//dir, err := etcdFs.Open("/")
|
||||
//if err != nil {
|
||||
// t.Logf("error: %+v", err)
|
||||
// return
|
||||
//}
|
||||
//names, err := dir.Readdirnames(-1)
|
||||
//if err != nil && err != io.EOF {
|
||||
// t.Logf("error: %+v", err)
|
||||
// return
|
||||
//}
|
||||
//for _, name := range names {
|
||||
// t.Logf("name in /: %+v", name)
|
||||
//}
|
||||
dir, err = etcdFs.Open("/")
|
||||
if err != nil {
|
||||
t.Errorf("open2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
names, err = dir.Readdirnames(-1)
|
||||
if err != nil && err != io.EOF {
|
||||
t.Errorf("readdirnames2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
for _, name := range names {
|
||||
t.Logf("name in /: %+v", name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFs2(t *testing.T) {
|
||||
stopEtcd, err := runEtcd()
|
||||
if err != nil {
|
||||
t.Errorf("setup error: %+v", err)
|
||||
}
|
||||
defer stopEtcd() // ignore the error
|
||||
|
||||
etcdClient := &etcd.ClientEtcd{
|
||||
Seeds: []string{"localhost:2379"}, // endpoints
|
||||
}
|
||||
|
||||
if err := etcdClient.Connect(); err != nil {
|
||||
t.Logf("client connection error: %+v", err)
|
||||
return
|
||||
}
|
||||
defer etcdClient.Destroy()
|
||||
|
||||
etcdFs := &etcdfs.Fs{
|
||||
Client: etcdClient.GetClient(),
|
||||
Metadata: superblock,
|
||||
DataPrefix: etcdfs.DefaultDataPrefix,
|
||||
}
|
||||
|
||||
tree, err := util.FsTree(etcdFs, "/")
|
||||
if err != nil {
|
||||
t.Errorf("tree error: %+v", err)
|
||||
return
|
||||
}
|
||||
t.Logf("tree: \n%s", tree)
|
||||
|
||||
tree2, err := util.FsTree(etcdFs, "/tmp")
|
||||
if err != nil {
|
||||
t.Errorf("tree2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
t.Logf("tree2: \n%s", tree2)
|
||||
}
|
||||
|
||||
func TestFs3(t *testing.T) {
|
||||
etcdClient := &etcd.ClientEtcd{
|
||||
Seeds: []string{"localhost:2379"}, // endpoints
|
||||
}
|
||||
|
||||
if err := etcdClient.Connect(); err != nil {
|
||||
t.Logf("client connection error: %+v", err)
|
||||
t.Errorf("client connection error: %+v", err)
|
||||
return
|
||||
}
|
||||
defer etcdClient.Destroy()
|
||||
@@ -208,15 +219,15 @@ func TestFs3(t *testing.T) {
|
||||
var memFs = afero.NewMemMapFs()
|
||||
|
||||
if err := util.CopyFs(etcdFs, memFs, "/", "/", false); err != nil {
|
||||
t.Errorf("CopyFs error: %+v", err)
|
||||
t.Errorf("copyfs error: %+v", err)
|
||||
return
|
||||
}
|
||||
if err := util.CopyFs(etcdFs, memFs, "/", "/", true); err != nil {
|
||||
t.Errorf("CopyFs2 error: %+v", err)
|
||||
t.Errorf("copyfs2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
if err := util.CopyFs(etcdFs, memFs, "/", "/tmp/d1/", false); err != nil {
|
||||
t.Errorf("CopyFs3 error: %+v", err)
|
||||
t.Errorf("copyfs3 error: %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -227,3 +238,180 @@ func TestFs3(t *testing.T) {
|
||||
}
|
||||
t.Logf("tree2: \n%s", tree2)
|
||||
}
|
||||
|
||||
func TestFs3(t *testing.T) {
|
||||
stopEtcd, err := runEtcd()
|
||||
if err != nil {
|
||||
t.Errorf("setup error: %+v", err)
|
||||
}
|
||||
defer stopEtcd() // ignore the error
|
||||
|
||||
etcdClient := &etcd.ClientEtcd{
|
||||
Seeds: []string{"localhost:2379"}, // endpoints
|
||||
}
|
||||
|
||||
if err := etcdClient.Connect(); err != nil {
|
||||
t.Errorf("client connection error: %+v", err)
|
||||
return
|
||||
}
|
||||
defer etcdClient.Destroy()
|
||||
|
||||
etcdFs := &etcdfs.Fs{
|
||||
Client: etcdClient.GetClient(),
|
||||
Metadata: superblock,
|
||||
DataPrefix: etcdfs.DefaultDataPrefix,
|
||||
}
|
||||
|
||||
if err := etcdFs.Mkdir("/tmp", umask); err != nil {
|
||||
t.Errorf("mkdir error: %+v", err)
|
||||
}
|
||||
if err := etcdFs.Mkdir("/tmp/foo", umask); err != nil {
|
||||
t.Errorf("mkdir2 error: %+v", err)
|
||||
}
|
||||
if err := etcdFs.Mkdir("/tmp/foo/bar", umask); err != nil {
|
||||
t.Errorf("mkdir3 error: %+v", err)
|
||||
}
|
||||
|
||||
tree, err := util.FsTree(etcdFs, "/")
|
||||
if err != nil {
|
||||
t.Errorf("tree error: %+v", err)
|
||||
return
|
||||
}
|
||||
t.Logf("tree: \n%s", tree)
|
||||
|
||||
var memFs = afero.NewMemMapFs()
|
||||
|
||||
if err := util.CopyFs(etcdFs, memFs, "/tmp/foo/bar", "/", false); err != nil {
|
||||
t.Errorf("copyfs error: %+v", err)
|
||||
return
|
||||
}
|
||||
if err := util.CopyFs(etcdFs, memFs, "/tmp/foo/bar", "/baz/", false); err != nil {
|
||||
t.Errorf("copyfs2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
tree2, err := util.FsTree(memFs, "/")
|
||||
if err != nil {
|
||||
t.Errorf("tree2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
t.Logf("tree2: \n%s", tree2)
|
||||
|
||||
if _, err := memFs.Stat("/bar"); err != nil {
|
||||
t.Errorf("stat error: %+v", err)
|
||||
return
|
||||
}
|
||||
if _, err := memFs.Stat("/baz/bar"); err != nil {
|
||||
t.Errorf("stat2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestEtcdCopyFs0(t *testing.T) {
|
||||
tests := []struct {
|
||||
mkdir, cpsrc, cpdst, check string
|
||||
force bool
|
||||
}{
|
||||
{
|
||||
mkdir: "/",
|
||||
cpsrc: "/",
|
||||
cpdst: "/",
|
||||
check: "/",
|
||||
force: false,
|
||||
},
|
||||
{
|
||||
mkdir: "/",
|
||||
cpsrc: "/",
|
||||
cpdst: "/",
|
||||
check: "/",
|
||||
force: true,
|
||||
},
|
||||
{
|
||||
mkdir: "/",
|
||||
cpsrc: "/",
|
||||
cpdst: "/tmp/d1",
|
||||
check: "/tmp/d1",
|
||||
force: false,
|
||||
},
|
||||
{
|
||||
mkdir: "/tmp/foo/bar",
|
||||
cpsrc: "/tmp/foo/bar",
|
||||
cpdst: "/",
|
||||
check: "/bar",
|
||||
force: false,
|
||||
},
|
||||
{
|
||||
mkdir: "/tmp/foo/bar",
|
||||
cpsrc: "/tmp/foo/bar",
|
||||
cpdst: "/baz/",
|
||||
check: "/baz/bar",
|
||||
force: false,
|
||||
},
|
||||
{
|
||||
mkdir: "/tmp/foo/bar",
|
||||
cpsrc: "/tmp/foo",
|
||||
cpdst: "/baz/",
|
||||
check: "/baz/foo/bar",
|
||||
force: false,
|
||||
},
|
||||
{
|
||||
mkdir: "/tmp/this/is/a/really/deep/directory/to/make/sure/we/can/handle/deep/copies",
|
||||
cpsrc: "/tmp/this/is/a",
|
||||
cpdst: "/that/was/",
|
||||
check: "/that/was/a/really/deep/directory/to/make/sure/we/can/handle/deep/copies",
|
||||
force: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
stopEtcd, err := runEtcd()
|
||||
if err != nil {
|
||||
t.Errorf("setup error: %+v", err)
|
||||
return
|
||||
}
|
||||
defer stopEtcd() // ignore the error
|
||||
|
||||
etcdClient := &etcd.ClientEtcd{
|
||||
Seeds: []string{"localhost:2379"}, // endpoints
|
||||
}
|
||||
|
||||
if err := etcdClient.Connect(); err != nil {
|
||||
t.Errorf("client connection error: %+v", err)
|
||||
return
|
||||
}
|
||||
defer etcdClient.Destroy()
|
||||
|
||||
etcdFs := &etcdfs.Fs{
|
||||
Client: etcdClient.GetClient(),
|
||||
Metadata: superblock,
|
||||
DataPrefix: etcdfs.DefaultDataPrefix,
|
||||
}
|
||||
|
||||
if err := etcdFs.MkdirAll(tt.mkdir, umask); err != nil {
|
||||
t.Errorf("mkdir error: %+v", err)
|
||||
return
|
||||
}
|
||||
tree, err := util.FsTree(etcdFs, "/")
|
||||
if err != nil {
|
||||
t.Errorf("tree error: %+v", err)
|
||||
return
|
||||
}
|
||||
t.Logf("tree: \n%s", tree)
|
||||
|
||||
var memFs = afero.NewMemMapFs()
|
||||
if err := util.CopyFs(etcdFs, memFs, tt.cpsrc, tt.cpdst, tt.force); err != nil {
|
||||
t.Errorf("copyfs error: %+v", err)
|
||||
return
|
||||
}
|
||||
tree2, err := util.FsTree(memFs, "/")
|
||||
if err != nil {
|
||||
t.Errorf("tree2 error: %+v", err)
|
||||
return
|
||||
}
|
||||
t.Logf("tree2: \n%s", tree2)
|
||||
if _, err := memFs.Stat(tt.check); err != nil {
|
||||
t.Errorf("stat error: %+v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,7 +28,8 @@ import (
|
||||
// A successful call returns err == nil, not err == EOF. Because ReadAll is
|
||||
// defined to read from src until EOF, it does not treat an EOF from Read
|
||||
// as an error to be reported.
|
||||
//func ReadAll(r io.Reader) ([]byte, error) {
|
||||
//func (obj *Fs) ReadAll(r io.Reader) ([]byte, error) {
|
||||
// // NOTE: doesn't need Fs, same as ioutil.ReadAll package
|
||||
// return afero.ReadAll(r)
|
||||
//}
|
||||
|
||||
|
||||
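The commented-out ReadAll above notes that reading a whole file needs nothing filesystem specific, because afero already ships a ReadAll that works on any reader. A short sketch of that usage (the helper name is invented):

import "github.com/spf13/afero"

// readWholeFile opens a file on any afero filesystem and slurps it with
// afero.ReadAll, the same call the comment above points at.
func readWholeFile(fs afero.Fs, name string) ([]byte, error) {
	f, err := fs.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return afero.ReadAll(f) // same semantics as ioutil.ReadAll
}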
@@ -1,14 +0,0 @@
|
||||
resource "file" "file1" {
|
||||
path = "/tmp/mgmt-hello-world"
|
||||
content = "hello, world"
|
||||
state = "exists"
|
||||
depends_on = ["noop.noop1", "exec.sleep"]
|
||||
}
|
||||
|
||||
resource "noop" "noop1" {
|
||||
test = "nil"
|
||||
}
|
||||
|
||||
resource "exec" "sleep" {
|
||||
cmd = "sleep 10s"
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
resource "exec" "exec1" {
|
||||
cmd = "cat /tmp/mgmt-hello-world"
|
||||
state = "present"
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
resource "file" "file1" {
|
||||
path = "/tmp/mgmt-hello-world"
|
||||
content = "${exec.sleep.Output}"
|
||||
state = "exists"
|
||||
}
|
||||
|
||||
resource "exec" "sleep" {
|
||||
cmd = "echo hello"
|
||||
}
|
||||
@@ -1,4 +1,6 @@
|
||||
# it was a lovely surprise to me, when i realized that mgmt had the answer!
|
||||
import "fmt"
|
||||
import "example"
|
||||
print "answer" {
|
||||
msg => printf("the answer to life, the universe, and everything is: %d", answer()),
|
||||
msg => fmt.printf("the answer to life, the universe, and everything is: %d", example.answer()),
|
||||
}
|
||||
|
||||
examples/lang/autoedges1.mcl (normal file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
pkg "drbd-utils" {
|
||||
state => "installed",
|
||||
|
||||
Meta:autoedge => true,
|
||||
Meta:noop => true,
|
||||
}
|
||||
|
||||
file "/etc/drbd.conf" {
|
||||
content => "this is an mgmt test",
|
||||
state => "exists",
|
||||
|
||||
Meta:autoedge => true,
|
||||
Meta:noop => true,
|
||||
}
|
||||
|
||||
file "/etc/drbd.d/" {
|
||||
source => "/dev/null",
|
||||
state => "exists",
|
||||
|
||||
Meta:autoedge => true,
|
||||
Meta:noop => true,
|
||||
}
|
||||
|
||||
# note that the autoedges between the files and the svc don't exist yet :(
|
||||
svc "drbd" {
|
||||
state => "stopped",
|
||||
|
||||
Meta:autoedge => true,
|
||||
Meta:noop => true,
|
||||
}
|
||||
examples/lang/autogroup1.mcl (normal file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
pkg "powertop" {
|
||||
state => "installed",
|
||||
|
||||
Meta:autogroup => true,
|
||||
}
|
||||
|
||||
pkg "sl" {
|
||||
state => "installed",
|
||||
|
||||
Meta:autogroup => true,
|
||||
}
|
||||
|
||||
pkg "cowsay" {
|
||||
state => "installed",
|
||||
|
||||
Meta:autogroup => true,
|
||||
}
|
||||
examples/lang/class-include.mcl (normal file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
import "fmt"
|
||||
class foo {
|
||||
print "foo1" {
|
||||
msg => "inside foo",
|
||||
|
||||
Meta:autogroup => false,
|
||||
}
|
||||
}
|
||||
|
||||
class bar($a, $b) { # a parameterized class
|
||||
print "bar-"+ $a {
|
||||
msg => fmt.printf("inside bar: %s", $b),
|
||||
|
||||
Meta:autogroup => false,
|
||||
}
|
||||
}
|
||||
|
||||
include foo
|
||||
include foo # duplicate
|
||||
include bar("b1", "hello")
|
||||
include bar("b2", "world")
|
||||
include bar("b2", "world") # duplicate
|
||||
@@ -1,3 +1,6 @@
|
||||
import "fmt"
|
||||
import "sys"
|
||||
|
||||
$set = ["a", "b", "c", "d",]
|
||||
|
||||
$c1 = "x1" in ["x1", "x2", "x3",]
|
||||
@@ -5,18 +8,18 @@ $c2 = 42 in [4, 13, 42,]
|
||||
$c3 = "x" in $set
|
||||
$c4 = "b" in $set
|
||||
|
||||
$s = printf("1: %t, 2: %t, 3: %t, 4: %t\n", $c1, $c2, $c3, $c4)
|
||||
$s = fmt.printf("1: %t, 2: %t, 3: %t, 4: %t\n", $c1, $c2, $c3, $c4)
|
||||
|
||||
file "/tmp/mgmt/contains" {
|
||||
content => $s,
|
||||
}
|
||||
|
||||
$x = if hostname() in ["h1", "h3",] {
|
||||
printf("i (%s) am one of the chosen few!\n", hostname())
|
||||
$x = if sys.hostname() in ["h1", "h3",] {
|
||||
fmt.printf("i (%s) am one of the chosen few!\n", sys.hostname())
|
||||
} else {
|
||||
printf("i (%s) was not chosen :(\n", hostname())
|
||||
fmt.printf("i (%s) was not chosen :(\n", sys.hostname())
|
||||
}
|
||||
|
||||
file "/tmp/mgmt/hello-${hostname()}" {
|
||||
file "/tmp/mgmt/hello-${sys.hostname()}" {
|
||||
content => $x,
|
||||
}
|
||||
|
||||
examples/lang/cron0.mcl (normal file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
cron "purpleidea-oneshot" {
|
||||
session => true,
|
||||
trigger => "OnBootSec",
|
||||
time => "60",
|
||||
}
|
||||
|
||||
svc "purpleidea-oneshot" {
|
||||
session => true,
|
||||
}
|
||||
examples/lang/cron1.mcl (normal file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
cron "purpleidea-oneshot" {
|
||||
state => "absent",
|
||||
}
|
||||
examples/lang/cron2.mcl (normal file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
cron "purpleidea-oneshot" {
|
||||
trigger => "OnUnitActiveSec",
|
||||
time => "2minutes",
|
||||
}
|
||||
|
||||
svc "purpleidea-oneshot" {}
|
||||
|
||||
file "/etc/systemd/system/purpleidea-oneshot.service" {}
|
||||
examples/lang/cron3.mcl (normal file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
$home = getenv("HOME")
|
||||
|
||||
cron "purpleidea-oneshot" {
|
||||
session => true,
|
||||
trigger => "OnCalendar",
|
||||
time => "*:*:0",
|
||||
}
|
||||
|
||||
svc "purpleidea-oneshot" {
|
||||
session => true,
|
||||
}
|
||||
|
||||
file printf("%s/.config/systemd/user/purpleidea-oneshot.service", $home) {}
|
||||
examples/lang/cron4.mcl (normal file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
$home = getenv("HOME")
|
||||
|
||||
cron "purpleidea-oneshot" {
|
||||
state => "absent",
|
||||
session => true,
|
||||
trigger => "OnCalendar",
|
||||
time => "*:*:0",
|
||||
}
|
||||
|
||||
svc "purpleidea-oneshot" {
|
||||
state => "stopped",
|
||||
session => true,
|
||||
}
|
||||
|
||||
file printf("%s/.config/systemd/user/purpleidea-oneshot.service", $home) {
|
||||
state => "absent",
|
||||
}
|
||||
@@ -1,4 +1,6 @@
|
||||
$d = datetime()
|
||||
import "datetime"
|
||||
|
||||
$d = datetime.now()
|
||||
file "/tmp/mgmt/datetime" {
|
||||
content => template("Hello! It is now: {{ datetime_print . }}\n", $d),
|
||||
}
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
$secplusone = datetime() + $ayear
|
||||
import "datetime"
|
||||
import "sys"
|
||||
|
||||
$secplusone = datetime.now() + $ayear
|
||||
|
||||
# note the order of the assignment (year can come later in the code)
|
||||
$ayear = 60 * 60 * 24 * 365 # is a year in seconds (31536000)
|
||||
|
||||
$tmplvalues = struct{year => $secplusone, load => $theload,}
|
||||
|
||||
$theload = structlookup(load(), "x1")
|
||||
$theload = structlookup(sys.load(), "x1")
|
||||
|
||||
if 5 > 3 {
|
||||
file "/tmp/mgmt/datetime" {
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
$secplusone = datetime() + $ayear
|
||||
import "datetime"
|
||||
import "sys"
|
||||
import "example"
|
||||
|
||||
$secplusone = datetime.now() + $ayear
|
||||
|
||||
# note the order of the assignment (year can come later in the code)
|
||||
$ayear = 60 * 60 * 24 * 365 # is a year in seconds (31536000)
|
||||
|
||||
$tmplvalues = struct{year => $secplusone, load => $theload, vumeter => $vumeter,}
|
||||
|
||||
$theload = structlookup(load(), "x1")
|
||||
$theload = structlookup(sys.load(), "x1")
|
||||
|
||||
$vumeter = vumeter("====", 10, 0.9)
|
||||
$vumeter = example.vumeter("====", 10, 0.9)
|
||||
|
||||
file "/tmp/mgmt/datetime" {
|
||||
content => template("Now + 1 year is: {{ .year }} seconds, aka: {{ datetime_print .year }}\n\nload average: {{ .load }}\n\nvu: {{ .vumeter }}\n", $tmplvalues),
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
docker:container "mgmt-nginx" {
|
||||
state => "running",
|
||||
image => "nginx",
|
||||
cmd => ["nginx", "-g", "daemon off;",],
|
||||
ports => {"tcp" => {80 => 8080,},},
|
||||
state => "running",
|
||||
image => "nginx",
|
||||
cmd => ["nginx", "-g", "daemon off;",],
|
||||
ports => {"tcp" => {80 => 8080,},},
|
||||
}
|
||||
|
||||
examples/lang/duplicate-error.mcl (normal file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
# this combination should error
|
||||
pkg "cowsay" {
|
||||
state => "uninstalled",
|
||||
}
|
||||
|
||||
pkg "cowsay" {
|
||||
state => "installed",
|
||||
}
|
||||
examples/lang/duplicate.mcl (normal file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
pkg "cowsay" {
|
||||
state => "newest",
|
||||
}
|
||||
|
||||
pkg "cowsay" {
|
||||
state => "installed",
|
||||
}
|
||||
examples/lang/env-bad.mcl (normal file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
import "fmt"
|
||||
import "sys"
|
||||
|
||||
$x = sys.getenv("TEST", "321")
|
||||
|
||||
print "print1" {
|
||||
msg => fmt.printf("TEST is: %s", $x),
|
||||
}
|
||||
@@ -1,20 +1,23 @@
|
||||
# read and print environment variable
|
||||
# env TEST=123 EMPTY= ./mgmt run --tmp-prefix --lang=examples/lang/env0.mcl --converged-timeout=5
|
||||
# env TEST=123 EMPTY= ./mgmt run --tmp-prefix --converged-timeout=5 lang --lang=examples/lang/env0.mcl
|
||||
|
||||
$x = getenv("TEST", "321")
|
||||
import "fmt"
|
||||
import "sys"
|
||||
|
||||
$x = sys.getenv("TEST", "321")
|
||||
|
||||
print "print1" {
|
||||
msg => printf("the value of the environment variable TEST is: %s", $x),
|
||||
msg => fmt.printf("the value of the environment variable TEST is: %s", $x),
|
||||
}
|
||||
|
||||
$y = getenv("DOESNOTEXIT", "321")
|
||||
$y = sys.getenv("DOESNOTEXIT", "321")
|
||||
|
||||
print "print2" {
|
||||
msg => printf("environment variable DOESNOTEXIT does not exist, defaulting to: %s", $y),
|
||||
msg => fmt.printf("environment variable DOESNOTEXIT does not exist, defaulting to: %s", $y),
|
||||
}
|
||||
|
||||
$z = getenv("EMPTY", "456")
|
||||
$z = sys.getenv("EMPTY", "456")
|
||||
|
||||
print "print3" {
|
||||
msg => printf("same goes for epmty variables like EMPTY: %s", $z),
|
||||
msg => fmt.printf("same goes for epmty variables like EMPTY: %s", $z),
|
||||
}
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
$env = env()
|
||||
import "fmt"
|
||||
import "sys"
|
||||
|
||||
$env = sys.env()
|
||||
$m = maplookup($env, "GOPATH", "")
|
||||
|
||||
print "print0" {
|
||||
msg => if hasenv("GOPATH") {
|
||||
printf("GOPATH is: %s", $m)
|
||||
msg => if sys.hasenv("GOPATH") {
|
||||
fmt.printf("GOPATH is: %s", $m)
|
||||
} else {
|
||||
"GOPATH is missing!"
|
||||
},
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
# run this example with these commands
|
||||
# watch -n 0.1 'tail *' # run this in /tmp/mgmt/
|
||||
# time ./mgmt run --lang examples/lang/exchange0.mcl --hostname h1 --ideal-cluster-size 1 --tmp-prefix --no-pgp
|
||||
# time ./mgmt run --lang examples/lang/exchange0.mcl --hostname h2 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382 --tmp-prefix --no-pgp
|
||||
# time ./mgmt run --lang examples/lang/exchange0.mcl --hostname h3 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2383 --server-urls http://127.0.0.1:2384 --tmp-prefix --no-pgp
|
||||
# time ./mgmt run --lang examples/lang/exchange0.mcl --hostname h4 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2385 --server-urls http://127.0.0.1:2386 --tmp-prefix --no-pgp
|
||||
# time ./mgmt run --hostname h1 --ideal-cluster-size 1 --tmp-prefix --no-pgp lang --lang examples/lang/exchange0.mcl
|
||||
# time ./mgmt run --hostname h2 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382 --tmp-prefix --no-pgp lang --lang examples/lang/exchange0.mcl
|
||||
# time ./mgmt run --hostname h3 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2383 --server-urls http://127.0.0.1:2384 --tmp-prefix --no-pgp lang --lang examples/lang/exchange0.mcl
|
||||
# time ./mgmt run --hostname h4 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2385 --server-urls http://127.0.0.1:2386 --tmp-prefix --no-pgp lang --lang examples/lang/exchange0.mcl
|
||||
|
||||
import "sys"
|
||||
import "world"
|
||||
|
||||
$rand = random1(8)
|
||||
$exchanged = exchange("keyns", $rand)
|
||||
$exchanged = world.exchange("keyns", $rand)
|
||||
|
||||
file "/tmp/mgmt/exchange-${hostname()}" {
|
||||
file "/tmp/mgmt/exchange-${sys.hostname()}" {
|
||||
content => template("Found: {{ . }}\n", $exchanged),
|
||||
}
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
$dt = datetime()
|
||||
import "datetime"
|
||||
|
||||
$dt = datetime.now()
|
||||
|
||||
$hystvalues = {"ix0" => $dt, "ix1" => $dt{1}, "ix2" => $dt{2}, "ix3" => $dt{3},}
|
||||
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
file "/tmp/mgmt/${hostname()}" {
|
||||
content => "hello from ${hostname()}!\n",
|
||||
import "sys"
|
||||
|
||||
file "/tmp/mgmt/${sys.hostname()}" {
|
||||
content => "hello from ${sys.hostname()}!\n",
|
||||
state => "exists",
|
||||
}
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
import "sys"
|
||||
|
||||
file "/tmp/mgmt/systemload" {
|
||||
content => template("load average: {{ .load }} threshold: {{ .threshold }}\n", $tmplvalues),
|
||||
}
|
||||
|
||||
$tmplvalues = struct{load => $theload, threshold => $threshold,}
|
||||
|
||||
$theload = structlookup(load(), "x1")
|
||||
$theload = structlookup(sys.load(), "x1")
|
||||
$threshold = 1.5 # change me if you like
|
||||
|
||||
# simple hysteresis implementation
|
||||
|
||||
examples/lang/import0.mcl (normal file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
import "fmt"
|
||||
|
||||
test "printf" {
|
||||
anotherstr => fmt.printf("the answer is: %d", 42),
|
||||
}
|
||||
examples/lang/iteration1.mcl (normal file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
# single resource
|
||||
print "name" {}
|
||||
|
||||
# single resource, defined by list variable
|
||||
$names = ["hey", "there",]
|
||||
print $names {
|
||||
Meta:autogroup => false,
|
||||
}
|
||||
|
||||
# multiples resources, defined by list
|
||||
print ["hello", "world",] {
|
||||
Meta:autogroup => false,
|
||||
Depend => Print[$names],
|
||||
}
|
||||
|
||||
$morenames = ["wow", "cool", "amazing",]
|
||||
print $morenames {
|
||||
Meta:autogroup => false,
|
||||
}
|
||||
|
||||
Print[$names] -> Print[$morenames]
|
||||
@@ -1,9 +1,11 @@
|
||||
import "fmt"
|
||||
|
||||
$x1 = ["a", "b", "c", "d",]
|
||||
print "print4" {
|
||||
msg => printf("length is: %d", len($x1)),
|
||||
msg => fmt.printf("length is: %d", len($x1)),
|
||||
}
|
||||
|
||||
$x2 = {"a" => 1, "b" => 2, "c" => 3,}
|
||||
print "print3" {
|
||||
msg => printf("length is: %d", len($x2)),
|
||||
msg => fmt.printf("length is: %d", len($x2)),
|
||||
}
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
$theload = load()
|
||||
import "fmt"
|
||||
import "sys"
|
||||
|
||||
$theload = sys.load()
|
||||
|
||||
$x1 = structlookup($theload, "x1")
|
||||
$x5 = structlookup($theload, "x5")
|
||||
$x15 = structlookup($theload, "x15")
|
||||
|
||||
print "print1" {
|
||||
msg => printf("load average: %f, %f, %f", $x1, $x5, $x15),
|
||||
msg => fmt.printf("load average: %f, %f, %f", $x1, $x5, $x15),
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.