diff --git a/lib/main.go b/lib/main.go index 9e79923e..ede395c1 100644 --- a/lib/main.go +++ b/lib/main.go @@ -425,8 +425,8 @@ func (obj *Main) Run() error { // we need the vertices to be paused to work on them, so // run graph vertex LOCK... if !first { // TODO: we can flatten this check out I think - converger.Pause() // FIXME: add sync wait? - G.Pause(false) // sync + converger.Pause() // FIXME: add sync wait? + resources.Pause(G, false) // sync //G.UnGroup() // FIXME: implement me if needed! } @@ -437,8 +437,8 @@ func (obj *Main) Run() error { log.Printf("Main: Error creating new graph: %v", err) // unpause! if !first { - G.Start(first) // sync - converger.Start() // after G.Start() + resources.Start(G, first) // sync + converger.Start() // after G.Start() } continue } @@ -470,21 +470,21 @@ func (obj *Main) Run() error { // changes to the resources so our efficient GraphSync // will be able to re-use and cmp to the old graph. log.Printf("Main: GraphSync...") - newFullGraph, err := newGraph.GraphSync(oldGraph) + newFullGraph, err := resources.GraphSync(newGraph, oldGraph) if err != nil { log.Printf("Main: Error running graph sync: %v", err) // unpause! if !first { - G.Start(first) // sync - converger.Start() // after G.Start() + resources.Start(G, first) // sync + converger.Start() // after Start(G) } continue } oldGraph = newFullGraph // save old graph G = oldGraph.Copy() // copy to active graph - resources.AutoEdges(G) // add autoedges; modifies the graph - G.AutoGroup() // run autogroup; modifies the graph + resources.AutoEdges(G) // add autoedges; modifies the graph + resources.AutoGroup(G, &resources.NonReachabilityGrouper{}) // run autogroup; modifies the graph // TODO: do we want to do a transitive reduction? // FIXME: run a type checker that verifies all the send->recv relationships @@ -493,13 +493,13 @@ func (obj *Main) Run() error { if err := prom.UpdatePgraphStartTime(); err != nil { log.Printf("Main: Prometheus.UpdatePgraphStartTime() errored: %v", err) } - // G.Start(...) needs to be synchronous or wait, + // Start(G) needs to be synchronous or wait, // because if half of the nodes are started and // some are not ready yet and the EtcdWatch - // loops, we'll cause G.Pause(...) 
before we + // loops, we'll cause Pause(G) before we // even got going, thus causing nil pointer errors - G.Start(first) // sync - converger.Start() // after G.Start() + resources.Start(G, first) // sync + converger.Start() // after Start(G) log.Printf("Main: Graph: %v", G) // show graph if obj.Graphviz != "" { @@ -590,7 +590,7 @@ func (obj *Main) Run() error { // tell inner main loop to exit close(exitchan) - G.Exit() // tells all the children to exit, and waits for them to do so + resources.Exit(G) // tells all the children to exit, and waits for them to do so // cleanup etcd main loop last so it can process everything first if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd @@ -619,7 +619,7 @@ func (obj *Main) Run() error { func graphMetas(g *pgraph.Graph) []*resources.MetaParams { metas := []*resources.MetaParams{} for _, v := range g.Vertices() { // loop through the vertices (resources) - res := v.Res // resource + res := resources.VtoR(v) // resource meta := res.Meta() metas = append(metas, meta) } @@ -632,6 +632,6 @@ func associateData(g *pgraph.Graph, data *resources.Data) { g.SetValue("prometheus", data.Prometheus) for _, v := range g.Vertices() { - *v.Res.Data() = *data + *resources.VtoR(v).Data() = *data } } diff --git a/pgraph/autogroup_test.go b/pgraph/autogroup_test.go deleted file mode 100644 index 7c6d6dd4..00000000 --- a/pgraph/autogroup_test.go +++ /dev/null @@ -1,486 +0,0 @@ -// Mgmt -// Copyright (C) 2013-2017+ James Shubin and the project contributors -// Written by James Shubin and the project contributors -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package pgraph - -import ( - "testing" -) - -// all of the following test cases are laid out with the following semantics: -// * vertices which start with the same single letter are considered "like" -// * "like" elements should be merged -// * vertices can have any integer after their single letter "family" type -// * grouped vertices should have a name with a comma separated list of names -// * edges follow the same conventions about grouping - -// empty graph -func TestPgraphGrouping1(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - g2, _ := NewGraph("g2") // expected result - runGraphCmp(t, g1, g2) -} - -// single vertex -func TestPgraphGrouping2(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { // grouping to limit variable scope - a1 := NewVertex(NewNoopResTest("a1")) - g1.AddVertex(a1) - } - g2, _ := NewGraph("g2") // expected result - { - a1 := NewVertex(NewNoopResTest("a1")) - g2.AddVertex(a1) - } - runGraphCmp(t, g1, g2) -} - -// two vertices -func TestPgraphGrouping3(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - b1 := NewVertex(NewNoopResTest("b1")) - g1.AddVertex(a1, b1) - } - g2, _ := NewGraph("g2") // expected result - { - a1 := NewVertex(NewNoopResTest("a1")) - b1 := NewVertex(NewNoopResTest("b1")) - g2.AddVertex(a1, b1) - } - runGraphCmp(t, g1, g2) -} - -// two vertices merge -func TestPgraphGrouping4(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - g1.AddVertex(a1, a2) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2")) - g2.AddVertex(a) - } - runGraphCmp(t, g1, g2) -} - -// three vertices merge -func TestPgraphGrouping5(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - a3 := NewVertex(NewNoopResTest("a3")) - g1.AddVertex(a1, a2, a3) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2,a3")) - g2.AddVertex(a) - } - runGraphCmp(t, g1, g2) -} - -// three vertices, two merge -func TestPgraphGrouping6(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - b1 := NewVertex(NewNoopResTest("b1")) - g1.AddVertex(a1, a2, b1) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2")) - b1 := NewVertex(NewNoopResTest("b1")) - g2.AddVertex(a, b1) - } - runGraphCmp(t, g1, g2) -} - -// four vertices, three merge -func TestPgraphGrouping7(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - a3 := NewVertex(NewNoopResTest("a3")) - b1 := NewVertex(NewNoopResTest("b1")) - g1.AddVertex(a1, a2, a3, b1) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2,a3")) - b1 := NewVertex(NewNoopResTest("b1")) - g2.AddVertex(a, b1) - } - runGraphCmp(t, g1, g2) -} - -// four vertices, two&two merge -func TestPgraphGrouping8(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - b1 := NewVertex(NewNoopResTest("b1")) - b2 := NewVertex(NewNoopResTest("b2")) - g1.AddVertex(a1, a2, b1, b2) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2")) - b := 
NewVertex(NewNoopResTest("b1,b2")) - g2.AddVertex(a, b) - } - runGraphCmp(t, g1, g2) -} - -// five vertices, two&three merge -func TestPgraphGrouping9(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - b1 := NewVertex(NewNoopResTest("b1")) - b2 := NewVertex(NewNoopResTest("b2")) - b3 := NewVertex(NewNoopResTest("b3")) - g1.AddVertex(a1, a2, b1, b2, b3) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2")) - b := NewVertex(NewNoopResTest("b1,b2,b3")) - g2.AddVertex(a, b) - } - runGraphCmp(t, g1, g2) -} - -// three unique vertices -func TestPgraphGrouping10(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - b1 := NewVertex(NewNoopResTest("b1")) - c1 := NewVertex(NewNoopResTest("c1")) - g1.AddVertex(a1, b1, c1) - } - g2, _ := NewGraph("g2") // expected result - { - a1 := NewVertex(NewNoopResTest("a1")) - b1 := NewVertex(NewNoopResTest("b1")) - c1 := NewVertex(NewNoopResTest("c1")) - g2.AddVertex(a1, b1, c1) - } - runGraphCmp(t, g1, g2) -} - -// three unique vertices, two merge -func TestPgraphGrouping11(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - b1 := NewVertex(NewNoopResTest("b1")) - b2 := NewVertex(NewNoopResTest("b2")) - c1 := NewVertex(NewNoopResTest("c1")) - g1.AddVertex(a1, b1, b2, c1) - } - g2, _ := NewGraph("g2") // expected result - { - a1 := NewVertex(NewNoopResTest("a1")) - b := NewVertex(NewNoopResTest("b1,b2")) - c1 := NewVertex(NewNoopResTest("c1")) - g2.AddVertex(a1, b, c1) - } - runGraphCmp(t, g1, g2) -} - -// simple merge 1 -// a1 a2 a1,a2 -// \ / >>> | (arrows point downwards) -// b b -func TestPgraphGrouping12(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - b1 := NewVertex(NewNoopResTest("b1")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - g1.AddEdge(a1, b1, e1) - g1.AddEdge(a2, b1, e2) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2")) - b1 := NewVertex(NewNoopResTest("b1")) - e := NewEdge("e1,e2") - g2.AddEdge(a, b1, e) - } - runGraphCmp(t, g1, g2) -} - -// simple merge 2 -// b b -// / \ >>> | (arrows point downwards) -// a1 a2 a1,a2 -func TestPgraphGrouping13(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - b1 := NewVertex(NewNoopResTest("b1")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - g1.AddEdge(b1, a1, e1) - g1.AddEdge(b1, a2, e2) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2")) - b1 := NewVertex(NewNoopResTest("b1")) - e := NewEdge("e1,e2") - g2.AddEdge(b1, a, e) - } - runGraphCmp(t, g1, g2) -} - -// triple merge -// a1 a2 a3 a1,a2,a3 -// \ | / >>> | (arrows point downwards) -// b b -func TestPgraphGrouping14(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - a3 := NewVertex(NewNoopResTest("a3")) - b1 := NewVertex(NewNoopResTest("b1")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - e3 := NewEdge("e3") - g1.AddEdge(a1, b1, e1) - g1.AddEdge(a2, b1, e2) - g1.AddEdge(a3, b1, e3) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2,a3")) - b1 := NewVertex(NewNoopResTest("b1")) - e := NewEdge("e1,e2,e3") - 
g2.AddEdge(a, b1, e) - } - runGraphCmp(t, g1, g2) -} - -// chain merge -// a1 a1 -// / \ | -// b1 b2 >>> b1,b2 (arrows point downwards) -// \ / | -// c1 c1 -func TestPgraphGrouping15(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - b1 := NewVertex(NewNoopResTest("b1")) - b2 := NewVertex(NewNoopResTest("b2")) - c1 := NewVertex(NewNoopResTest("c1")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - e3 := NewEdge("e3") - e4 := NewEdge("e4") - g1.AddEdge(a1, b1, e1) - g1.AddEdge(a1, b2, e2) - g1.AddEdge(b1, c1, e3) - g1.AddEdge(b2, c1, e4) - } - g2, _ := NewGraph("g2") // expected result - { - a1 := NewVertex(NewNoopResTest("a1")) - b := NewVertex(NewNoopResTest("b1,b2")) - c1 := NewVertex(NewNoopResTest("c1")) - e1 := NewEdge("e1,e2") - e2 := NewEdge("e3,e4") - g2.AddEdge(a1, b, e1) - g2.AddEdge(b, c1, e2) - } - runGraphCmp(t, g1, g2) -} - -// re-attach 1 (outer) -// technically the second possibility is valid too, depending on which order we -// merge edges in, and if we don't filter out any unnecessary edges afterwards! -// a1 a2 a1,a2 a1,a2 -// | / | | \ -// b1 / >>> b1 OR b1 / (arrows point downwards) -// | / | | / -// c1 c1 c1 -func TestPgraphGrouping16(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - b1 := NewVertex(NewNoopResTest("b1")) - c1 := NewVertex(NewNoopResTest("c1")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - e3 := NewEdge("e3") - g1.AddEdge(a1, b1, e1) - g1.AddEdge(b1, c1, e2) - g1.AddEdge(a2, c1, e3) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2")) - b1 := NewVertex(NewNoopResTest("b1")) - c1 := NewVertex(NewNoopResTest("c1")) - e1 := NewEdge("e1,e3") - e2 := NewEdge("e2,e3") // e3 gets "merged through" to BOTH edges! 
- g2.AddEdge(a, b1, e1) - g2.AddEdge(b1, c1, e2) - } - runGraphCmp(t, g1, g2) -} - -// re-attach 2 (inner) -// a1 b2 a1 -// | / | -// b1 / >>> b1,b2 (arrows point downwards) -// | / | -// c1 c1 -func TestPgraphGrouping17(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - b1 := NewVertex(NewNoopResTest("b1")) - b2 := NewVertex(NewNoopResTest("b2")) - c1 := NewVertex(NewNoopResTest("c1")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - e3 := NewEdge("e3") - g1.AddEdge(a1, b1, e1) - g1.AddEdge(b1, c1, e2) - g1.AddEdge(b2, c1, e3) - } - g2, _ := NewGraph("g2") // expected result - { - a1 := NewVertex(NewNoopResTest("a1")) - b := NewVertex(NewNoopResTest("b1,b2")) - c1 := NewVertex(NewNoopResTest("c1")) - e1 := NewEdge("e1") - e2 := NewEdge("e2,e3") - g2.AddEdge(a1, b, e1) - g2.AddEdge(b, c1, e2) - } - runGraphCmp(t, g1, g2) -} - -// re-attach 3 (double) -// similar to "re-attach 1", technically there is a second possibility for this -// a2 a1 b2 a1,a2 -// \ | / | -// \ b1 / >>> b1,b2 (arrows point downwards) -// \ | / | -// c1 c1 -func TestPgraphGrouping18(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - b1 := NewVertex(NewNoopResTest("b1")) - b2 := NewVertex(NewNoopResTest("b2")) - c1 := NewVertex(NewNoopResTest("c1")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - e3 := NewEdge("e3") - e4 := NewEdge("e4") - g1.AddEdge(a1, b1, e1) - g1.AddEdge(b1, c1, e2) - g1.AddEdge(a2, c1, e3) - g1.AddEdge(b2, c1, e4) - } - g2, _ := NewGraph("g2") // expected result - { - a := NewVertex(NewNoopResTest("a1,a2")) - b := NewVertex(NewNoopResTest("b1,b2")) - c1 := NewVertex(NewNoopResTest("c1")) - e1 := NewEdge("e1,e3") - e2 := NewEdge("e2,e3,e4") // e3 gets "merged through" to BOTH edges! - g2.AddEdge(a, b, e1) - g2.AddEdge(b, c1, e2) - } - runGraphCmp(t, g1, g2) -} - -// connected merge 0, (no change!) -// a1 a1 -// \ >>> \ (arrows point downwards) -// a2 a2 -func TestPgraphGroupingConnected0(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - e1 := NewEdge("e1") - g1.AddEdge(a1, a2, e1) - } - g2, _ := NewGraph("g2") // expected result ? - { - a1 := NewVertex(NewNoopResTest("a1")) - a2 := NewVertex(NewNoopResTest("a2")) - e1 := NewEdge("e1") - g2.AddEdge(a1, a2, e1) - } - runGraphCmp(t, g1, g2) -} - -// connected merge 1, (no change!) -// a1 a1 -// \ \ -// b >>> b (arrows point downwards) -// \ \ -// a2 a2 -func TestPgraphGroupingConnected1(t *testing.T) { - g1, _ := NewGraph("g1") // original graph - { - a1 := NewVertex(NewNoopResTest("a1")) - b := NewVertex(NewNoopResTest("b")) - a2 := NewVertex(NewNoopResTest("a2")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - g1.AddEdge(a1, b, e1) - g1.AddEdge(b, a2, e2) - } - g2, _ := NewGraph("g2") // expected result ? 
- { - a1 := NewVertex(NewNoopResTest("a1")) - b := NewVertex(NewNoopResTest("b")) - a2 := NewVertex(NewNoopResTest("a2")) - e1 := NewEdge("e1") - e2 := NewEdge("e2") - g2.AddEdge(a1, b, e1) - g2.AddEdge(b, a2, e2) - } - runGraphCmp(t, g1, g2) -} diff --git a/pgraph/graphviz.go b/pgraph/graphviz.go index e9b97876..e1c051c2 100644 --- a/pgraph/graphviz.go +++ b/pgraph/graphviz.go @@ -45,15 +45,15 @@ func (g *Graph) Graphviz() (out string) { out += fmt.Sprintf("\tlabel=\"%s\";\n", g.GetName()) //out += "\tnode [shape=box];\n" str := "" - for i := range g.adjacency { // reverse paths - out += fmt.Sprintf("\t\"%s\" [label=\"%s[%s]\"];\n", i.GetName(), i.GetKind(), i.GetName()) - for j := range g.adjacency[i] { - k := g.adjacency[i][j] + for i := range g.Adjacency() { // reverse paths + out += fmt.Sprintf("\t\"%s\" [label=\"%s\"];\n", i, i) + for j := range g.Adjacency()[i] { + k := g.Adjacency()[i][j] // use str for clearer output ordering if k.Notify { - str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\",style=bold];\n", i.GetName(), j.GetName(), k.Name) + str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\",style=bold];\n", i, j, k.Name) } else { - str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\"];\n", i.GetName(), j.GetName(), k.Name) + str += fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\"];\n", i, j, k.Name) } } } diff --git a/pgraph/pgraph.go b/pgraph/pgraph.go index e803bce0..431fb523 100644 --- a/pgraph/pgraph.go +++ b/pgraph/pgraph.go @@ -21,10 +21,6 @@ package pgraph import ( "fmt" "sort" - "sync" - - "github.com/purpleidea/mgmt/event" - "github.com/purpleidea/mgmt/resources" errwrap "github.com/pkg/errors" ) @@ -38,17 +34,14 @@ import ( type Graph struct { Name string - adjacency map[*Vertex]map[*Vertex]*Edge // *Vertex -> *Vertex (edge) - kv map[string]interface{} // some values associated with the graph - - // legacy - fastPause bool // used to disable pokes for a fast pause - wg *sync.WaitGroup + adjacency map[Vertex]map[Vertex]*Edge // Vertex -> Vertex (edge) + kv map[string]interface{} // some values associated with the graph } -// Vertex is the primary vertex struct in this library. -type Vertex struct { - resources.Res // anonymous field +// Vertex is the primary vertex struct in this library. It can be anything that +// implements Stringer. The string output must be stable and unique in a graph. +type Vertex interface { + fmt.Stringer // String() string } // Edge is the primary edge struct in this library. @@ -65,12 +58,8 @@ func (g *Graph) Init() error { return fmt.Errorf("can't initialize graph with empty name") } - g.adjacency = make(map[*Vertex]map[*Vertex]*Edge) + g.adjacency = make(map[Vertex]map[Vertex]*Edge) g.kv = make(map[string]interface{}) - - // legacy - // ptr b/c: Mutex/WaitGroup must not be copied after first use - g.wg = &sync.WaitGroup{} return nil } @@ -85,11 +74,11 @@ func NewGraph(name string) (*Graph, error) { return g, nil } -// NewVertex returns a new graph vertex struct with a contained resource. -func NewVertex(r resources.Res) *Vertex { - return &Vertex{ - Res: r, - } +// NewVertex returns whatever was passed in. This is for compatibility with the +// usage of the old NewVertex method. This is considered deprecated. +// FIXME: remove me +func NewVertex(x Vertex) Vertex { + return x } // NewEdge returns a new graph edge struct. @@ -120,16 +109,12 @@ func (g *Graph) SetValue(key string, val interface{}) { g.kv[key] = val } -// Copy makes a copy of the graph struct +// Copy makes a copy of the graph struct. 
func (g *Graph) Copy() *Graph { newGraph := &Graph{ Name: g.Name, - adjacency: make(map[*Vertex]map[*Vertex]*Edge, len(g.adjacency)), + adjacency: make(map[Vertex]map[Vertex]*Edge, len(g.adjacency)), kv: g.kv, - - // legacy - wg: g.wg, - fastPause: g.fastPause, } for k, v := range g.adjacency { newGraph.adjacency[k] = v // copy @@ -147,17 +132,17 @@ func (g *Graph) SetName(name string) { g.Name = name } -// AddVertex uses variadic input to add all listed vertices to the graph -func (g *Graph) AddVertex(xv ...*Vertex) { +// AddVertex uses variadic input to add all listed vertices to the graph. +func (g *Graph) AddVertex(xv ...Vertex) { for _, v := range xv { if _, exists := g.adjacency[v]; !exists { - g.adjacency[v] = make(map[*Vertex]*Edge) + g.adjacency[v] = make(map[Vertex]*Edge) } } } // DeleteVertex deletes a particular vertex from the graph. -func (g *Graph) DeleteVertex(v *Vertex) { +func (g *Graph) DeleteVertex(v Vertex) { delete(g.adjacency, v) for k := range g.adjacency { delete(g.adjacency[k], v) @@ -165,7 +150,7 @@ func (g *Graph) DeleteVertex(v *Vertex) { } // AddEdge adds a directed edge to the graph from v1 to v2. -func (g *Graph) AddEdge(v1, v2 *Vertex, e *Edge) { +func (g *Graph) AddEdge(v1, v2 Vertex, e *Edge) { // NOTE: this doesn't allow more than one edge between two vertexes... g.AddVertex(v1, v2) // supports adding N vertices now // TODO: check if an edge exists to avoid overwriting it! @@ -188,7 +173,7 @@ func (g *Graph) DeleteEdge(e *Edge) { // VertexMatchFn searches for a vertex in the graph and returns the vertex if // one matches. It uses a user defined function to match. That function must // return true on match, and an error if anything goes wrong. -func (g *Graph) VertexMatchFn(fn func(*Vertex) (bool, error)) (*Vertex, error) { +func (g *Graph) VertexMatchFn(fn func(Vertex) (bool, error)) (Vertex, error) { for v := range g.adjacency { if b, err := fn(v); err != nil { return nil, errwrap.Wrapf(err, "fn in VertexMatchFn() errored") @@ -199,19 +184,8 @@ func (g *Graph) VertexMatchFn(fn func(*Vertex) (bool, error)) (*Vertex, error) { return nil, nil // nothing found } -// TODO: consider adding a mutate API. -//func (g *Graph) MutateMatch(obj resources.Res) *Vertex { -// for v := range g.adjacency { -// if err := v.Res.Mutate(obj); err == nil { -// // transmogrified! -// return v -// } -// } -// return nil -//} - // HasVertex returns if the input vertex exists in the graph. -func (g *Graph) HasVertex(v *Vertex) bool { +func (g *Graph) HasVertex(v Vertex) bool { if _, exists := g.adjacency[v]; exists { return true } @@ -234,14 +208,15 @@ func (g *Graph) NumEdges() int { // Adjacency returns the adjacency map representing this graph. This is useful // for users who which to operate on the raw data structure more efficiently. -func (g *Graph) Adjacency() map[*Vertex]map[*Vertex]*Edge { +// This works because maps are reference types so we can edit this at will. +func (g *Graph) Adjacency() map[Vertex]map[Vertex]*Edge { return g.adjacency } // Vertices returns a randomly sorted slice of all vertices in the graph. // The order is random, because the map implementation is intentionally so! -func (g *Graph) Vertices() []*Vertex { - var vertices []*Vertex +func (g *Graph) Vertices() []Vertex { + var vertices []Vertex for k := range g.adjacency { vertices = append(vertices, k) } @@ -249,9 +224,9 @@ func (g *Graph) Vertices() []*Vertex { } // VerticesChan returns a channel of all vertices in the graph. 
-func (g *Graph) VerticesChan() chan *Vertex { - ch := make(chan *Vertex) - go func(ch chan *Vertex) { +func (g *Graph) VerticesChan() chan Vertex { + ch := make(chan Vertex) + go func(ch chan Vertex) { for k := range g.adjacency { ch <- k } @@ -261,7 +236,7 @@ func (g *Graph) VerticesChan() chan *Vertex { } // VertexSlice is a linear list of vertices. It can be sorted. -type VertexSlice []*Vertex +type VertexSlice []Vertex func (vs VertexSlice) Len() int { return len(vs) } func (vs VertexSlice) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } @@ -269,8 +244,8 @@ func (vs VertexSlice) Less(i, j int) bool { return vs[i].String() < vs[j].String // VerticesSorted returns a sorted slice of all vertices in the graph // The order is sorted by String() to avoid the non-determinism in the map type -func (g *Graph) VerticesSorted() []*Vertex { - var vertices []*Vertex +func (g *Graph) VerticesSorted() []Vertex { + var vertices []Vertex for k := range g.adjacency { vertices = append(vertices, k) } @@ -283,17 +258,12 @@ func (g *Graph) String() string { return fmt.Sprintf("Vertices(%d), Edges(%d)", g.NumVertices(), g.NumEdges()) } -// String returns the canonical form for a vertex -func (v *Vertex) String() string { - return fmt.Sprintf("%s[%s]", v.Res.GetKind(), v.Res.GetName()) -} - // IncomingGraphVertices returns an array (slice) of all directed vertices to // vertex v (??? -> v). OKTimestamp should probably use this. -func (g *Graph) IncomingGraphVertices(v *Vertex) []*Vertex { +func (g *Graph) IncomingGraphVertices(v Vertex) []Vertex { // TODO: we might be able to implement this differently by reversing // the Adjacency graph and then looping through it again... - var s []*Vertex + var s []Vertex for k := range g.adjacency { // reverse paths for w := range g.adjacency[k] { if w == v { @@ -306,8 +276,8 @@ func (g *Graph) IncomingGraphVertices(v *Vertex) []*Vertex { // OutgoingGraphVertices returns an array (slice) of all vertices that vertex v // points to (v -> ???). Poke should probably use this. -func (g *Graph) OutgoingGraphVertices(v *Vertex) []*Vertex { - var s []*Vertex +func (g *Graph) OutgoingGraphVertices(v Vertex) []Vertex { + var s []Vertex for k := range g.adjacency[v] { // forward paths s = append(s, k) } @@ -316,15 +286,15 @@ func (g *Graph) OutgoingGraphVertices(v *Vertex) []*Vertex { // GraphVertices returns an array (slice) of all vertices that connect to vertex v. // This is the union of IncomingGraphVertices and OutgoingGraphVertices. -func (g *Graph) GraphVertices(v *Vertex) []*Vertex { - var s []*Vertex +func (g *Graph) GraphVertices(v Vertex) []Vertex { + var s []Vertex s = append(s, g.IncomingGraphVertices(v)...) s = append(s, g.OutgoingGraphVertices(v)...) return s } // IncomingGraphEdges returns all of the edges that point to vertex v (??? -> v). -func (g *Graph) IncomingGraphEdges(v *Vertex) []*Edge { +func (g *Graph) IncomingGraphEdges(v Vertex) []*Edge { var edges []*Edge for v1 := range g.adjacency { // reverse paths for v2, e := range g.adjacency[v1] { @@ -337,7 +307,7 @@ func (g *Graph) IncomingGraphEdges(v *Vertex) []*Edge { } // OutgoingGraphEdges returns all of the edges that point from vertex v (v -> ???). -func (g *Graph) OutgoingGraphEdges(v *Vertex) []*Edge { +func (g *Graph) OutgoingGraphEdges(v Vertex) []*Edge { var edges []*Edge for _, e := range g.adjacency[v] { // forward paths edges = append(edges, e) @@ -347,7 +317,7 @@ func (g *Graph) OutgoingGraphEdges(v *Vertex) []*Edge { // GraphEdges returns an array (slice) of all edges that connect to vertex v. 
// This is the union of IncomingGraphEdges and OutgoingGraphEdges. -func (g *Graph) GraphEdges(v *Vertex) []*Edge { +func (g *Graph) GraphEdges(v Vertex) []*Edge { var edges []*Edge edges = append(edges, g.IncomingGraphEdges(v)...) edges = append(edges, g.OutgoingGraphEdges(v)...) @@ -355,9 +325,9 @@ func (g *Graph) GraphEdges(v *Vertex) []*Edge { } // DFS returns a depth first search for the graph, starting at the input vertex. -func (g *Graph) DFS(start *Vertex) []*Vertex { - var d []*Vertex // discovered - var s []*Vertex // stack +func (g *Graph) DFS(start Vertex) []Vertex { + var d []Vertex // discovered + var s []Vertex // stack if _, exists := g.adjacency[start]; !exists { return nil // TODO: error } @@ -378,7 +348,7 @@ func (g *Graph) DFS(start *Vertex) []*Vertex { } // FilterGraph builds a new graph containing only vertices from the list. -func (g *Graph) FilterGraph(name string, vertices []*Vertex) (*Graph, error) { +func (g *Graph) FilterGraph(name string, vertices []Vertex) (*Graph, error) { newGraph := &Graph{Name: name} if err := newGraph.Init(); err != nil { return nil, errwrap.Wrapf(err, "could not run FilterGraph() properly") @@ -397,8 +367,8 @@ func (g *Graph) FilterGraph(name string, vertices []*Vertex) (*Graph, error) { // DisconnectedGraphs returns a list containing the N disconnected graphs. func (g *Graph) DisconnectedGraphs() ([]*Graph, error) { graphs := []*Graph{} - var start *Vertex - var d []*Vertex // discovered + var start Vertex + var d []Vertex // discovered c := g.NumVertices() for len(d) < c { @@ -429,8 +399,8 @@ func (g *Graph) DisconnectedGraphs() ([]*Graph, error) { } // InDegree returns the count of vertices that point to me in one big lookup map. -func (g *Graph) InDegree() map[*Vertex]int { - result := make(map[*Vertex]int) +func (g *Graph) InDegree() map[Vertex]int { + result := make(map[Vertex]int) for k := range g.adjacency { result[k] = 0 // initialize } @@ -444,8 +414,8 @@ func (g *Graph) InDegree() map[*Vertex]int { } // OutDegree returns the count of vertices that point away in one big lookup map. -func (g *Graph) OutDegree() map[*Vertex]int { - result := make(map[*Vertex]int) +func (g *Graph) OutDegree() map[Vertex]int { + result := make(map[Vertex]int) for k := range g.adjacency { result[k] = 0 // initialize @@ -457,12 +427,12 @@ func (g *Graph) OutDegree() map[*Vertex]int { } // TopologicalSort returns the sort of graph vertices in that order. -// based on descriptions and code from wikipedia and rosetta code +// It is based on descriptions and code from wikipedia and rosetta code. // TODO: add memoization, and cache invalidation to speed this up :) -func (g *Graph) TopologicalSort() ([]*Vertex, error) { // kahn's algorithm - var L []*Vertex // empty list that will contain the sorted elements - var S []*Vertex // set of all nodes with no incoming edges - remaining := make(map[*Vertex]int) // amount of edges remaining +func (g *Graph) TopologicalSort() ([]Vertex, error) { // kahn's algorithm + var L []Vertex // empty list that will contain the sorted elements + var S []Vertex // set of all nodes with no incoming edges + remaining := make(map[Vertex]int) // amount of edges remaining for v, d := range g.InDegree() { if d == 0 { @@ -513,19 +483,19 @@ func (g *Graph) TopologicalSort() ([]*Vertex, error) { // kahn's algorithm // actually return a tree if we cared about correctness. // This operates by a recursive algorithm; a more efficient version is likely. // If you don't give this function a DAG, you might cause infinite recursion! 
-func (g *Graph) Reachability(a, b *Vertex) []*Vertex { +func (g *Graph) Reachability(a, b Vertex) []Vertex { if a == nil || b == nil { return nil } vertices := g.OutgoingGraphVertices(a) // what points away from a ? if len(vertices) == 0 { - return []*Vertex{} // nope + return []Vertex{} // nope } if VertexContains(b, vertices) { - return []*Vertex{a, b} // found + return []Vertex{a, b} // found } // TODO: parallelize this with go routines? - var collected = make([][]*Vertex, len(vertices)) + var collected = make([][]Vertex, len(vertices)) pick := -1 for i, v := range vertices { collected[i] = g.Reachability(v, b) // find b by recursion @@ -538,116 +508,15 @@ func (g *Graph) Reachability(a, b *Vertex) []*Vertex { } } if pick < 0 { - return []*Vertex{} // nope + return []Vertex{} // nope } - result := []*Vertex{a} // tack on a + result := []Vertex{a} // tack on a result = append(result, collected[pick]...) return result } -// GraphSync updates the oldGraph so that it matches the newGraph receiver. It -// leaves identical elements alone so that they don't need to be refreshed. It -// tries to mutate existing elements into new ones, if they support this. -// FIXME: add test cases -func (g *Graph) GraphSync(oldGraph *Graph) (*Graph, error) { - - if oldGraph == nil { - var err error - oldGraph, err = NewGraph(g.GetName()) // copy over the name - if err != nil { - return nil, errwrap.Wrapf(err, "could not run GraphSync() properly") - } - } - oldGraph.SetName(g.GetName()) // overwrite the name - - var lookup = make(map[*Vertex]*Vertex) - var vertexKeep []*Vertex // list of vertices which are the same in new graph - var edgeKeep []*Edge // list of vertices which are the same in new graph - - for v := range g.adjacency { // loop through the vertices (resources) - res := v.Res // resource - var vertex *Vertex - - // step one, direct compare with res.Compare - if vertex == nil { // redundant guard for consistency - fn := func(v *Vertex) (bool, error) { - return v.Res.Compare(res), nil - } - var err error - vertex, err = oldGraph.VertexMatchFn(fn) - if err != nil { - return nil, errwrap.Wrapf(err, "could not VertexMatchFn() resource") - } - } - - // TODO: consider adding a mutate API. - // step two, try and mutate with res.Mutate - //if vertex == nil { // not found yet... - // vertex = oldGraph.MutateMatch(res) - //} - - if vertex == nil { // no match found yet - if err := res.Validate(); err != nil { - return nil, errwrap.Wrapf(err, "could not Validate() resource") - } - vertex = v - oldGraph.AddVertex(vertex) // call standalone in case not part of an edge - } - lookup[v] = vertex // used for constructing edges - vertexKeep = append(vertexKeep, vertex) // append - } - - // get rid of any vertices we shouldn't keep (that aren't in new graph) - for v := range oldGraph.adjacency { - if !VertexContains(v, vertexKeep) { - // wait for exit before starting new graph! - v.SendEvent(event.EventExit, nil) // sync - v.Res.WaitGroup().Wait() - oldGraph.DeleteVertex(v) - } - } - - // compare edges - for v1 := range g.adjacency { // loop through the vertices (resources) - for v2, e := range g.adjacency[v1] { - // we have an edge! - - // lookup vertices (these should exist now) - //res1 := v1.Res // resource - //res2 := v2.Res - //vertex1 := oldGraph.CompareMatch(res1) // now: VertexMatchFn - //vertex2 := oldGraph.CompareMatch(res2) // now: VertexMatchFn - vertex1, exists1 := lookup[v1] - vertex2, exists2 := lookup[v2] - if !exists1 || !exists2 { // no match found, bug? 
- //if vertex1 == nil || vertex2 == nil { // no match found - return nil, fmt.Errorf("new vertices weren't found") // programming error - } - - edge, exists := oldGraph.adjacency[vertex1][vertex2] - if !exists || edge.Name != e.Name { // TODO: edgeCmp - edge = e // use or overwrite edge - } - oldGraph.adjacency[vertex1][vertex2] = edge // store it (AddEdge) - edgeKeep = append(edgeKeep, edge) // mark as saved - } - } - - // delete unused edges - for v1 := range oldGraph.adjacency { - for _, e := range oldGraph.adjacency[v1] { - // we have an edge! - if !EdgeContains(e, edgeKeep) { - oldGraph.DeleteEdge(e) - } - } - } - - return oldGraph, nil -} - // VertexContains is an "in array" function to test for a vertex in a slice of vertices. -func VertexContains(needle *Vertex, haystack []*Vertex) bool { +func VertexContains(needle Vertex, haystack []Vertex) bool { for _, v := range haystack { if needle == v { return true @@ -667,9 +536,8 @@ func EdgeContains(needle *Edge, haystack []*Edge) bool { } // Reverse reverses a list of vertices. -func Reverse(vs []*Vertex) []*Vertex { - //var out []*Vertex // XXX: golint suggests, but it fails testing - out := make([]*Vertex, 0) // empty list +func Reverse(vs []Vertex) []Vertex { + out := []Vertex{} l := len(vs) for i := range vs { out = append(out, vs[l-i-1]) diff --git a/pgraph/pgraph_test.go b/pgraph/pgraph_test.go index 3f038714..86723f22 100644 --- a/pgraph/pgraph_test.go +++ b/pgraph/pgraph_test.go @@ -18,25 +18,23 @@ package pgraph import ( - "fmt" "reflect" - "sort" - "strings" "testing" - "time" - - "github.com/purpleidea/mgmt/resources" - "github.com/purpleidea/mgmt/util" ) +// vertex is a test struct to test the library. +type vertex struct { + name string +} + +// String is a required method of the Vertex interface we must fulfill. +func (v *vertex) String() string { + return v.name +} + // NV is a helper function to make testing easier. It creates a new noop vertex. 
-func NV(s string) *Vertex { - obj := &resources.NoopRes{ - BaseRes: resources.BaseRes{ - Name: s, - }, - Comment: "Testing!", - } +func NV(s string) Vertex { + obj := &vertex{s} return NewVertex(obj) } @@ -120,7 +118,7 @@ func TestPgraphT3(t *testing.T) { t.Errorf("should have 3 vertices instead of: %d", i) t.Errorf("found: %v", out1) for _, v := range out1 { - t.Errorf("value: %v", v.GetName()) + t.Errorf("value: %s", v) } } @@ -129,7 +127,7 @@ func TestPgraphT3(t *testing.T) { t.Errorf("should have 3 vertices instead of: %d", i) t.Errorf("found: %v", out1) for _, v := range out1 { - t.Errorf("value: %v", v.GetName()) + t.Errorf("value: %s", v) } } } @@ -152,7 +150,7 @@ func TestPgraphT4(t *testing.T) { t.Errorf("should have 3 vertices instead of: %d", i) t.Errorf("found: %v", out) for _, v := range out { - t.Errorf("value: %v", v.GetName()) + t.Errorf("value: %s", v) } } } @@ -179,7 +177,7 @@ func TestPgraphT5(t *testing.T) { G.AddEdge(v5, v6, e5) //G.AddEdge(v6, v4, e6) - save := []*Vertex{v1, v2, v3} + save := []Vertex{v1, v2, v3} out, err := G.FilterGraph("new g5", save) if err != nil { t.Errorf("failed with: %v", err) @@ -269,26 +267,26 @@ func TestPgraphT8(t *testing.T) { v1 := NV("v1") v2 := NV("v2") v3 := NV("v3") - if VertexContains(v1, []*Vertex{v1, v2, v3}) != true { + if VertexContains(v1, []Vertex{v1, v2, v3}) != true { t.Errorf("should be true instead of false.") } v4 := NV("v4") v5 := NV("v5") v6 := NV("v6") - if VertexContains(v4, []*Vertex{v5, v6}) != false { + if VertexContains(v4, []Vertex{v5, v6}) != false { t.Errorf("should be false instead of true.") } v7 := NV("v7") v8 := NV("v8") v9 := NV("v9") - if VertexContains(v8, []*Vertex{v7, v8, v9}) != true { + if VertexContains(v8, []Vertex{v7, v8, v9}) != true { t.Errorf("should be true instead of false.") } v1b := NV("v1") // same value, different objects - if VertexContains(v1b, []*Vertex{v1, v2, v3}) != false { + if VertexContains(v1b, []Vertex{v1, v2, v3}) != false { t.Errorf("should be false instead of true.") } } @@ -316,7 +314,7 @@ func TestPgraphT9(t *testing.T) { G.AddEdge(v4, v5, e5) G.AddEdge(v5, v6, e6) - indegree := G.InDegree() // map[*Vertex]int + indegree := G.InDegree() // map[Vertex]int if i := indegree[v1]; i != 0 { t.Errorf("indegree of v1 should be 0 instead of: %d", i) } @@ -336,7 +334,7 @@ func TestPgraphT9(t *testing.T) { t.Errorf("indegree of v6 should be 1 instead of: %d", i) } - outdegree := G.OutDegree() // map[*Vertex]int + outdegree := G.OutDegree() // map[Vertex]int if i := outdegree[v1]; i != 2 { t.Errorf("outdegree of v1 should be 2 instead of: %d", i) } @@ -358,12 +356,12 @@ func TestPgraphT9(t *testing.T) { s, err := G.TopologicalSort() // either possibility is a valid toposort - match := reflect.DeepEqual(s, []*Vertex{v1, v2, v3, v4, v5, v6}) || reflect.DeepEqual(s, []*Vertex{v1, v3, v2, v4, v5, v6}) + match := reflect.DeepEqual(s, []Vertex{v1, v2, v3, v4, v5, v6}) || reflect.DeepEqual(s, []Vertex{v1, v3, v2, v4, v5, v6}) if err != nil || !match { t.Errorf("topological sort failed, error: %v", err) str := "Found:" for _, v := range s { - str += " " + v.Res.GetName() + str += " " + v.String() } t.Errorf(str) } @@ -405,7 +403,7 @@ func TestPgraphReachability0(t *testing.T) { t.Logf("reachability failed") str := "Got:" for _, v := range result { - str += " " + v.Res.GetName() + str += " " + v.String() } t.Errorf(str) } @@ -416,13 +414,13 @@ func TestPgraphReachability0(t *testing.T) { v6 := NV("v6") result := G.Reachability(v1, v6) - expected := []*Vertex{} + expected := []Vertex{} if 
!reflect.DeepEqual(result, expected) { t.Logf("reachability failed") str := "Got:" for _, v := range result { - str += " " + v.Res.GetName() + str += " " + v.String() } t.Errorf(str) } @@ -447,13 +445,13 @@ func TestPgraphReachability0(t *testing.T) { G.AddEdge(v3, v5, e5) result := G.Reachability(v1, v6) - expected := []*Vertex{} + expected := []Vertex{} if !reflect.DeepEqual(result, expected) { t.Logf("reachability failed") str := "Got:" for _, v := range result { - str += " " + v.Res.GetName() + str += " " + v.String() } t.Errorf(str) } @@ -482,13 +480,13 @@ func TestPgraphReachability1(t *testing.T) { G.AddEdge(v5, v6, e5) result := G.Reachability(v1, v6) - expected := []*Vertex{v1, v2, v3, v4, v5, v6} + expected := []Vertex{v1, v2, v3, v4, v5, v6} if !reflect.DeepEqual(result, expected) { t.Logf("reachability failed") str := "Got:" for _, v := range result { - str += " " + v.Res.GetName() + str += " " + v.String() } t.Errorf(str) } @@ -517,15 +515,15 @@ func TestPgraphReachability2(t *testing.T) { G.AddEdge(v5, v6, e6) result := G.Reachability(v1, v6) - expected1 := []*Vertex{v1, v2, v4, v5, v6} - expected2 := []*Vertex{v1, v3, v4, v5, v6} + expected1 := []Vertex{v1, v2, v4, v5, v6} + expected2 := []Vertex{v1, v3, v4, v5, v6} // !xor test if reflect.DeepEqual(result, expected1) == reflect.DeepEqual(result, expected2) { t.Logf("reachability failed") str := "Got:" for _, v := range result { - str += " " + v.Res.GetName() + str += " " + v.String() } t.Errorf(str) } @@ -554,13 +552,13 @@ func TestPgraphReachability3(t *testing.T) { G.AddEdge(v5, v6, e6) result := G.Reachability(v1, v6) - expected := []*Vertex{v1, v5, v6} + expected := []Vertex{v1, v5, v6} if !reflect.DeepEqual(result, expected) { t.Logf("reachability failed") str := "Got:" for _, v := range result { - str += " " + v.Res.GetName() + str += " " + v.String() } t.Errorf(str) } @@ -589,13 +587,13 @@ func TestPgraphReachability4(t *testing.T) { G.AddEdge(v1, v6, e6) result := G.Reachability(v1, v6) - expected := []*Vertex{v1, v6} + expected := []Vertex{v1, v6} if !reflect.DeepEqual(result, expected) { t.Logf("reachability failed") str := "Got:" for _, v := range result { - str += " " + v.Res.GetName() + str += " " + v.String() } t.Errorf(str) } @@ -609,249 +607,19 @@ func TestPgraphT11(t *testing.T) { v5 := NV("v5") v6 := NV("v6") - if rev := Reverse([]*Vertex{}); !reflect.DeepEqual(rev, []*Vertex{}) { - t.Errorf("reverse of vertex slice failed") + if rev := Reverse([]Vertex{}); !reflect.DeepEqual(rev, []Vertex{}) { + t.Errorf("reverse of vertex slice failed (empty)") } - if rev := Reverse([]*Vertex{v1}); !reflect.DeepEqual(rev, []*Vertex{v1}) { - t.Errorf("reverse of vertex slice failed") + if rev := Reverse([]Vertex{v1}); !reflect.DeepEqual(rev, []Vertex{v1}) { + t.Errorf("reverse of vertex slice failed (single)") } - if rev := Reverse([]*Vertex{v1, v2, v3, v4, v5, v6}); !reflect.DeepEqual(rev, []*Vertex{v6, v5, v4, v3, v2, v1}) { - t.Errorf("reverse of vertex slice failed") + if rev := Reverse([]Vertex{v1, v2, v3, v4, v5, v6}); !reflect.DeepEqual(rev, []Vertex{v6, v5, v4, v3, v2, v1}) { + t.Errorf("reverse of vertex slice failed (1..6)") } - if rev := Reverse([]*Vertex{v6, v5, v4, v3, v2, v1}); !reflect.DeepEqual(rev, []*Vertex{v1, v2, v3, v4, v5, v6}) { - t.Errorf("reverse of vertex slice failed") - } -} - -type NoopResTest struct { - resources.NoopRes -} - -func (obj *NoopResTest) GroupCmp(r resources.Res) bool { - res, ok := r.(*NoopResTest) - if !ok { - return false - } - - // TODO: implement this in vertexCmp for 
*testGrouper instead? - if strings.Contains(res.Name, ",") { // HACK - return false // element to be grouped is already grouped! - } - - // group if they start with the same letter! (helpful hack for testing) - return obj.Name[0] == res.Name[0] -} - -func NewNoopResTest(name string) *NoopResTest { - obj := &NoopResTest{ - NoopRes: resources.NoopRes{ - BaseRes: resources.BaseRes{ - Name: name, - MetaParams: resources.MetaParams{ - AutoGroup: true, // always autogroup - }, - }, - }, - } - return obj -} - -// ListStrCmp compares two lists of strings -func ListStrCmp(a, b []string) bool { - //fmt.Printf("CMP: %v with %v\n", a, b) // debugging - if a == nil && b == nil { - return true - } - if a == nil || b == nil { - return false - } - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} - -// GraphCmp compares the topology of two graphs and returns nil if they're equal -// It also compares if grouped element groups are identical -func GraphCmp(g1, g2 *Graph) error { - if n1, n2 := g1.NumVertices(), g2.NumVertices(); n1 != n2 { - return fmt.Errorf("graph g1 has %d vertices, while g2 has %d", n1, n2) - } - if e1, e2 := g1.NumEdges(), g2.NumEdges(); e1 != e2 { - return fmt.Errorf("graph g1 has %d edges, while g2 has %d", e1, e2) - } - - var m = make(map[*Vertex]*Vertex) // g1 to g2 vertex correspondence -Loop: - // check vertices - for v1 := range g1.adjacency { // for each vertex in g1 - - l1 := strings.Split(v1.GetName(), ",") // make list of everyone's names... - for _, x1 := range v1.GetGroup() { - l1 = append(l1, x1.GetName()) // add my contents - } - l1 = util.StrRemoveDuplicatesInList(l1) // remove duplicates - sort.Strings(l1) - - // inner loop - for v2 := range g2.adjacency { // does it match in g2 ? - - l2 := strings.Split(v2.GetName(), ",") - for _, x2 := range v2.GetGroup() { - l2 = append(l2, x2.GetName()) - } - l2 = util.StrRemoveDuplicatesInList(l2) // remove duplicates - sort.Strings(l2) - - // does l1 match l2 ? - if ListStrCmp(l1, l2) { // cmp! - m[v1] = v2 - continue Loop - } - } - return fmt.Errorf("graph g1, has no match in g2 for: %v", v1.GetName()) - } - // vertices (and groups) match :) - - // check edges - for v1 := range g1.adjacency { // for each vertex in g1 - v2 := m[v1] // lookup in map to get correspondance - // g1.adjacency[v1] corresponds to g2.adjacency[v2] - if e1, e2 := len(g1.adjacency[v1]), len(g2.adjacency[v2]); e1 != e2 { - return fmt.Errorf("graph g1, vertex(%v) has %d edges, while g2, vertex(%v) has %d", v1.GetName(), e1, v2.GetName(), e2) - } - - for vv1, ee1 := range g1.adjacency[v1] { - vv2 := m[vv1] - ee2 := g2.adjacency[v2][vv2] - - // these are edges from v1 -> vv1 via ee1 (graph 1) - // to cmp to edges from v2 -> vv2 via ee2 (graph 2) - - // check: (1) vv1 == vv2 ? (we've already checked this!) - l1 := strings.Split(vv1.GetName(), ",") // make list of everyone's names... - for _, x1 := range vv1.GetGroup() { - l1 = append(l1, x1.GetName()) // add my contents - } - l1 = util.StrRemoveDuplicatesInList(l1) // remove duplicates - sort.Strings(l1) - - l2 := strings.Split(vv2.GetName(), ",") - for _, x2 := range vv2.GetGroup() { - l2 = append(l2, x2.GetName()) - } - l2 = util.StrRemoveDuplicatesInList(l2) // remove duplicates - sort.Strings(l2) - - // does l1 match l2 ? - if !ListStrCmp(l1, l2) { // cmp! 
- return fmt.Errorf("graph g1 and g2 don't agree on: %v and %v", vv1.GetName(), vv2.GetName()) - } - - // check: (2) ee1 == ee2 - if ee1.Name != ee2.Name { - return fmt.Errorf("graph g1 edge(%v) doesn't match g2 edge(%v)", ee1.Name, ee2.Name) - } - } - } - - // check meta parameters - for v1 := range g1.adjacency { // for each vertex in g1 - for v2 := range g2.adjacency { // does it match in g2 ? - s1, s2 := v1.Meta().Sema, v2.Meta().Sema - sort.Strings(s1) - sort.Strings(s2) - if !reflect.DeepEqual(s1, s2) { - return fmt.Errorf("vertex %s and vertex %s have different semaphores", v1.GetName(), v2.GetName()) - } - } - } - - return nil // success! -} - -type testGrouper struct { - // TODO: this algorithm may not be correct in all cases. replace if needed! - nonReachabilityGrouper // "inherit" what we want, and reimplement the rest -} - -func (ag *testGrouper) name() string { - return "testGrouper" -} - -func (ag *testGrouper) vertexMerge(v1, v2 *Vertex) (v *Vertex, err error) { - if err := v1.Res.GroupRes(v2.Res); err != nil { // group them first - return nil, err - } - // HACK: update the name so it matches full list of self+grouped - obj := v1.Res - names := strings.Split(obj.GetName(), ",") // load in stored names - for _, n := range obj.GetGroup() { - names = append(names, n.GetName()) // add my contents - } - names = util.StrRemoveDuplicatesInList(names) // remove duplicates - sort.Strings(names) - obj.SetName(strings.Join(names, ",")) - return // success or fail, and no need to merge the actual vertices! -} - -func (ag *testGrouper) edgeMerge(e1, e2 *Edge) *Edge { - // HACK: update the name so it makes a union of both names - n1 := strings.Split(e1.Name, ",") // load - n2 := strings.Split(e2.Name, ",") // load - names := append(n1, n2...) - names = util.StrRemoveDuplicatesInList(names) // remove duplicates - sort.Strings(names) - return NewEdge(strings.Join(names, ",")) -} - -func (g *Graph) fullPrint() (str string) { - str += "\n" - for v := range g.adjacency { - if semas := v.Meta().Sema; len(semas) > 0 { - str += fmt.Sprintf("* v: %v; sema: %v\n", v.GetName(), semas) - } else { - str += fmt.Sprintf("* v: %v\n", v.GetName()) - } - // TODO: add explicit grouping data? - } - for v1 := range g.adjacency { - for v2, e := range g.adjacency[v1] { - str += fmt.Sprintf("* e: %v -> %v # %v\n", v1.GetName(), v2.GetName(), e.Name) - } - } - return -} - -// helper function -func runGraphCmp(t *testing.T, g1, g2 *Graph) { - ch := g1.autoGroup(&testGrouper{}) // edits the graph - for range ch { // bleed the channel or it won't run :( - // pass - } - err := GraphCmp(g1, g2) - if err != nil { - t.Logf(" actual (g1): %v%v", g1, g1.fullPrint()) - t.Logf("expected (g2): %v%v", g2, g2.fullPrint()) - t.Logf("Cmp error:") - t.Errorf("%v", err) - } -} - -func TestDurationAssumptions(t *testing.T) { - var d time.Duration - if (d == 0) != true { - t.Errorf("empty time.Duration is no longer equal to zero") - } - if (d > 0) != false { - t.Errorf("empty time.Duration is now greater than zero") + if rev := Reverse([]Vertex{v6, v5, v4, v3, v2, v1}); !reflect.DeepEqual(rev, []Vertex{v1, v2, v3, v4, v5, v6}) { + t.Errorf("reverse of vertex slice failed (6..1)") } } diff --git a/pgraph/actions.go b/resources/actions.go similarity index 73% rename from pgraph/actions.go rename to resources/actions.go index aa5634ae..d0155fde 100644 --- a/pgraph/actions.go +++ b/resources/actions.go @@ -15,7 +15,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package pgraph +package resources import ( "fmt" @@ -26,8 +26,8 @@ import ( "time" "github.com/purpleidea/mgmt/event" + "github.com/purpleidea/mgmt/pgraph" "github.com/purpleidea/mgmt/prometheus" - "github.com/purpleidea/mgmt/resources" "github.com/purpleidea/mgmt/util" multierr "github.com/hashicorp/go-multierror" @@ -36,16 +36,16 @@ import ( ) // OKTimestamp returns true if this element can run right now? -func (g *Graph) OKTimestamp(v *Vertex) bool { +func OKTimestamp(g *pgraph.Graph, v pgraph.Vertex) bool { // these are all the vertices pointing TO v, eg: ??? -> v for _, n := range g.IncomingGraphVertices(v) { // if the vertex has a greater timestamp than any pre-req (n) // then we can't run right now... // if they're equal (eg: on init of 0) then we also can't run // b/c we should let our pre-req's go first... - x, y := v.Res.Timestamp(), n.Res.Timestamp() + x, y := VtoR(v).Timestamp(), VtoR(n).Timestamp() if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: OKTimestamp: (%v) >= %s[%s](%v): !%v", v.GetKind(), v.GetName(), x, n.GetKind(), n.GetName(), y, x >= y) + log.Printf("%s: OKTimestamp: (%v) >= %s(%v): !%v", VtoR(v).String(), x, VtoR(n).String(), y, x >= y) } if x >= y { return false @@ -55,12 +55,12 @@ func (g *Graph) OKTimestamp(v *Vertex) bool { } // Poke tells nodes after me in the dependency graph that they need to refresh. -func (g *Graph) Poke(v *Vertex) error { +func Poke(g *pgraph.Graph, v pgraph.Vertex) error { // if we're pausing (or exiting) then we should suspend poke's so that // the graph doesn't go on running forever until it's completely done! // this is an optional feature which we can do by default on user exit - if g.fastPause { + if b, ok := g.Value("fastpause"); ok && util.Bool(b) { return nil // TODO: should this be an error instead? } @@ -70,21 +70,21 @@ func (g *Graph) Poke(v *Vertex) error { // we can skip this poke if resource hasn't done work yet... it // needs to be poked if already running, or not running though! // TODO: does this need an || activity flag? - if n.Res.GetState() != resources.ResStateProcess { + if VtoR(n).GetState() != ResStateProcess { if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: Poke: %s[%s]", v.GetKind(), v.GetName(), n.GetKind(), n.GetName()) + log.Printf("%s: Poke: %s", VtoR(v).String(), VtoR(n).String()) } wg.Add(1) - go func(nn *Vertex) error { + go func(nn pgraph.Vertex) error { defer wg.Done() //edge := g.adjacency[v][nn] // lookup //notify := edge.Notify && edge.Refresh() - return nn.SendEvent(event.EventPoke, nil) + return VtoR(nn).SendEvent(event.EventPoke, nil) }(n) } else { if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: Poke: %s[%s]: Skipped!", v.GetKind(), v.GetName(), n.GetKind(), n.GetName()) + log.Printf("%s: Poke: %s: Skipped!", VtoR(v).String(), VtoR(n).String()) } } } @@ -94,30 +94,30 @@ func (g *Graph) Poke(v *Vertex) error { } // BackPoke pokes the pre-requisites that are stale and need to run before I can run. -func (g *Graph) BackPoke(v *Vertex) { +func BackPoke(g *pgraph.Graph, v pgraph.Vertex) { var wg sync.WaitGroup // these are all the vertices pointing TO v, eg: ??? -> v for _, n := range g.IncomingGraphVertices(v) { - x, y, s := v.Res.Timestamp(), n.Res.Timestamp(), n.Res.GetState() + x, y, s := VtoR(v).Timestamp(), VtoR(n).Timestamp(), VtoR(n).GetState() // If the parent timestamp needs poking AND it's not running // Process, then poke it. 
If the parent is in ResStateProcess it // means that an event is pending, so we'll be expecting a poke // back soon, so we can safely discard the extra parent poke... // TODO: implement a stateLT (less than) to tell if something // happens earlier in the state cycle and that doesn't wrap nil - if x >= y && (s != resources.ResStateProcess && s != resources.ResStateCheckApply) { + if x >= y && (s != ResStateProcess && s != ResStateCheckApply) { if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: BackPoke: %s[%s]", v.GetKind(), v.GetName(), n.GetKind(), n.GetName()) + log.Printf("%s: BackPoke: %s", VtoR(v).String(), VtoR(n).String()) } wg.Add(1) - go func(nn *Vertex) error { + go func(nn pgraph.Vertex) error { defer wg.Done() - return nn.SendEvent(event.EventBackPoke, nil) + return VtoR(nn).SendEvent(event.EventBackPoke, nil) }(n) } else { if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: BackPoke: %s[%s]: Skipped!", v.GetKind(), v.GetName(), n.GetKind(), n.GetName()) + log.Printf("%s: BackPoke: %s: Skipped!", VtoR(v).String(), VtoR(n).String()) } } } @@ -127,7 +127,7 @@ func (g *Graph) BackPoke(v *Vertex) { // RefreshPending determines if any previous nodes have a refresh pending here. // If this is true, it means I am expected to apply a refresh when I next run. -func (g *Graph) RefreshPending(v *Vertex) bool { +func RefreshPending(g *pgraph.Graph, v pgraph.Vertex) bool { var refresh bool for _, edge := range g.IncomingGraphEdges(v) { // if we asked for a notify *and* if one is pending! @@ -140,7 +140,7 @@ func (g *Graph) RefreshPending(v *Vertex) bool { } // SetUpstreamRefresh sets the refresh value to any upstream vertices. -func (g *Graph) SetUpstreamRefresh(v *Vertex, b bool) { +func SetUpstreamRefresh(g *pgraph.Graph, v pgraph.Vertex, b bool) { for _, edge := range g.IncomingGraphEdges(v) { if edge.Notify { edge.SetRefresh(b) @@ -149,7 +149,7 @@ func (g *Graph) SetUpstreamRefresh(v *Vertex, b bool) { } // SetDownstreamRefresh sets the refresh value to any downstream vertices. -func (g *Graph) SetDownstreamRefresh(v *Vertex, b bool) { +func SetDownstreamRefresh(g *pgraph.Graph, v pgraph.Vertex, b bool) { for _, edge := range g.OutgoingGraphEdges(v) { // if we asked for a notify *and* if one is pending! if edge.Notify { @@ -159,25 +159,25 @@ func (g *Graph) SetDownstreamRefresh(v *Vertex, b bool) { } // Process is the primary function to execute for a particular vertex in the graph. -func (g *Graph) Process(v *Vertex) error { - obj := v.Res +func Process(g *pgraph.Graph, v pgraph.Vertex) error { + obj := VtoR(v) if b, ok := g.Value("debug"); ok && util.Bool(b) { log.Printf("%s[%s]: Process()", obj.GetKind(), obj.GetName()) } // FIXME: should these SetState methods be here or after the sema code? - defer obj.SetState(resources.ResStateNil) // reset state when finished - obj.SetState(resources.ResStateProcess) + defer obj.SetState(ResStateNil) // reset state when finished + obj.SetState(ResStateProcess) // is it okay to run dependency wise right now? // if not, that's okay because when the dependency runs, it will poke // us back and we will run if needed then! - if !g.OKTimestamp(v) { - go g.BackPoke(v) + if !OKTimestamp(g, v) { + go BackPoke(g, v) return nil } // timestamp must be okay... if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: OKTimestamp(%v)", obj.GetKind(), obj.GetName(), v.Res.Timestamp()) + log.Printf("%s[%s]: OKTimestamp(%v)", obj.GetKind(), obj.GetName(), VtoR(v).Timestamp()) } // semaphores! 
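The hunks above move the scheduling helpers (OKTimestamp, Poke, BackPoke, RefreshPending, SetUpstreamRefresh, SetDownstreamRefresh) from methods on pgraph.Graph to package-level functions in the resources package that take the graph and the vertex explicitly, recovering the resource from the now-opaque vertex via VtoR. A minimal sketch of the new calling convention follows; the helper name processIfReady is a hypothetical illustration and is not part of this patch, while the functions it calls are the ones introduced above (VtoR is assumed to be defined elsewhere in this commit).

// Illustrative sketch only: the post-refactor calling convention for the
// relocated helpers, as seen from inside the resources package.
package resources

import "github.com/purpleidea/mgmt/pgraph"

// processIfReady is a hypothetical caller, not part of the patch.
func processIfReady(g *pgraph.Graph, v pgraph.Vertex) error {
	if !OKTimestamp(g, v) { // a pre-requisite has an equal or newer timestamp...
		go BackPoke(g, v) // ...so ask the stale parents to run first
		return nil
	}
	refresh := RefreshPending(g, v) // did an incoming notify edge request a refresh?
	VtoR(v).SetRefresh(refresh)     // VtoR converts the opaque vertex back to a Res
	return Poke(g, v)               // tell downstream vertices to re-check themselves
}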
@@ -191,11 +191,11 @@ func (g *Graph) Process(v *Vertex) error { if b, ok := g.Value("debug"); ok && util.Bool(b) && len(semas) > 0 { log.Printf("%s[%s]: Sema: P(%s)", obj.GetKind(), obj.GetName(), strings.Join(semas, ", ")) } - if err := g.SemaLock(semas); err != nil { // lock + if err := SemaLock(g, semas); err != nil { // lock // NOTE: in practice, this might not ever be truly necessary... return fmt.Errorf("shutdown of semaphores") } - defer g.SemaUnlock(semas) // unlock + defer SemaUnlock(g, semas) // unlock if b, ok := g.Value("debug"); ok && util.Bool(b) && len(semas) > 0 { defer log.Printf("%s[%s]: Sema: V(%s)", obj.GetKind(), obj.GetName(), strings.Join(semas, ", ")) } @@ -225,11 +225,11 @@ func (g *Graph) Process(v *Vertex) error { } // lookup the refresh (notification) variable - refresh = g.RefreshPending(v) // do i need to perform a refresh? - obj.SetRefresh(refresh) // tell the resource + refresh = RefreshPending(g, v) // do i need to perform a refresh? + obj.SetRefresh(refresh) // tell the resource // changes can occur after this... - obj.SetState(resources.ResStateCheckApply) + obj.SetState(ResStateCheckApply) // check cached state, to skip CheckApply; can't skip if refreshing if !refresh && obj.IsStateOK() { @@ -248,15 +248,15 @@ func (g *Graph) Process(v *Vertex) error { if promErr := obj.Prometheus().UpdateCheckApplyTotal(obj.GetKind(), !noop, !checkOK, err != nil); promErr != nil { // TODO: how to error correctly - log.Printf("%s[%s]: Prometheus.UpdateCheckApplyTotal() errored: %v", v.GetKind(), v.GetName(), err) + log.Printf("%s: Prometheus.UpdateCheckApplyTotal() errored: %v", VtoR(v).String(), err) } // TODO: Can the `Poll` converged timeout tracking be a // more general method for all converged timeouts? this // would simplify the resources by removing boilerplate - if v.Meta().Poll > 0 { + if VtoR(v).Meta().Poll > 0 { if !checkOK { // something changed, restart timer - cuid, _, _ := v.Res.ConvergerUIDs() // get the converger uid used to report status - cuid.ResetTimer() // activity! + cuid, _, _ := VtoR(v).ConvergerUIDs() // get the converger uid used to report status + cuid.ResetTimer() // activity! if b, ok := g.Value("debug"); ok && util.Bool(b) { log.Printf("%s[%s]: Converger: ResetTimer", obj.GetKind(), obj.GetName()) } @@ -275,7 +275,7 @@ func (g *Graph) Process(v *Vertex) error { if !noop && err == nil { // aka !noop || checkOK obj.StateOK(true) // reset if refresh { - g.SetUpstreamRefresh(v, false) // refresh happened, clear the request + SetUpstreamRefresh(g, v, false) // refresh happened, clear the request obj.SetRefresh(false) } } @@ -301,14 +301,14 @@ func (g *Graph) Process(v *Vertex) error { } if activity { // add refresh flag to downstream edges... - g.SetDownstreamRefresh(v, true) + SetDownstreamRefresh(g, v, true) } // update this timestamp *before* we poke or the poked // nodes might fail due to having a too old timestamp! - v.Res.UpdateTimestamp() // this was touched... - obj.SetState(resources.ResStatePoking) // can't cancel parent poke - if err := g.Poke(v); err != nil { + VtoR(v).UpdateTimestamp() // this was touched... + obj.SetState(ResStatePoking) // can't cancel parent poke + if err := Poke(g, v); err != nil { return errwrap.Wrapf(err, "the Poke() failed") } } @@ -327,9 +327,9 @@ func (obj *SentinelErr) Error() string { } // innerWorker is the CheckApply runner that reads from processChan. -// TODO: would it be better if this was a method on BaseRes that took in *Graph? 
-func (g *Graph) innerWorker(v *Vertex) { - obj := v.Res +// TODO: would it be better if this was a method on BaseRes that took in *pgraph.Graph? +func innerWorker(g *pgraph.Graph, v pgraph.Vertex) { + obj := VtoR(v) running := false done := make(chan struct{}) playback := false // do we need to run another one? @@ -341,9 +341,9 @@ func (g *Graph) innerWorker(v *Vertex) { <-timer.C // unnecessary, shouldn't happen } - var delay = time.Duration(v.Meta().Delay) * time.Millisecond - var retry = v.Meta().Retry // number of tries left, -1 for infinite - var limiter = rate.NewLimiter(v.Meta().Limit, v.Meta().Burst) + var delay = time.Duration(VtoR(v).Meta().Delay) * time.Millisecond + var retry = VtoR(v).Meta().Retry // number of tries left, -1 for infinite + var limiter = rate.NewLimiter(VtoR(v).Meta().Limit, VtoR(v).Meta().Burst) limited := false wg := &sync.WaitGroup{} // wait for Process routine to exit @@ -355,17 +355,17 @@ Loop: if !ok { // processChan closed, let's exit break Loop // no event, so no ack! } - if v.Res.Meta().Poll == 0 { // skip for polling + if VtoR(v).Meta().Poll == 0 { // skip for polling wcuid.SetConverged(false) } // if process started, but no action yet, skip! - if v.Res.GetState() == resources.ResStateProcess { + if VtoR(v).GetState() == ResStateProcess { if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: Skipped event!", v.GetKind(), v.GetName()) + log.Printf("%s: Skipped event!", VtoR(v).String()) } ev.ACK() // ready for next message - v.Res.QuiesceGroup().Done() + VtoR(v).QuiesceGroup().Done() continue } @@ -373,27 +373,27 @@ Loop: // if waiting, we skip running a new execution! if running || waiting { if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: Playback added!", v.GetKind(), v.GetName()) + log.Printf("%s: Playback added!", VtoR(v).String()) } playback = true ev.ACK() // ready for next message - v.Res.QuiesceGroup().Done() + VtoR(v).QuiesceGroup().Done() continue } // catch invalid rates - if v.Meta().Burst == 0 && !(v.Meta().Limit == rate.Inf) { // blocked - e := fmt.Errorf("%s[%s]: Permanently limited (rate != Inf, burst: 0)", v.GetKind(), v.GetName()) + if VtoR(v).Meta().Burst == 0 && !(VtoR(v).Meta().Limit == rate.Inf) { // blocked + e := fmt.Errorf("%s: Permanently limited (rate != Inf, burst: 0)", VtoR(v).String()) ev.ACK() // ready for next message - v.Res.QuiesceGroup().Done() - v.SendEvent(event.EventExit, &SentinelErr{e}) + VtoR(v).QuiesceGroup().Done() + VtoR(v).SendEvent(event.EventExit, &SentinelErr{e}) continue } // rate limit // FIXME: consider skipping rate limit check if // the event is a poke instead of a watch event - if !limited && !(v.Meta().Limit == rate.Inf) { // skip over the playback event... + if !limited && !(VtoR(v).Meta().Limit == rate.Inf) { // skip over the playback event... now := time.Now() r := limiter.ReserveN(now, 1) // one event // r.OK() seems to always be true here! @@ -401,12 +401,12 @@ Loop: if d > 0 { // delay limited = true playback = true - log.Printf("%s[%s]: Limited (rate: %v/sec, burst: %d, next: %v)", v.GetKind(), v.GetName(), v.Meta().Limit, v.Meta().Burst, d) + log.Printf("%s: Limited (rate: %v/sec, burst: %d, next: %v)", VtoR(v).String(), VtoR(v).Meta().Limit, VtoR(v).Meta().Burst, d) // start the timer... timer.Reset(d) waiting = true // waiting for retry timer ev.ACK() - v.Res.QuiesceGroup().Done() + VtoR(v).QuiesceGroup().Done() continue } // otherwise, we run directly! 
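// Illustrative sketch only, not part of this patch: the Limit and Burst
// metaparams feed straight into golang.org/x/time/rate as shown above; when a
// reservation can't be satisfied immediately, the event is deferred by the
// returned delay (the timer.Reset(d) above) rather than dropped. The helper
// name is hypothetical.
func reservationDelay(limiter *rate.Limiter) time.Duration {
	now := time.Now()
	r := limiter.ReserveN(now, 1) // reserve a single event
	return r.DelayFrom(now)       // zero means run now; positive means wait that long
}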
} @@ -417,58 +417,58 @@ Loop: go func(ev *event.Event) { pcuid.SetConverged(false) // "block" Process defer wg.Done() - if e := g.Process(v); e != nil { + if e := Process(g, v); e != nil { playback = true - log.Printf("%s[%s]: CheckApply errored: %v", v.GetKind(), v.GetName(), e) + log.Printf("%s: CheckApply errored: %v", VtoR(v).String(), e) if retry == 0 { - if err := obj.Prometheus().UpdateState(fmt.Sprintf("%s[%s]", v.GetKind(), v.GetName()), v.GetKind(), prometheus.ResStateHardFail); err != nil { + if err := obj.Prometheus().UpdateState(VtoR(v).String(), VtoR(v).GetKind(), prometheus.ResStateHardFail); err != nil { // TODO: how to error this? - log.Printf("%s[%s]: Prometheus.UpdateState() errored: %v", v.GetKind(), v.GetName(), err) + log.Printf("%s: Prometheus.UpdateState() errored: %v", VtoR(v).String(), err) } // wrap the error in the sentinel - v.Res.QuiesceGroup().Done() // before the Wait that happens in SendEvent! - v.SendEvent(event.EventExit, &SentinelErr{e}) + VtoR(v).QuiesceGroup().Done() // before the Wait that happens in SendEvent! + VtoR(v).SendEvent(event.EventExit, &SentinelErr{e}) return } if retry > 0 { // don't decrement the -1 retry-- } - if err := obj.Prometheus().UpdateState(fmt.Sprintf("%s[%s]", v.GetKind(), v.GetName()), v.GetKind(), prometheus.ResStateSoftFail); err != nil { + if err := obj.Prometheus().UpdateState(VtoR(v).String(), VtoR(v).GetKind(), prometheus.ResStateSoftFail); err != nil { // TODO: how to error this? - log.Printf("%s[%s]: Prometheus.UpdateState() errored: %v", v.GetKind(), v.GetName(), err) + log.Printf("%s: Prometheus.UpdateState() errored: %v", VtoR(v).String(), err) } - log.Printf("%s[%s]: CheckApply: Retrying after %.4f seconds (%d left)", v.GetKind(), v.GetName(), delay.Seconds(), retry) + log.Printf("%s: CheckApply: Retrying after %.4f seconds (%d left)", VtoR(v).String(), delay.Seconds(), retry) // start the timer... timer.Reset(delay) waiting = true // waiting for retry timer - // don't v.Res.QuiesceGroup().Done() b/c + // don't VtoR(v).QuiesceGroup().Done() b/c // the timer is running and it can exit! return } - retry = v.Meta().Retry // reset on success - close(done) // trigger + retry = VtoR(v).Meta().Retry // reset on success + close(done) // trigger }(ev) ev.ACK() // sync (now mostly useless) case <-timer.C: - if v.Res.Meta().Poll == 0 { // skip for polling + if VtoR(v).Meta().Poll == 0 { // skip for polling wcuid.SetConverged(false) } waiting = false if !timer.Stop() { //<-timer.C // blocks, docs are wrong! } - log.Printf("%s[%s]: CheckApply delay expired!", v.GetKind(), v.GetName()) + log.Printf("%s: CheckApply delay expired!", VtoR(v).String()) close(done) // a CheckApply run (with possibly retry pause) finished case <-done: - if v.Res.Meta().Poll == 0 { // skip for polling + if VtoR(v).Meta().Poll == 0 { // skip for polling wcuid.SetConverged(false) } if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: CheckApply finished!", v.GetKind(), v.GetName()) + log.Printf("%s: CheckApply finished!", VtoR(v).String()) } done = make(chan struct{}) // reset // re-send this event, to trigger a CheckApply() @@ -478,18 +478,18 @@ Loop: // TODO: can this experience indefinite postponement ? // see: https://github.com/golang/go/issues/11506 // pause or exit is in process if not quiescing! - if !v.Res.IsQuiescing() { + if !VtoR(v).IsQuiescing() { playback = false - v.Res.QuiesceGroup().Add(1) // lock around it, b/c still running... + VtoR(v).QuiesceGroup().Add(1) // lock around it, b/c still running... 
go func() { obj.Event() // replay a new event - v.Res.QuiesceGroup().Done() + VtoR(v).QuiesceGroup().Done() }() } } running = false pcuid.SetConverged(true) // "unblock" Process - v.Res.QuiesceGroup().Done() + VtoR(v).QuiesceGroup().Done() case <-wcuid.ConvergedTimer(): wcuid.SetConverged(true) // converged! @@ -503,16 +503,16 @@ Loop: // Worker is the common run frontend of the vertex. It handles all of the retry // and retry delay common code, and ultimately returns the final status of this // vertex execution. -func (g *Graph) Worker(v *Vertex) error { +func Worker(g *pgraph.Graph, v pgraph.Vertex) error { // listen for chan events from Watch() and run // the Process() function when they're received // this avoids us having to pass the data into // the Watch() function about which graph it is // running on, which isolates things nicely... - obj := v.Res + obj := VtoR(v) if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("%s[%s]: Worker: Running", v.GetKind(), v.GetName()) - defer log.Printf("%s[%s]: Worker: Stopped", v.GetKind(), v.GetName()) + log.Printf("%s: Worker: Running", VtoR(v).String()) + defer log.Printf("%s: Worker: Stopped", VtoR(v).String()) } // run the init (should match 1-1 with Close function) if err := obj.Init(); err != nil { @@ -537,7 +537,7 @@ func (g *Graph) Worker(v *Vertex) error { wg.Add(1) go func() { defer wg.Done() - g.innerWorker(v) + innerWorker(g, v) }() var err error // propagate the error up (this is a permanent BAD error!) @@ -547,7 +547,7 @@ func (g *Graph) Worker(v *Vertex) error { // NOTE: we're using the same retry and delay metaparams that CheckApply // uses. This is for practicality. We can separate them later if needed! var watchDelay time.Duration - var watchRetry = v.Meta().Retry // number of tries left, -1 for infinite + var watchRetry = VtoR(v).Meta().Retry // number of tries left, -1 for infinite // watch blocks until it ends, & errors to retry for { // TODO: do we have to stop the converged-timeout when in this block (perhaps we're in the delay block!) @@ -600,7 +600,7 @@ func (g *Graph) Worker(v *Vertex) error { } } timer.Stop() // it's nice to cleanup - log.Printf("%s[%s]: Watch delay expired!", v.GetKind(), v.GetName()) + log.Printf("%s: Watch delay expired!", VtoR(v).String()) // NOTE: we can avoid the send if running Watch guarantees // one CheckApply event on startup! //if pendingSendEvent { // TODO: should this become a list in the future? 
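// Illustrative sketch only, not part of this patch: the CheckApply retries
// above and the Watch retries below share the same metaparam semantics: -1
// retries forever, 0 fails permanently, and a positive count is decremented on
// each failure. The helper name is hypothetical.
func retryAgain(retry *int) bool {
	if *retry == 0 {
		return false // give up; the error becomes permanent
	}
	if *retry > 0 {
		*retry-- // don't decrement the -1 (infinite) case
	}
	return true // wait out the delay timer, then try again
}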
@@ -612,13 +612,13 @@ func (g *Graph) Worker(v *Vertex) error { // TODO: reset the watch retry count after some amount of success var e error - if v.Res.Meta().Poll > 0 { // poll instead of watching :( - cuid, _, _ := v.Res.ConvergerUIDs() // get the converger uid used to report status + if VtoR(v).Meta().Poll > 0 { // poll instead of watching :( + cuid, _, _ := VtoR(v).ConvergerUIDs() // get the converger uid used to report status cuid.StartTimer() - e = v.Res.Poll() + e = VtoR(v).Poll() cuid.StopTimer() // clean up nicely } else { - e = v.Res.Watch() // run the watch normally + e = VtoR(v).Watch() // run the watch normally } if e == nil { // exit signal err = nil // clean exit @@ -628,7 +628,7 @@ func (g *Graph) Worker(v *Vertex) error { err = sentinelErr.err break // sentinel means, perma-exit } - log.Printf("%s[%s]: Watch errored: %v", v.GetKind(), v.GetName(), e) + log.Printf("%s: Watch errored: %v", VtoR(v).String(), e) if watchRetry == 0 { err = fmt.Errorf("Permanent watch error: %v", e) break @@ -636,8 +636,8 @@ func (g *Graph) Worker(v *Vertex) error { if watchRetry > 0 { // don't decrement the -1 watchRetry-- } - watchDelay = time.Duration(v.Meta().Delay) * time.Millisecond - log.Printf("%s[%s]: Watch: Retrying after %.4f seconds (%d left)", v.GetKind(), v.GetName(), watchDelay.Seconds(), watchRetry) + watchDelay = time.Duration(VtoR(v).Meta().Delay) * time.Millisecond + log.Printf("%s: Watch: Retrying after %.4f seconds (%d left)", VtoR(v).String(), watchDelay.Seconds(), watchRetry) // We need to trigger a CheckApply after Watch restarts, so that // we catch any lost events that happened while down. We do this // by getting the Watch resource to send one event once it's up! @@ -656,25 +656,28 @@ func (g *Graph) Worker(v *Vertex) error { // Start is a main kick to start the graph. It goes through in reverse topological // sort order so that events can't hit un-started vertices. -func (g *Graph) Start(first bool) { // start or continue - log.Printf("State: %v -> %v", g.setState(graphStateStarting), g.getState()) - defer log.Printf("State: %v -> %v", g.setState(graphStateStarted), g.getState()) +func Start(g *pgraph.Graph, first bool) { // start or continue + log.Printf("State: %v -> %v", setState(g, graphStateStarting), getState(g)) + defer log.Printf("State: %v -> %v", setState(g, graphStateStarted), getState(g)) t, _ := g.TopologicalSort() indegree := g.InDegree() // compute all of the indegree's - reversed := Reverse(t) + reversed := pgraph.Reverse(t) wg := &sync.WaitGroup{} for _, v := range reversed { // run the Setup() for everyone first // run these in parallel, as long as we wait before continuing wg.Add(1) - go func(vv *Vertex) { + go func(vv pgraph.Vertex) { defer wg.Done() - if !vv.Res.IsWorking() { // if Worker() is not running... - vv.Res.Setup() // initialize some vars in the resource + if !VtoR(vv).IsWorking() { // if Worker() is not running... + VtoR(vv).Setup() // initialize some vars in the resource } }(v) } wg.Wait() + // ptr b/c: Mutex/WaitGroup must not be copied after first use + gwg := WgFromGraph(g) + // run through the topological reverse, and start or unpause each vertex for _, v := range reversed { // selective poke: here we reduce the number of initial pokes @@ -697,37 +700,37 @@ func (g *Graph) Start(first bool) { // start or continue // of the indegree == 0 vertices, and an important aspect of the // Process() function is that even if the state is correct, it // will pass through the Poke so that it flows through the DAG. 
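// Illustrative sketch only, not part of this patch: a recap of the selective
// poke described above. Only the DAG roots, i.e. vertices with an indegree of
// zero, are flagged as starters; every other vertex receives its first event
// through Poke() propagation once its pre-requisites have run. The helper name
// is hypothetical.
func markStarters(g *pgraph.Graph) {
	indegree := g.InDegree() // vertex -> number of incoming edges
	for _, v := range g.Vertices() {
		VtoR(v).Starter(indegree[v] == 0) // true only for the roots
	}
}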
- v.Res.Starter(indegree[v] == 0) + VtoR(v).Starter(indegree[v] == 0) var unpause = true - if !v.Res.IsWorking() { // if Worker() is not running... + if !VtoR(v).IsWorking() { // if Worker() is not running... unpause = false // doesn't need unpausing on first start - g.wg.Add(1) + gwg.Add(1) // must pass in value to avoid races... // see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/ - go func(vv *Vertex) { - defer g.wg.Done() - defer vv.Res.Reset() + go func(vv pgraph.Vertex) { + defer gwg.Done() + defer VtoR(vv).Reset() // TODO: if a sufficient number of workers error, // should something be done? Should these restart // after perma-failure if we have a graph change? - log.Printf("%s[%s]: Started", vv.GetKind(), vv.GetName()) - if err := g.Worker(vv); err != nil { // contains the Watch and CheckApply loops - log.Printf("%s[%s]: Exited with failure: %v", vv.GetKind(), vv.GetName(), err) + log.Printf("%s: Started", VtoR(vv).String()) + if err := Worker(g, vv); err != nil { // contains the Watch and CheckApply loops + log.Printf("%s: Exited with failure: %v", VtoR(vv).String(), err) return } - log.Printf("%s[%s]: Exited", vv.GetKind(), vv.GetName()) + log.Printf("%s: Exited", VtoR(vv).String()) }(v) } select { - case <-v.Res.Started(): // block until started - case <-v.Res.Stopped(): // we failed on init + case <-VtoR(v).Started(): // block until started + case <-VtoR(v).Stopped(): // we failed on init // if the resource Init() fails, we don't hang! } if unpause { // unpause (if needed) - v.Res.SendEvent(event.EventStart, nil) // sync! + VtoR(v).SendEvent(event.EventStart, nil) // sync! } } // we wait for everyone to start before exiting! @@ -736,27 +739,27 @@ func (g *Graph) Start(first bool) { // start or continue // Pause sends pause events to the graph in a topological sort order. If you set // the fastPause argument to true, then it will ask future propagation waves to // not run through the graph before exiting, and instead will exit much quicker. -func (g *Graph) Pause(fastPause bool) { - log.Printf("State: %v -> %v", g.setState(graphStatePausing), g.getState()) - defer log.Printf("State: %v -> %v", g.setState(graphStatePaused), g.getState()) +func Pause(g *pgraph.Graph, fastPause bool) { + log.Printf("State: %v -> %v", setState(g, graphStatePausing), getState(g)) + defer log.Printf("State: %v -> %v", setState(g, graphStatePaused), getState(g)) if fastPause { - g.fastPause = true // set flag + g.SetValue("fastpause", true) // set flag } t, _ := g.TopologicalSort() for _, v := range t { // squeeze out the events... - v.SendEvent(event.EventPause, nil) // sync + VtoR(v).SendEvent(event.EventPause, nil) // sync } - g.fastPause = false // reset flag + g.SetValue("fastpause", false) // reset flag } // Exit sends exit events to the graph in a topological sort order. -func (g *Graph) Exit() { +func Exit(g *pgraph.Graph) { if g == nil { // empty graph that wasn't populated yet return } // FIXME: a second ^C could put this into fast pause, but do it for now! - g.Pause(true) // implement this with pause to avoid duplicating the code + Pause(g, true) // implement this with pause to avoid duplicating the code t, _ := g.TopologicalSort() for _, v := range t { // squeeze out the events... @@ -766,8 +769,25 @@ func (g *Graph) Exit() { // when we hit the 'default' in the select statement! 
// XXX: we can do this to quiesce, but it's not necessary now - v.SendEvent(event.EventExit, nil) - v.Res.WaitGroup().Wait() + VtoR(v).SendEvent(event.EventExit, nil) + VtoR(v).WaitGroup().Wait() } - g.wg.Wait() // for now, this doesn't need to be a separate Wait() method + gwg := WgFromGraph(g) + gwg.Wait() // for now, this doesn't need to be a separate Wait() method +} + +// WgFromGraph returns a pointer to the waitgroup stored with the graph, +// otherwise it panics. If one does not exist, it will create it. +func WgFromGraph(g *pgraph.Graph) *sync.WaitGroup { + x, exists := g.Value("waitgroup") + if !exists { + g.SetValue("waitgroup", &sync.WaitGroup{}) + x, _ = g.Value("waitgroup") + } + + wg, ok := x.(*sync.WaitGroup) + if !ok { + panic("not a *sync.WaitGroup") + } + return wg } diff --git a/resources/autoedge.go b/resources/autoedge.go index ba1815f2..5102bac4 100644 --- a/resources/autoedge.go +++ b/resources/autoedge.go @@ -37,7 +37,7 @@ func UIDExistsInUIDs(uid ResUID, uids []ResUID) bool { // addEdgesByMatchingUIDS adds edges to the vertex in a graph based on if it // matches a uid list. -func addEdgesByMatchingUIDS(g *pgraph.Graph, v *pgraph.Vertex, uids []ResUID) []bool { +func addEdgesByMatchingUIDS(g *pgraph.Graph, v pgraph.Vertex, uids []ResUID) []bool { // search for edges and see what matches! var result []bool @@ -50,22 +50,22 @@ func addEdgesByMatchingUIDS(g *pgraph.Graph, v *pgraph.Vertex, uids []ResUID) [] continue } if b, ok := g.Value("debug"); ok && util.Bool(b) { - log.Printf("Compile: AutoEdge: Match: %s[%s] with UID: %s[%s]", vv.GetKind(), vv.GetName(), uid.GetKind(), uid.GetName()) + log.Printf("Compile: AutoEdge: Match: %s with UID: %s[%s]", VtoR(vv).String(), uid.GetKind(), uid.GetName()) } // we must match to an effective UID for the resource, // that is to say, the name value of a res is a helpful // handle, but it is not necessarily a unique identity! // remember, resources can return multiple UID's each! - if UIDExistsInUIDs(uid, vv.UIDs()) { + if UIDExistsInUIDs(uid, VtoR(vv).UIDs()) { // add edge from: vv -> v if uid.IsReversed() { - txt := fmt.Sprintf("AutoEdge: %s[%s] -> %s[%s]", vv.GetKind(), vv.GetName(), v.GetKind(), v.GetName()) + txt := fmt.Sprintf("AutoEdge: %s -> %s", VtoR(vv).String(), VtoR(v).String()) log.Printf("Compile: Adding %s", txt) - g.AddEdge(vv, v, NewEdge(txt)) + g.AddEdge(vv, v, pgraph.NewEdge(txt)) } else { // edges go the "normal" way, eg: pkg resource - txt := fmt.Sprintf("AutoEdge: %s[%s] -> %s[%s]", v.GetKind(), v.GetName(), vv.GetKind(), vv.GetName()) + txt := fmt.Sprintf("AutoEdge: %s -> %s", VtoR(v).String(), VtoR(vv).String()) log.Printf("Compile: Adding %s", txt) - g.AddEdge(v, vv, NewEdge(txt)) + g.AddEdge(v, vv, pgraph.NewEdge(txt)) } found = true break @@ -80,19 +80,19 @@ func addEdgesByMatchingUIDS(g *pgraph.Graph, v *pgraph.Vertex, uids []ResUID) [] func AutoEdges(g *pgraph.Graph) { log.Println("Compile: Adding AutoEdges...") for _, v := range g.Vertices() { // for each vertexes autoedges - if !v.Meta().AutoEdge { // is the metaparam true? + if !VtoR(v).Meta().AutoEdge { // is the metaparam true? continue } - autoEdgeObj := v.AutoEdges() + autoEdgeObj := VtoR(v).AutoEdges() if autoEdgeObj == nil { - log.Printf("%s[%s]: Config: No auto edges were found!", v.GetKind(), v.GetName()) + log.Printf("%s: Config: No auto edges were found!", VtoR(v).String()) continue // next vertex } for { // while the autoEdgeObj has more uids to add... uids := autoEdgeObj.Next() // get some! 
if uids == nil { - log.Printf("%s[%s]: Config: The auto edge list is empty!", v.GetKind(), v.GetName()) + log.Printf("%s: Config: The auto edge list is empty!", VtoR(v).String()) break // inner loop } if b, ok := g.Value("debug"); ok && util.Bool(b) { diff --git a/pgraph/autogroup.go b/resources/autogroup.go similarity index 74% rename from pgraph/autogroup.go rename to resources/autogroup.go index e83dbd74..0790ae71 100644 --- a/pgraph/autogroup.go +++ b/resources/autogroup.go @@ -15,12 +15,13 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package pgraph +package resources import ( "fmt" "log" + "github.com/purpleidea/mgmt/pgraph" "github.com/purpleidea/mgmt/util" errwrap "github.com/pkg/errors" @@ -29,19 +30,19 @@ import ( // AutoGrouper is the required interface to implement for an autogroup algorithm type AutoGrouper interface { // listed in the order these are typically called in... - name() string // friendly identifier - init(*Graph) error // only call once - vertexNext() (*Vertex, *Vertex, error) // mostly algorithmic - vertexCmp(*Vertex, *Vertex) error // can we merge these ? - vertexMerge(*Vertex, *Vertex) (*Vertex, error) // vertex merge fn to use - edgeMerge(*Edge, *Edge) *Edge // edge merge fn to use - vertexTest(bool) (bool, error) // call until false + name() string // friendly identifier + init(*pgraph.Graph) error // only call once + vertexNext() (pgraph.Vertex, pgraph.Vertex, error) // mostly algorithmic + vertexCmp(pgraph.Vertex, pgraph.Vertex) error // can we merge these ? + vertexMerge(pgraph.Vertex, pgraph.Vertex) (pgraph.Vertex, error) // vertex merge fn to use + edgeMerge(*pgraph.Edge, *pgraph.Edge) *pgraph.Edge // edge merge fn to use + vertexTest(bool) (bool, error) // call until false } // baseGrouper is the base type for implementing the AutoGrouper interface type baseGrouper struct { - graph *Graph // store a pointer to the graph - vertices []*Vertex // cached list of vertices + graph *pgraph.Graph // store a pointer to the graph + vertices []pgraph.Vertex // cached list of vertices i int j int done bool @@ -54,7 +55,7 @@ func (ag *baseGrouper) name() string { // init is called only once and before using other AutoGrouper interface methods // the name method is the only exception: call it any time without side effects! -func (ag *baseGrouper) init(g *Graph) error { +func (ag *baseGrouper) init(g *pgraph.Graph) error { if ag.graph != nil { return fmt.Errorf("the init method has already been called") } @@ -73,7 +74,7 @@ func (ag *baseGrouper) init(g *Graph) error { // an intelligent algorithm would selectively offer only valid pairs of vertices // these should satisfy logical grouping requirements for the autogroup designs! // the desired algorithms can override, but keep this method as a base iterator! -func (ag *baseGrouper) vertexNext() (v1, v2 *Vertex, err error) { +func (ag *baseGrouper) vertexNext() (v1, v2 pgraph.Vertex, err error) { // this does a for v... { for w... { return v, w }} but stepwise! 
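// Illustrative sketch only, not part of this patch: the stepwise iteration
// below is a resumable form of this plain nested loop over ordered vertex
// pairs; keeping i and j as struct fields lets each vertexNext() call hand back
// exactly one pair. The helper name is hypothetical.
func allVertexPairs(vertices []pgraph.Vertex) (pairs [][2]pgraph.Vertex) {
	for i := 0; i < len(vertices); i++ {
		for j := 0; j < len(vertices); j++ {
			pairs = append(pairs, [2]pgraph.Vertex{vertices[i], vertices[j]}) // vertexCmp skips i == j later
		}
	}
	return pairs
}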
l := len(ag.vertices) if ag.i < l { @@ -108,43 +109,43 @@ func (ag *baseGrouper) vertexNext() (v1, v2 *Vertex, err error) { return } -func (ag *baseGrouper) vertexCmp(v1, v2 *Vertex) error { +func (ag *baseGrouper) vertexCmp(v1, v2 pgraph.Vertex) error { if v1 == nil || v2 == nil { return fmt.Errorf("the vertex is nil") } if v1 == v2 { // skip yourself return fmt.Errorf("the vertices are the same") } - if v1.GetKind() != v2.GetKind() { // we must group similar kinds + if VtoR(v1).GetKind() != VtoR(v2).GetKind() { // we must group similar kinds // TODO: maybe future resources won't need this limitation? return fmt.Errorf("the two resources aren't the same kind") } // someone doesn't want to group! - if !v1.Meta().AutoGroup || !v2.Meta().AutoGroup { + if !VtoR(v1).Meta().AutoGroup || !VtoR(v2).Meta().AutoGroup { return fmt.Errorf("one of the autogroup flags is false") } - if v1.Res.IsGrouped() { // already grouped! + if VtoR(v1).IsGrouped() { // already grouped! return fmt.Errorf("already grouped") } - if len(v2.Res.GetGroup()) > 0 { // already has children grouped! + if len(VtoR(v2).GetGroup()) > 0 { // already has children grouped! return fmt.Errorf("already has groups") } - if !v1.Res.GroupCmp(v2.Res) { // resource groupcmp failed! + if !VtoR(v1).GroupCmp(VtoR(v2)) { // resource groupcmp failed! return fmt.Errorf("the GroupCmp failed") } return nil // success } -func (ag *baseGrouper) vertexMerge(v1, v2 *Vertex) (v *Vertex, err error) { +func (ag *baseGrouper) vertexMerge(v1, v2 pgraph.Vertex) (v pgraph.Vertex, err error) { // NOTE: it's important to use w.Res instead of w, b/c // the w by itself is the *Vertex obj, not the *Res obj // which is contained within it! They both satisfy the // Res interface, which is why both will compile! :( - err = v1.Res.GroupRes(v2.Res) // GroupRes skips stupid groupings - return // success or fail, and no need to merge the actual vertices! + err = VtoR(v1).GroupRes(VtoR(v2)) // GroupRes skips stupid groupings + return // success or fail, and no need to merge the actual vertices! } -func (ag *baseGrouper) edgeMerge(e1, e2 *Edge) *Edge { +func (ag *baseGrouper) edgeMerge(e1, e2 *pgraph.Edge) *pgraph.Edge { return e1 // noop } @@ -160,18 +161,18 @@ func (ag *baseGrouper) vertexTest(b bool) (bool, error) { } // TODO: this algorithm may not be correct in all cases. replace if needed! -type nonReachabilityGrouper struct { +type NonReachabilityGrouper struct { baseGrouper // "inherit" what we want, and reimplement the rest } -func (ag *nonReachabilityGrouper) name() string { - return "nonReachabilityGrouper" +func (ag *NonReachabilityGrouper) name() string { + return "NonReachabilityGrouper" } // this algorithm relies on the observation that if there's a path from a to b, // then they *can't* be merged (b/c of the existing dependency) so therefore we // merge anything that *doesn't* satisfy this condition or that of the reverse! -func (ag *nonReachabilityGrouper) vertexNext() (v1, v2 *Vertex, err error) { +func (ag *NonReachabilityGrouper) vertexNext() (v1, v2 pgraph.Vertex, err error) { for { v1, v2, err = ag.baseGrouper.vertexNext() // get all iterable pairs if err != nil { @@ -202,15 +203,15 @@ func (ag *nonReachabilityGrouper) vertexNext() (v1, v2 *Vertex, err error) { // and then by deleting v2 from the graph. Since more than one edge between two // vertices is not allowed, duplicate edges are merged as well. an edge merge // function can be provided if you'd like to control how you merge the edges! 
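// Illustrative sketch only, not part of this patch: a recap of the vertexCmp
// rules above before moving on to VertexMerge. Two vertices are grouping
// candidates only when all of the following hold; the helper name is
// hypothetical.
func canGroup(v1, v2 pgraph.Vertex) bool {
	return v1 != v2 && // never group a vertex with itself
		VtoR(v1).GetKind() == VtoR(v2).GetKind() && // only like kinds, for now
		VtoR(v1).Meta().AutoGroup && VtoR(v2).Meta().AutoGroup && // both opted in
		!VtoR(v1).IsGrouped() && // v1 isn't already grouped into something else
		len(VtoR(v2).GetGroup()) == 0 && // v2 has no grouped children of its own
		VtoR(v1).GroupCmp(VtoR(v2)) // resource-specific compatibility check
}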
-func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) (*Vertex, error), edgeMergeFn func(*Edge, *Edge) *Edge) error { +func VertexMerge(g *pgraph.Graph, v1, v2 pgraph.Vertex, vertexMergeFn func(pgraph.Vertex, pgraph.Vertex) (pgraph.Vertex, error), edgeMergeFn func(*pgraph.Edge, *pgraph.Edge) *pgraph.Edge) error { // methodology // 1) edges between v1 and v2 are removed //Loop: - for k1 := range g.adjacency { - for k2 := range g.adjacency[k1] { + for k1 := range g.Adjacency() { + for k2 := range g.Adjacency()[k1] { // v1 -> v2 || v2 -> v1 if (k1 == v1 && k2 == v2) || (k1 == v2 && k2 == v1) { - delete(g.adjacency[k1], k2) // delete map & edge + delete(g.Adjacency()[k1], k2) // delete map & edge // NOTE: if we assume this is a DAG, then we can // assume only v1 -> v2 OR v2 -> v1 exists, and // we can break out of these loops immediately! @@ -222,10 +223,10 @@ func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) // 2) edges that point towards v2 from X now point to v1 from X (no dupes) for _, x := range g.IncomingGraphVertices(v2) { // all to vertex v (??? -> v) - e := g.adjacency[x][v2] // previous edge + e := g.Adjacency()[x][v2] // previous edge r := g.Reachability(x, v1) - // merge e with ex := g.adjacency[x][v1] if it exists! - if ex, exists := g.adjacency[x][v1]; exists && edgeMergeFn != nil && len(r) == 0 { + // merge e with ex := g.Adjacency()[x][v1] if it exists! + if ex, exists := g.Adjacency()[x][v1]; exists && edgeMergeFn != nil && len(r) == 0 { e = edgeMergeFn(e, ex) } if len(r) == 0 { // if not reachable, add it @@ -238,21 +239,21 @@ func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) continue } // this edge is from: prev, to: next - ex, _ := g.adjacency[prev][next] // get + ex, _ := g.Adjacency()[prev][next] // get ex = edgeMergeFn(ex, e) - g.adjacency[prev][next] = ex // set + g.Adjacency()[prev][next] = ex // set prev = next } } - delete(g.adjacency[x], v2) // delete old edge + delete(g.Adjacency()[x], v2) // delete old edge } // 3) edges that point from v2 to X now point from v1 to X (no dupes) for _, x := range g.OutgoingGraphVertices(v2) { // all from vertex v (v -> ???) - e := g.adjacency[v2][x] // previous edge + e := g.Adjacency()[v2][x] // previous edge r := g.Reachability(v1, x) - // merge e with ex := g.adjacency[v1][x] if it exists! - if ex, exists := g.adjacency[v1][x]; exists && edgeMergeFn != nil && len(r) == 0 { + // merge e with ex := g.Adjacency()[v1][x] if it exists! + if ex, exists := g.Adjacency()[v1][x]; exists && edgeMergeFn != nil && len(r) == 0 { e = edgeMergeFn(e, ex) } if len(r) == 0 { @@ -265,13 +266,13 @@ func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) continue } // this edge is from: prev, to: next - ex, _ := g.adjacency[prev][next] + ex, _ := g.Adjacency()[prev][next] ex = edgeMergeFn(ex, e) - g.adjacency[prev][next] = ex + g.Adjacency()[prev][next] = ex prev = next } } - delete(g.adjacency[v2], x) + delete(g.Adjacency()[v2], x) } // 4) merge and then remove the (now merged/grouped) vertex @@ -279,7 +280,8 @@ func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) if v, err := vertexMergeFn(v1, v2); err != nil { return err } else if v != nil { // replace v1 with the "merged" version... - *v1 = *v // TODO: is this safe? (replacing mutexes is undefined!) + //*v1 = *v // TODO: is this safe? (replacing mutexes is undefined!) 
+ v1 = v } } g.DeleteVertex(v2) // remove grouped vertex @@ -292,7 +294,7 @@ func (g *Graph) VertexMerge(v1, v2 *Vertex, vertexMergeFn func(*Vertex, *Vertex) } // autoGroup is the mechanical auto group "runner" that runs the interface spec -func (g *Graph) autoGroup(ag AutoGrouper) chan string { +func autoGroup(g *pgraph.Graph, ag AutoGrouper) chan string { strch := make(chan string) // output log messages here go func(strch chan string) { strch <- fmt.Sprintf("Compile: Grouping: Algorithm: %v...", ag.name()) @@ -301,7 +303,7 @@ func (g *Graph) autoGroup(ag AutoGrouper) chan string { } for { - var v, w *Vertex + var v, w pgraph.Vertex v, w, err := ag.vertexNext() // get pair to compare if err != nil { log.Fatalf("error running autoGroup(vertexNext): %v", err) @@ -317,7 +319,7 @@ func (g *Graph) autoGroup(ag AutoGrouper) chan string { } // remove grouped vertex and merge edges (res is safe) - } else if err := g.VertexMerge(v, w, ag.vertexMerge, ag.edgeMerge); err != nil { // merge... + } else if err := VertexMerge(g, v, w, ag.vertexMerge, ag.edgeMerge); err != nil { // merge... strch <- fmt.Sprintf("Compile: Grouping: !VertexMerge for: %s into %s", wStr, vStr) } else { // success! @@ -340,11 +342,11 @@ func (g *Graph) autoGroup(ag AutoGrouper) chan string { } // AutoGroup runs the auto grouping on the graph and prints out log messages -func (g *Graph) AutoGroup() { +func AutoGroup(g *pgraph.Graph, ag AutoGrouper) { // receive log messages from channel... // this allows test cases to avoid printing them when they're unwanted! // TODO: this algorithm may not be correct in all cases. replace if needed! - for str := range g.autoGroup(&nonReachabilityGrouper{}) { + for str := range autoGroup(g, ag) { log.Println(str) } } diff --git a/resources/autogroup_test.go b/resources/autogroup_test.go new file mode 100644 index 00000000..a93fa9c1 --- /dev/null +++ b/resources/autogroup_test.go @@ -0,0 +1,721 @@ +// Mgmt +// Copyright (C) 2013-2017+ James Shubin and the project contributors +// Written by James Shubin and the project contributors +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package resources + +import ( + "fmt" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/purpleidea/mgmt/pgraph" + "github.com/purpleidea/mgmt/util" +) + +type testGrouper struct { + // TODO: this algorithm may not be correct in all cases. replace if needed! 
+ NonReachabilityGrouper // "inherit" what we want, and reimplement the rest +} + +func (ag *testGrouper) name() string { + return "testGrouper" +} + +func (ag *testGrouper) vertexMerge(v1, v2 pgraph.Vertex) (v pgraph.Vertex, err error) { + if err := VtoR(v1).GroupRes(VtoR(v2)); err != nil { // group them first + return nil, err + } + // HACK: update the name so it matches full list of self+grouped + obj := VtoR(v1) + names := strings.Split(obj.GetName(), ",") // load in stored names + for _, n := range obj.GetGroup() { + names = append(names, n.GetName()) // add my contents + } + names = util.StrRemoveDuplicatesInList(names) // remove duplicates + sort.Strings(names) + obj.SetName(strings.Join(names, ",")) + return // success or fail, and no need to merge the actual vertices! +} + +func (ag *testGrouper) edgeMerge(e1, e2 *pgraph.Edge) *pgraph.Edge { + // HACK: update the name so it makes a union of both names + n1 := strings.Split(e1.Name, ",") // load + n2 := strings.Split(e2.Name, ",") // load + names := append(n1, n2...) + names = util.StrRemoveDuplicatesInList(names) // remove duplicates + sort.Strings(names) + return pgraph.NewEdge(strings.Join(names, ",")) +} + +// helper function +func runGraphCmp(t *testing.T, g1, g2 *pgraph.Graph) { + AutoGroup(g1, &testGrouper{}) // edits the graph + err := GraphCmp(g1, g2) + if err != nil { + t.Logf(" actual (g1): %v%v", g1, fullPrint(g1)) + t.Logf("expected (g2): %v%v", g2, fullPrint(g2)) + t.Logf("Cmp error:") + t.Errorf("%v", err) + } +} + +type NoopResTest struct { + NoopRes +} + +func (obj *NoopResTest) GroupCmp(r Res) bool { + res, ok := r.(*NoopResTest) + if !ok { + return false + } + + // TODO: implement this in vertexCmp for *testGrouper instead? + if strings.Contains(res.Name, ",") { // HACK + return false // element to be grouped is already grouped! + } + + // group if they start with the same letter! (helpful hack for testing) + return obj.Name[0] == res.Name[0] +} + +func NewNoopResTest(name string) *NoopResTest { + obj := &NoopResTest{ + NoopRes: NoopRes{ + BaseRes: BaseRes{ + Name: name, + MetaParams: MetaParams{ + AutoGroup: true, // always autogroup + }, + }, + }, + } + return obj +} + +// GraphCmp compares the topology of two graphs and returns nil if they're equal +// It also compares if grouped element groups are identical +func GraphCmp(g1, g2 *pgraph.Graph) error { + if n1, n2 := g1.NumVertices(), g2.NumVertices(); n1 != n2 { + return fmt.Errorf("graph g1 has %d vertices, while g2 has %d", n1, n2) + } + if e1, e2 := g1.NumEdges(), g2.NumEdges(); e1 != e2 { + return fmt.Errorf("graph g1 has %d edges, while g2 has %d", e1, e2) + } + + var m = make(map[pgraph.Vertex]pgraph.Vertex) // g1 to g2 vertex correspondence +Loop: + // check vertices + for v1 := range g1.Adjacency() { // for each vertex in g1 + + l1 := strings.Split(VtoR(v1).GetName(), ",") // make list of everyone's names... + for _, x1 := range VtoR(v1).GetGroup() { + l1 = append(l1, x1.GetName()) // add my contents + } + l1 = util.StrRemoveDuplicatesInList(l1) // remove duplicates + sort.Strings(l1) + + // inner loop + for v2 := range g2.Adjacency() { // does it match in g2 ? + + l2 := strings.Split(VtoR(v2).GetName(), ",") + for _, x2 := range VtoR(v2).GetGroup() { + l2 = append(l2, x2.GetName()) + } + l2 = util.StrRemoveDuplicatesInList(l2) // remove duplicates + sort.Strings(l2) + + // does l1 match l2 ? + if ListStrCmp(l1, l2) { // cmp! 
+ m[v1] = v2 + continue Loop + } + } + return fmt.Errorf("graph g1, has no match in g2 for: %v", VtoR(v1).GetName()) + } + // vertices (and groups) match :) + + // check edges + for v1 := range g1.Adjacency() { // for each vertex in g1 + v2 := m[v1] // lookup in map to get correspondance + // g1.Adjacency()[v1] corresponds to g2.Adjacency()[v2] + if e1, e2 := len(g1.Adjacency()[v1]), len(g2.Adjacency()[v2]); e1 != e2 { + return fmt.Errorf("graph g1, vertex(%v) has %d edges, while g2, vertex(%v) has %d", VtoR(v1).GetName(), e1, VtoR(v2).GetName(), e2) + } + + for vv1, ee1 := range g1.Adjacency()[v1] { + vv2 := m[vv1] + ee2 := g2.Adjacency()[v2][vv2] + + // these are edges from v1 -> vv1 via ee1 (graph 1) + // to cmp to edges from v2 -> vv2 via ee2 (graph 2) + + // check: (1) vv1 == vv2 ? (we've already checked this!) + l1 := strings.Split(VtoR(vv1).GetName(), ",") // make list of everyone's names... + for _, x1 := range VtoR(vv1).GetGroup() { + l1 = append(l1, x1.GetName()) // add my contents + } + l1 = util.StrRemoveDuplicatesInList(l1) // remove duplicates + sort.Strings(l1) + + l2 := strings.Split(VtoR(vv2).GetName(), ",") + for _, x2 := range VtoR(vv2).GetGroup() { + l2 = append(l2, x2.GetName()) + } + l2 = util.StrRemoveDuplicatesInList(l2) // remove duplicates + sort.Strings(l2) + + // does l1 match l2 ? + if !ListStrCmp(l1, l2) { // cmp! + return fmt.Errorf("graph g1 and g2 don't agree on: %v and %v", VtoR(vv1).GetName(), VtoR(vv2).GetName()) + } + + // check: (2) ee1 == ee2 + if ee1.Name != ee2.Name { + return fmt.Errorf("graph g1 edge(%v) doesn't match g2 edge(%v)", ee1.Name, ee2.Name) + } + } + } + + // check meta parameters + for v1 := range g1.Adjacency() { // for each vertex in g1 + for v2 := range g2.Adjacency() { // does it match in g2 ? + s1, s2 := VtoR(v1).Meta().Sema, VtoR(v2).Meta().Sema + sort.Strings(s1) + sort.Strings(s2) + if !reflect.DeepEqual(s1, s2) { + return fmt.Errorf("vertex %s and vertex %s have different semaphores", VtoR(v1).GetName(), VtoR(v2).GetName()) + } + } + } + + return nil // success! +} + +// ListStrCmp compares two lists of strings +func ListStrCmp(a, b []string) bool { + //fmt.Printf("CMP: %v with %v\n", a, b) // debugging + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func fullPrint(g *pgraph.Graph) (str string) { + str += "\n" + for v := range g.Adjacency() { + if semas := VtoR(v).Meta().Sema; len(semas) > 0 { + str += fmt.Sprintf("* v: %v; sema: %v\n", VtoR(v).GetName(), semas) + } else { + str += fmt.Sprintf("* v: %v\n", VtoR(v).GetName()) + } + // TODO: add explicit grouping data? 
+ } + for v1 := range g.Adjacency() { + for v2, e := range g.Adjacency()[v1] { + str += fmt.Sprintf("* e: %v -> %v # %v\n", VtoR(v1).GetName(), VtoR(v2).GetName(), e.Name) + } + } + return +} + +func TestDurationAssumptions(t *testing.T) { + var d time.Duration + if (d == 0) != true { + t.Errorf("empty time.Duration is no longer equal to zero") + } + if (d > 0) != false { + t.Errorf("empty time.Duration is now greater than zero") + } +} + +// all of the following test cases are laid out with the following semantics: +// * vertices which start with the same single letter are considered "like" +// * "like" elements should be merged +// * vertices can have any integer after their single letter "family" type +// * grouped vertices should have a name with a comma separated list of names +// * edges follow the same conventions about grouping + +// empty graph +func TestPgraphGrouping1(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + g2, _ := pgraph.NewGraph("g2") // expected result + runGraphCmp(t, g1, g2) +} + +// single vertex +func TestPgraphGrouping2(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { // grouping to limit variable scope + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + g1.AddVertex(a1) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + g2.AddVertex(a1) + } + runGraphCmp(t, g1, g2) +} + +// two vertices +func TestPgraphGrouping3(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + g1.AddVertex(a1, b1) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + g2.AddVertex(a1, b1) + } + runGraphCmp(t, g1, g2) +} + +// two vertices merge +func TestPgraphGrouping4(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + g1.AddVertex(a1, a2) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2")) + g2.AddVertex(a) + } + runGraphCmp(t, g1, g2) +} + +// three vertices merge +func TestPgraphGrouping5(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + a3 := pgraph.NewVertex(NewNoopResTest("a3")) + g1.AddVertex(a1, a2, a3) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2,a3")) + g2.AddVertex(a) + } + runGraphCmp(t, g1, g2) +} + +// three vertices, two merge +func TestPgraphGrouping6(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + g1.AddVertex(a1, a2, b1) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + g2.AddVertex(a, b1) + } + runGraphCmp(t, g1, g2) +} + +// four vertices, three merge +func TestPgraphGrouping7(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + a3 := pgraph.NewVertex(NewNoopResTest("a3")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + g1.AddVertex(a1, 
a2, a3, b1) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2,a3")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + g2.AddVertex(a, b1) + } + runGraphCmp(t, g1, g2) +} + +// four vertices, two&two merge +func TestPgraphGrouping8(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + b2 := pgraph.NewVertex(NewNoopResTest("b2")) + g1.AddVertex(a1, a2, b1, b2) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2")) + b := pgraph.NewVertex(NewNoopResTest("b1,b2")) + g2.AddVertex(a, b) + } + runGraphCmp(t, g1, g2) +} + +// five vertices, two&three merge +func TestPgraphGrouping9(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + b2 := pgraph.NewVertex(NewNoopResTest("b2")) + b3 := pgraph.NewVertex(NewNoopResTest("b3")) + g1.AddVertex(a1, a2, b1, b2, b3) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2")) + b := pgraph.NewVertex(NewNoopResTest("b1,b2,b3")) + g2.AddVertex(a, b) + } + runGraphCmp(t, g1, g2) +} + +// three unique vertices +func TestPgraphGrouping10(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + g1.AddVertex(a1, b1, c1) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + g2.AddVertex(a1, b1, c1) + } + runGraphCmp(t, g1, g2) +} + +// three unique vertices, two merge +func TestPgraphGrouping11(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + b2 := pgraph.NewVertex(NewNoopResTest("b2")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + g1.AddVertex(a1, b1, b2, c1) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b := pgraph.NewVertex(NewNoopResTest("b1,b2")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + g2.AddVertex(a1, b, c1) + } + runGraphCmp(t, g1, g2) +} + +// simple merge 1 +// a1 a2 a1,a2 +// \ / >>> | (arrows point downwards) +// b b +func TestPgraphGrouping12(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + g1.AddEdge(a1, b1, e1) + g1.AddEdge(a2, b1, e2) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + e := pgraph.NewEdge("e1,e2") + g2.AddEdge(a, b1, e) + } + runGraphCmp(t, g1, g2) +} + +// simple merge 2 +// b b +// / \ >>> | (arrows point downwards) +// a1 a2 a1,a2 +func TestPgraphGrouping13(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + b1 := 
pgraph.NewVertex(NewNoopResTest("b1")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + g1.AddEdge(b1, a1, e1) + g1.AddEdge(b1, a2, e2) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + e := pgraph.NewEdge("e1,e2") + g2.AddEdge(b1, a, e) + } + runGraphCmp(t, g1, g2) +} + +// triple merge +// a1 a2 a3 a1,a2,a3 +// \ | / >>> | (arrows point downwards) +// b b +func TestPgraphGrouping14(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + a3 := pgraph.NewVertex(NewNoopResTest("a3")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + e3 := pgraph.NewEdge("e3") + g1.AddEdge(a1, b1, e1) + g1.AddEdge(a2, b1, e2) + g1.AddEdge(a3, b1, e3) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2,a3")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + e := pgraph.NewEdge("e1,e2,e3") + g2.AddEdge(a, b1, e) + } + runGraphCmp(t, g1, g2) +} + +// chain merge +// a1 a1 +// / \ | +// b1 b2 >>> b1,b2 (arrows point downwards) +// \ / | +// c1 c1 +func TestPgraphGrouping15(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + b2 := pgraph.NewVertex(NewNoopResTest("b2")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + e3 := pgraph.NewEdge("e3") + e4 := pgraph.NewEdge("e4") + g1.AddEdge(a1, b1, e1) + g1.AddEdge(a1, b2, e2) + g1.AddEdge(b1, c1, e3) + g1.AddEdge(b2, c1, e4) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b := pgraph.NewVertex(NewNoopResTest("b1,b2")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + e1 := pgraph.NewEdge("e1,e2") + e2 := pgraph.NewEdge("e3,e4") + g2.AddEdge(a1, b, e1) + g2.AddEdge(b, c1, e2) + } + runGraphCmp(t, g1, g2) +} + +// re-attach 1 (outer) +// technically the second possibility is valid too, depending on which order we +// merge edges in, and if we don't filter out any unnecessary edges afterwards! +// a1 a2 a1,a2 a1,a2 +// | / | | \ +// b1 / >>> b1 OR b1 / (arrows point downwards) +// | / | | / +// c1 c1 c1 +func TestPgraphGrouping16(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + e3 := pgraph.NewEdge("e3") + g1.AddEdge(a1, b1, e1) + g1.AddEdge(b1, c1, e2) + g1.AddEdge(a2, c1, e3) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + e1 := pgraph.NewEdge("e1,e3") + e2 := pgraph.NewEdge("e2,e3") // e3 gets "merged through" to BOTH edges! 
+ g2.AddEdge(a, b1, e1) + g2.AddEdge(b1, c1, e2) + } + runGraphCmp(t, g1, g2) +} + +// re-attach 2 (inner) +// a1 b2 a1 +// | / | +// b1 / >>> b1,b2 (arrows point downwards) +// | / | +// c1 c1 +func TestPgraphGrouping17(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + b2 := pgraph.NewVertex(NewNoopResTest("b2")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + e3 := pgraph.NewEdge("e3") + g1.AddEdge(a1, b1, e1) + g1.AddEdge(b1, c1, e2) + g1.AddEdge(b2, c1, e3) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b := pgraph.NewVertex(NewNoopResTest("b1,b2")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2,e3") + g2.AddEdge(a1, b, e1) + g2.AddEdge(b, c1, e2) + } + runGraphCmp(t, g1, g2) +} + +// re-attach 3 (double) +// similar to "re-attach 1", technically there is a second possibility for this +// a2 a1 b2 a1,a2 +// \ | / | +// \ b1 / >>> b1,b2 (arrows point downwards) +// \ | / | +// c1 c1 +func TestPgraphGrouping18(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + b1 := pgraph.NewVertex(NewNoopResTest("b1")) + b2 := pgraph.NewVertex(NewNoopResTest("b2")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + e3 := pgraph.NewEdge("e3") + e4 := pgraph.NewEdge("e4") + g1.AddEdge(a1, b1, e1) + g1.AddEdge(b1, c1, e2) + g1.AddEdge(a2, c1, e3) + g1.AddEdge(b2, c1, e4) + } + g2, _ := pgraph.NewGraph("g2") // expected result + { + a := pgraph.NewVertex(NewNoopResTest("a1,a2")) + b := pgraph.NewVertex(NewNoopResTest("b1,b2")) + c1 := pgraph.NewVertex(NewNoopResTest("c1")) + e1 := pgraph.NewEdge("e1,e3") + e2 := pgraph.NewEdge("e2,e3,e4") // e3 gets "merged through" to BOTH edges! + g2.AddEdge(a, b, e1) + g2.AddEdge(b, c1, e2) + } + runGraphCmp(t, g1, g2) +} + +// connected merge 0, (no change!) +// a1 a1 +// \ >>> \ (arrows point downwards) +// a2 a2 +func TestPgraphGroupingConnected0(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + e1 := pgraph.NewEdge("e1") + g1.AddEdge(a1, a2, e1) + } + g2, _ := pgraph.NewGraph("g2") // expected result ? + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + e1 := pgraph.NewEdge("e1") + g2.AddEdge(a1, a2, e1) + } + runGraphCmp(t, g1, g2) +} + +// connected merge 1, (no change!) +// a1 a1 +// \ \ +// b >>> b (arrows point downwards) +// \ \ +// a2 a2 +func TestPgraphGroupingConnected1(t *testing.T) { + g1, _ := pgraph.NewGraph("g1") // original graph + { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b := pgraph.NewVertex(NewNoopResTest("b")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + g1.AddEdge(a1, b, e1) + g1.AddEdge(b, a2, e2) + } + g2, _ := pgraph.NewGraph("g2") // expected result ? 
+ { + a1 := pgraph.NewVertex(NewNoopResTest("a1")) + b := pgraph.NewVertex(NewNoopResTest("b")) + a2 := pgraph.NewVertex(NewNoopResTest("a2")) + e1 := pgraph.NewEdge("e1") + e2 := pgraph.NewEdge("e2") + g2.AddEdge(a1, b, e1) + g2.AddEdge(b, a2, e2) + } + runGraphCmp(t, g1, g2) +} diff --git a/resources/pgraph.go b/resources/pgraph.go index 99142ad4..ca18ef3e 100644 --- a/resources/pgraph.go +++ b/resources/pgraph.go @@ -19,9 +19,13 @@ package resources import ( "fmt" - "log" + "sync" + "github.com/purpleidea/mgmt/event" "github.com/purpleidea/mgmt/pgraph" + "github.com/purpleidea/mgmt/util" + + errwrap "github.com/pkg/errors" ) //go:generate stringer -type=graphState -output=graphstate_stringer.go @@ -43,7 +47,7 @@ func getState(g *pgraph.Graph) graphState { //mutex.Lock() //defer mutex.Unlock() if u, ok := g.Value("state"); ok { - return util.Uint(u) + return graphState(util.Uint(u)) } return graphStateNil } @@ -54,7 +58,7 @@ func setState(g *pgraph.Graph, state graphState) graphState { mutex.Lock() defer mutex.Unlock() prev := getState(g) - g.SetValue("state", state) + g.SetValue("state", uint(state)) return prev } @@ -73,3 +77,113 @@ func StateLockFromGraph(g *pgraph.Graph) *sync.Mutex { } return m } + +// VtoR casts the Vertex into a Res for use. It panics if it can't convert. +func VtoR(v pgraph.Vertex) Res { + res, ok := v.(Res) + if !ok { + panic("not a Res") + } + return res +} + +// GraphSync updates the oldGraph so that it matches the newGraph receiver. It +// leaves identical elements alone so that they don't need to be refreshed. It +// tries to mutate existing elements into new ones, if they support this. +// FIXME: add test cases +func GraphSync(g *pgraph.Graph, oldGraph *pgraph.Graph) (*pgraph.Graph, error) { + + if oldGraph == nil { + var err error + oldGraph, err = pgraph.NewGraph(g.GetName()) // copy over the name + if err != nil { + return nil, errwrap.Wrapf(err, "could not run GraphSync() properly") + } + } + oldGraph.SetName(g.GetName()) // overwrite the name + + var lookup = make(map[pgraph.Vertex]pgraph.Vertex) + var vertexKeep []pgraph.Vertex // list of vertices which are the same in new graph + var edgeKeep []*pgraph.Edge // list of vertices which are the same in new graph + + for v := range g.Adjacency() { // loop through the vertices (resources) + res := VtoR(v) // resource + var vertex pgraph.Vertex + + // step one, direct compare with res.Compare + if vertex == nil { // redundant guard for consistency + fn := func(v pgraph.Vertex) (bool, error) { + return VtoR(v).Compare(res), nil + } + var err error + vertex, err = oldGraph.VertexMatchFn(fn) + if err != nil { + return nil, errwrap.Wrapf(err, "could not VertexMatchFn() resource") + } + } + + // TODO: consider adding a mutate API. + // step two, try and mutate with res.Mutate + //if vertex == nil { // not found yet... + // vertex = oldGraph.MutateMatch(res) + //} + + if vertex == nil { // no match found yet + if err := res.Validate(); err != nil { + return nil, errwrap.Wrapf(err, "could not Validate() resource") + } + vertex = v + oldGraph.AddVertex(vertex) // call standalone in case not part of an edge + } + lookup[v] = vertex // used for constructing edges + vertexKeep = append(vertexKeep, vertex) // append + } + + // get rid of any vertices we shouldn't keep (that aren't in new graph) + for v := range oldGraph.Adjacency() { + if !pgraph.VertexContains(v, vertexKeep) { + // wait for exit before starting new graph! 
+ VtoR(v).SendEvent(event.EventExit, nil) // sync + VtoR(v).WaitGroup().Wait() + oldGraph.DeleteVertex(v) + } + } + + // compare edges + for v1 := range g.Adjacency() { // loop through the vertices (resources) + for v2, e := range g.Adjacency()[v1] { + // we have an edge! + + // lookup vertices (these should exist now) + //res1 := v1.Res // resource + //res2 := v2.Res + //vertex1 := oldGraph.CompareMatch(res1) // now: VertexMatchFn + //vertex2 := oldGraph.CompareMatch(res2) // now: VertexMatchFn + vertex1, exists1 := lookup[v1] + vertex2, exists2 := lookup[v2] + if !exists1 || !exists2 { // no match found, bug? + //if vertex1 == nil || vertex2 == nil { // no match found + return nil, fmt.Errorf("new vertices weren't found") // programming error + } + + edge, exists := oldGraph.Adjacency()[vertex1][vertex2] + if !exists || edge.Name != e.Name { // TODO: edgeCmp + edge = e // use or overwrite edge + } + oldGraph.Adjacency()[vertex1][vertex2] = edge // store it (AddEdge) + edgeKeep = append(edgeKeep, edge) // mark as saved + } + } + + // delete unused edges + for v1 := range oldGraph.Adjacency() { + for _, e := range oldGraph.Adjacency()[v1] { + // we have an edge! + if !pgraph.EdgeContains(e, edgeKeep) { + oldGraph.DeleteEdge(e) + } + } + } + + return oldGraph, nil +} diff --git a/resources/resources.go b/resources/resources.go index b4cb2638..8f306d04 100644 --- a/resources/resources.go +++ b/resources/resources.go @@ -177,6 +177,7 @@ type Base interface { SetName(string) SetKind(string) GetKind() string + String() string Meta() *MetaParams Events() chan *event.Event Data() *Data @@ -430,6 +431,11 @@ func (obj *BaseRes) GetKind() string { return obj.Kind } +// String returns the canonical string representation for a resource. +func (obj *BaseRes) String() string { + return fmt.Sprintf("%s[%s]", obj.GetKind(), obj.GetName()) +} + // Meta returns the MetaParams as a reference, which we can then get/set on. func (obj *BaseRes) Meta() *MetaParams { return &obj.MetaParams @@ -721,3 +727,14 @@ func (obj *BaseRes) Poll() error { func (obj *BaseRes) Prometheus() *prometheus.Prometheus { return obj.Data().Prometheus } + +// TODO: consider adding a mutate API. +//func (g *Graph) MutateMatch(obj resources.Res) Vertex { +// for v := range g.adjacency { +// if err := v.Res.Mutate(obj); err == nil { +// // transmogrified! +// return v +// } +// } +// return nil +//} diff --git a/pgraph/semaphore.go b/resources/semaphore.go similarity index 91% rename from pgraph/semaphore.go rename to resources/semaphore.go index d2f703d8..635306de 100644 --- a/pgraph/semaphore.go +++ b/resources/semaphore.go @@ -15,7 +15,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package pgraph +package resources import ( "fmt" @@ -24,6 +24,7 @@ import ( "strings" "sync" + "github.com/purpleidea/mgmt/pgraph" "github.com/purpleidea/mgmt/util/semaphore" multierr "github.com/hashicorp/go-multierror" @@ -33,11 +34,11 @@ import ( const SemaSep = ":" // SemaLock acquires the list of semaphores in the graph. -func (g *Graph) SemaLock(semas []string) error { +func SemaLock(g *pgraph.Graph, semas []string) error { var reterr error sort.Strings(semas) // very important to avoid deadlock in the dag! 
 	slock := SemaLockFromGraph(g)
-	smap := *SemaMapFromGraph(g) // returns a *map, but can't use directly
+	smap := SemaMapFromGraph(g) // returns a map, which can be modified by ref
 
 	for _, id := range semas {
 		slock.Lock() // semaphore creation lock
@@ -57,10 +58,10 @@ func (g *Graph) SemaLock(semas []string) error {
 }
 
 // SemaUnlock releases the list of semaphores in the graph.
-func (g *Graph) SemaUnlock(semas []string) error {
+func SemaUnlock(g *pgraph.Graph, semas []string) error {
 	var reterr error
 	sort.Strings(semas) // unlock in the same order to remove partial locks
-	smap := *SemaMapFromGraph(g)
+	smap := SemaMapFromGraph(g)
 
 	for _, id := range semas {
 		sema, ok := smap[id] // lookup
@@ -92,7 +93,7 @@ func SemaSize(id string) int {
 
 // SemaLockFromGraph returns a pointer to the semaphore lock stored with the
 // graph, otherwise it panics. If one does not exist, it will create it.
-func SemaLockFromGraph(g *Graph) *sync.Mutex {
+func SemaLockFromGraph(g *pgraph.Graph) *sync.Mutex {
 	x, exists := g.Value("slock")
 	if !exists {
 		g.SetValue("slock", &sync.Mutex{})
diff --git a/pgraph/semaphore_test.go b/resources/semaphore_test.go
similarity index 57%
rename from pgraph/semaphore_test.go
rename to resources/semaphore_test.go
index c2e6ae05..087cfd19 100644
--- a/pgraph/semaphore_test.go
+++ b/resources/semaphore_test.go
@@ -15,12 +15,12 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package pgraph
+package resources
 
 import (
 	"testing"
 
-	"github.com/purpleidea/mgmt/resources"
+	"github.com/purpleidea/mgmt/pgraph"
 )
 
 func TestSemaSize(t *testing.T) {
@@ -38,10 +38,10 @@ func TestSemaSize(t *testing.T) {
 
 func NewNoopResTestSema(name string, semas []string) *NoopResTest {
 	obj := &NoopResTest{
-		NoopRes: resources.NoopRes{
-			BaseRes: resources.BaseRes{
+		NoopRes: NoopRes{
+			BaseRes: BaseRes{
 				Name: name,
-				MetaParams: resources.MetaParams{
+				MetaParams: MetaParams{
 					AutoGroup: true, // always autogroup
 					Sema:      semas,
 				},
@@ -52,54 +52,54 @@ func NewNoopResTestSema(name string, semas []string) *NoopResTest {
 }
 
 func TestPgraphSemaphoreGrouping1(t *testing.T) {
-	g1, _ := NewGraph("g1") // original graph
+	g1, _ := pgraph.NewGraph("g1") // original graph
 	{
-		a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:1"}))
-		a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
-		a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
+		a1 := pgraph.NewVertex(NewNoopResTestSema("a1", []string{"s:1"}))
+		a2 := pgraph.NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
+		a3 := pgraph.NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
 		g1.AddVertex(a1)
 		g1.AddVertex(a2)
 		g1.AddVertex(a3)
 	}
-	g2, _ := NewGraph("g2") // expected result
+	g2, _ := pgraph.NewGraph("g2") // expected result
 	{
-		a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:1", "s:2", "s:3"}))
+		a123 := pgraph.NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:1", "s:2", "s:3"}))
 		g2.AddVertex(a123)
 	}
 	runGraphCmp(t, g1, g2)
 }
 
 func TestPgraphSemaphoreGrouping2(t *testing.T) {
-	g1, _ := NewGraph("g1") // original graph
+	g1, _ := pgraph.NewGraph("g1") // original graph
 	{
-		a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:10", "s:11"}))
-		a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
-		a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
+		a1 := pgraph.NewVertex(NewNoopResTestSema("a1", []string{"s:10", "s:11"}))
+		a2 := pgraph.NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
+		a3 := pgraph.NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
 		g1.AddVertex(a1)
 		g1.AddVertex(a2)
 		g1.AddVertex(a3)
 	}
-	g2, _ := NewGraph("g2") // expected result
+	g2, _ := pgraph.NewGraph("g2") // expected result
 	{
-		a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:10", "s:11", "s:2", "s:3"}))
+		a123 := pgraph.NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:10", "s:11", "s:2", "s:3"}))
 		g2.AddVertex(a123)
 	}
 	runGraphCmp(t, g1, g2)
 }
 
 func TestPgraphSemaphoreGrouping3(t *testing.T) {
-	g1, _ := NewGraph("g1") // original graph
+	g1, _ := pgraph.NewGraph("g1") // original graph
 	{
-		a1 := NewVertex(NewNoopResTestSema("a1", []string{"s:1", "s:2"}))
-		a2 := NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
-		a3 := NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
+		a1 := pgraph.NewVertex(NewNoopResTestSema("a1", []string{"s:1", "s:2"}))
+		a2 := pgraph.NewVertex(NewNoopResTestSema("a2", []string{"s:2"}))
+		a3 := pgraph.NewVertex(NewNoopResTestSema("a3", []string{"s:3"}))
 		g1.AddVertex(a1)
 		g1.AddVertex(a2)
 		g1.AddVertex(a3)
 	}
-	g2, _ := NewGraph("g2") // expected result
+	g2, _ := pgraph.NewGraph("g2") // expected result
 	{
-		a123 := NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:1", "s:2", "s:3"}))
+		a123 := pgraph.NewVertex(NewNoopResTestSema("a1,a2,a3", []string{"s:1", "s:2", "s:3"}))
 		g2.AddVertex(a123)
 	}
 	runGraphCmp(t, g1, g2)
diff --git a/yamlgraph/gconfig.go b/yamlgraph/gconfig.go
index 43d63a62..b5a23daa 100644
--- a/yamlgraph/gconfig.go
+++ b/yamlgraph/gconfig.go
@@ -103,14 +103,14 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world resources.World,
 		return nil, errwrap.Wrapf(err, "could not run NewGraphFromConfig() properly")
 	}
 
-	var lookup = make(map[string]map[string]*pgraph.Vertex)
+	var lookup = make(map[string]map[string]pgraph.Vertex)
 
 	//log.Printf("%+v", config) // debug
 
 	// TODO: if defined (somehow)...
 	graph.SetName(c.Graph) // set graph name
 
-	var keep []*pgraph.Vertex // list of vertex which are the same in new graph
+	var keep []pgraph.Vertex // list of vertex which are the same in new graph
 	var resourceList []resources.Res // list of resources to export
 	// use reflection to avoid duplicating code... better options welcome!
 	value := reflect.Indirect(reflect.ValueOf(c.Resources))
@@ -131,13 +131,13 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world resources.World,
 		//	res.Meta().Noop = noop
 		//}
 		if _, exists := lookup[kind]; !exists {
-			lookup[kind] = make(map[string]*pgraph.Vertex)
+			lookup[kind] = make(map[string]pgraph.Vertex)
 		}
 		// XXX: should we export based on a @@ prefix, or a metaparam
 		// like exported => true || exported => (host pattern)||(other pattern?)
if !strings.HasPrefix(res.GetName(), "@@") { // not exported resource - fn := func(v *pgraph.Vertex) (bool, error) { - return v.Res.Compare(res), nil + fn := func(v pgraph.Vertex) (bool, error) { + return resources.VtoR(v).Compare(res), nil } v, err := graph.VertexMatchFn(fn) if err != nil { @@ -211,11 +211,11 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world resources.World, // XXX: similar to other resource add code: if _, exists := lookup[kind]; !exists { - lookup[kind] = make(map[string]*pgraph.Vertex) + lookup[kind] = make(map[string]pgraph.Vertex) } - fn := func(v *pgraph.Vertex) (bool, error) { - return v.Res.Compare(res), nil + fn := func(v pgraph.Vertex) (bool, error) { + return resources.VtoR(v).Compare(res), nil } v, err := graph.VertexMatchFn(fn) if err != nil { diff --git a/yamlgraph2/gconfig.go b/yamlgraph2/gconfig.go index 0a691791..c12522c0 100644 --- a/yamlgraph2/gconfig.go +++ b/yamlgraph2/gconfig.go @@ -163,27 +163,27 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world resources.World, return nil, errwrap.Wrapf(err, "could not run NewGraphFromConfig() properly") } - var lookup = make(map[string]map[string]*pgraph.Vertex) + var lookup = make(map[string]map[string]pgraph.Vertex) //log.Printf("%+v", config) // debug // TODO: if defined (somehow)... graph.SetName(c.Graph) // set graph name - var keep []*pgraph.Vertex // list of vertex which are the same in new graph + var keep []pgraph.Vertex // list of vertex which are the same in new graph var resourceList []resources.Res // list of resources to export // Resources for _, res := range c.ResList { kind := res.GetKind() if _, exists := lookup[kind]; !exists { - lookup[kind] = make(map[string]*pgraph.Vertex) + lookup[kind] = make(map[string]pgraph.Vertex) } // XXX: should we export based on a @@ prefix, or a metaparam // like exported => true || exported => (host pattern)||(other pattern?) if !strings.HasPrefix(res.GetName(), "@@") { // not exported resource - fn := func(v *pgraph.Vertex) (bool, error) { - return v.Res.Compare(res), nil + fn := func(v pgraph.Vertex) (bool, error) { + return resources.VtoR(v).Compare(res), nil } v, err := graph.VertexMatchFn(fn) if err != nil { @@ -257,11 +257,11 @@ func (c *GraphConfig) NewGraphFromConfig(hostname string, world resources.World, // XXX: similar to other resource add code: if _, exists := lookup[kind]; !exists { - lookup[kind] = make(map[string]*pgraph.Vertex) + lookup[kind] = make(map[string]pgraph.Vertex) } - fn := func(v *pgraph.Vertex) (bool, error) { - return v.Res.Compare(res), nil + fn := func(v pgraph.Vertex) (bool, error) { + return resources.VtoR(v).Compare(res), nil } v, err := graph.VertexMatchFn(fn) if err != nil {
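
Not part of the patch: a minimal sketch of how a caller drives the graph helpers after this refactor, now that they are package-level functions in resources and vertices are the pgraph.Vertex interface. The swap() helper, the empty example graph, and the error handling below are made up for illustration; the names and signatures it leans on (resources.GraphSync, resources.AutoEdges, resources.AutoGroup, resources.NonReachabilityGrouper, resources.VtoR) are the ones introduced or used in this change.

package main

import (
	"fmt"
	"log"

	"github.com/purpleidea/mgmt/pgraph"
	"github.com/purpleidea/mgmt/resources"
)

// swap is a made-up helper that mirrors what Main.Run() does on each graph
// switch: sync the new graph into the old one so unchanged resources are
// reused, copy it to the active graph, then add autoedges and autogroup.
func swap(newGraph, oldGraph *pgraph.Graph) (*pgraph.Graph, error) {
	full, err := resources.GraphSync(newGraph, oldGraph) // keeps identical vertices
	if err != nil {
		return nil, err
	}
	g := full.Copy()       // copy to active graph
	resources.AutoEdges(g) // add autoedges; modifies the graph
	resources.AutoGroup(g, &resources.NonReachabilityGrouper{}) // autogroup; modifies the graph
	return g, nil
}

func main() {
	newGraph, err := pgraph.NewGraph("example") // hypothetical, empty graph
	if err != nil {
		log.Fatal(err)
	}
	active, err := swap(newGraph, nil) // nil oldGraph: GraphSync creates one
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range active.Vertices() {
		res := resources.VtoR(v) // vertices are plain pgraph.Vertex now; cast back to Res
		fmt.Println(res.String())
	}
}

Passing nil as the old graph works because GraphSync creates a fresh graph itself when oldGraph is nil, which matches how the first pass through the main loop behaves.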
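
Also illustrative only: the semaphore helpers keep their behaviour after the move from pgraph to resources, but now take the graph as an explicit first argument. The graph name and the semaphore id "ex:2" below are invented; SemaLock, SemaUnlock, and SemaSep come from the file moved above, and the sketch assumes SemaMapFromGraph lazily creates the map on first use, the same way SemaLockFromGraph is documented to do.

package main

import (
	"log"

	"github.com/purpleidea/mgmt/pgraph"
	"github.com/purpleidea/mgmt/resources"
)

func main() {
	g, err := pgraph.NewGraph("semaexample") // hypothetical graph name
	if err != nil {
		log.Fatal(err)
	}
	// "ex" + SemaSep + "2" is a hypothetical semaphore id; ids are sorted
	// inside SemaLock to avoid deadlock, as the comment in the patch notes.
	semas := []string{"ex" + resources.SemaSep + "2"}
	if err := resources.SemaLock(g, semas); err != nil { // acquire
		log.Fatal(err)
	}
	if err := resources.SemaUnlock(g, semas); err != nil { // release
		log.Fatal(err)
	}
	log.Printf("locked and unlocked %d semaphore(s)", len(semas))
}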