Support N distributed agents

This is the third main feature of this system. The code needs a bunch of
polish, but it actually all works :)

I've tested this briefly with N <= 3.

Currently you have to build your own etcd cluster. It's quite easy: just
run `etcd` and it will be ready. I usually run it in a throwaway /tmp/
directory so that I can blow away the stored data easily.
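For reference, a rough Go sketch of that throwaway workflow (not part of this
commit; it only assumes `etcd` is in your $PATH and uses its default data dir):

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

// Run a single-member etcd in a throwaway /tmp directory so the stored
// data can be blown away afterwards.
func main() {
	dir, err := os.MkdirTemp("/tmp", "etcd-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir) // discard all etcd state on exit

	cmd := exec.Command("etcd")
	cmd.Dir = dir // etcd creates default.etcd/ under this directory
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil { // blocks until etcd exits
		log.Fatal(err)
	}
}
```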
Author: James Shubin
Date:   2016-01-02 19:41:36 -05:00
Parent: 72a8027b7f
Commit: d8cbeb56f9
19 changed files with 482 additions and 140 deletions


@@ -39,7 +39,6 @@ const (
 	graphStarted
 	graphPausing
 	graphPaused
-	graphContinuing
 )
 
 // The graph abstract data type (ADT) is defined as follows:
@@ -538,18 +537,20 @@ func HeisenbergCount(ch chan *Vertex) int {
 }
 
 // main kick to start the graph
-func (g *Graph) Start(wg *sync.WaitGroup) {
+func (g *Graph) Start(wg *sync.WaitGroup) { // start or continue
 	t, _ := g.TopologicalSort()
 	for _, v := range Reverse(t) {
-		wg.Add(1)
-		// must pass in value to avoid races...
-		// see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/
-		go func(vv *Vertex) {
-			defer wg.Done()
-			vv.Type.Watch()
-			log.Printf("Finish: %v", vv.GetName())
-		}(v)
+		if !v.Type.IsWatching() { // if Watch() is not running...
+			wg.Add(1)
+			// must pass in value to avoid races...
+			// see: https://ttboj.wordpress.com/2015/07/27/golang-parallelism-issues-causing-too-many-open-files-error/
+			go func(vv *Vertex) {
+				defer wg.Done()
+				vv.Type.Watch()
+				log.Printf("Finish: %v", vv.GetName())
+			}(v)
+		}
 		// ensure state is started before continuing on to next vertex
 		v.Type.SendEvent(eventStart, true)
@@ -557,13 +558,6 @@ func (g *Graph) Start(wg *sync.WaitGroup) {
 	}
 }
 
-func (g *Graph) Continue() {
-	t, _ := g.TopologicalSort()
-	for _, v := range Reverse(t) {
-		v.Type.SendEvent(eventContinue, true)
-	}
-}
-
 func (g *Graph) Pause() {
 	t, _ := g.TopologicalSort()
 	for _, v := range t { // squeeze out the events...