engine: graph, resources: Reduce and clean up logging
Make the output more usable.
@@ -338,9 +338,17 @@ func (obj *Engine) Worker(vertex pgraph.Vertex) error {
 			obj.state[vertex].cuid.StopTimer() // clean up nicely
 		} else {
 			obj.state[vertex].cuid.StartTimer()
-			obj.Logf("Watch(%s)", vertex)
+			if obj.Debug {
+				obj.Logf("%s: Watch...", vertex)
+			}
 			err = res.Watch(obj.state[vertex].doneCtx) // run the watch normally
-			obj.Logf("Watch(%s): Exited(%s)", vertex, engineUtil.CleanError(err))
+			if obj.Debug {
+				if s := engineUtil.CleanError(err); err != nil {
+					obj.Logf("%s: Watch Error: %s", vertex, s)
+				} else {
+					obj.Logf("%s: Watch Exited...", vertex)
+				}
+			}
 			obj.state[vertex].cuid.StopTimer() // clean up nicely
 		}
 		if err == nil { // || err == engine.ErrClosed

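Worth noting for readers less familiar with Go: in the added error-logging block, the if statement's initializer computes the cleaned-up display string while the condition tests the raw error, so a nil error falls through to the else branch and logs the quiet "Watch Exited..." line. Below is a minimal, self-contained sketch of that idiom; cleanError here is a hypothetical stand-in for engineUtil.CleanError, whose exact behavior this diff does not show.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// cleanError is a hypothetical stand-in for engineUtil.CleanError: it
// reduces an error to a short, single-line string for display. The real
// helper may behave differently; this is only for illustration.
func cleanError(err error) string {
	if err == nil {
		return ""
	}
	return strings.TrimSpace(strings.Split(err.Error(), "\n")[0])
}

func logWatchResult(logf func(format string, v ...interface{}), vertex string, err error) {
	// The init statement computes the display string, but the condition
	// tests the raw error, so a nil error takes the else branch.
	if s := cleanError(err); err != nil {
		logf("%s: Watch Error: %s", vertex, s)
	} else {
		logf("%s: Watch Exited...", vertex)
	}
}

func main() {
	logf := func(format string, v ...interface{}) { fmt.Printf(format+"\n", v...) }
	logWatchResult(logf, "file[foo]", nil)
	logWatchResult(logf, "file[foo]", errors.New("inotify: watch limit\nreached"))
}
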
@@ -253,10 +253,18 @@ func (obj *Engine) Commit() error {
 			obj.wlock.Unlock()
 		}()
 
-		obj.Logf("Worker(%s)", v)
+		if obj.Debug || true {
+			obj.Logf("%s: Working...", v)
+		}
 		// contains the Watch and CheckApply loops
 		err := obj.Worker(v)
-		obj.Logf("Worker(%s): Exited(%s)", v, engineUtil.CleanError(err))
+		if obj.Debug || true {
+			if s := engineUtil.CleanError(err); err != nil {
+				obj.Logf("%s: Error: %s", v, s)
+			} else {
+				obj.Logf("%s: Exited...", v)
+			}
+		}
 		obj.state[v].workerErr = err // store the error
 		// If the Rewatch metaparam is true, then this will get
 		// restarted if we do a graph cmp swap. This is why the

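The "obj.Debug || true" guard in this hunk is always true, so these Worker messages still print unconditionally; the shape of the condition reads as a deliberate toggle that can later be demoted to debug-only by deleting the "|| true". A pared-down sketch of that pattern follows; the Engine struct here is a stand-in for illustration, not the real engine type.

package main

import "fmt"

// Engine is a minimal stand-in; only the Debug flag and Logf hook
// matter for this sketch.
type Engine struct {
	Debug bool
	Logf  func(format string, v ...interface{})
}

func (obj *Engine) runWorker(v string, worker func() error) {
	if obj.Debug || true { // always logs for now; drop "|| true" to make it debug-only
		obj.Logf("%s: Working...", v)
	}
	err := worker()
	if obj.Debug || true {
		if err != nil {
			obj.Logf("%s: Error: %s", v, err)
		} else {
			obj.Logf("%s: Exited...", v)
		}
	}
}

func main() {
	obj := &Engine{
		Debug: false,
		Logf:  func(format string, v ...interface{}) { fmt.Printf(format+"\n", v...) },
	}
	obj.runWorker("svc[nginx]", func() error { return nil })
}
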
@@ -423,7 +423,7 @@ func (obj *DHCPServerRes) Watch(ctx context.Context) error {
 
 	newLogger := &overEngineeredLogger{
 		logf: func(format string, v ...interface{}) {
-			obj.init.Logf("dhcpv4: "+format, v...)
+			obj.init.Logf(format, v...)
 		},
 	}
 	logOpt := server4.WithLogger(newLogger)

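For context, server4.WithLogger (from the github.com/insomniacslk/dhcp package) lets the caller supply its own logger, and overEngineeredLogger adapts the resource's plain Logf function to whatever interface that option expects; with the engine now prefixing resource logs itself, the extra "dhcpv4: " prefix becomes redundant. Below is a hedged sketch of the same adapter shape against a hypothetical single-method Logger interface, since the real interface may require additional methods.

package main

import "fmt"

// Logger is a hypothetical consumer interface standing in for the one
// server4.WithLogger expects; the real interface in
// github.com/insomniacslk/dhcp may require more methods.
type Logger interface {
	Printf(format string, v ...interface{})
}

// logfAdapter wraps a plain logf function (like obj.init.Logf) so it
// satisfies the Logger interface, mirroring what overEngineeredLogger
// appears to do in the resource.
type logfAdapter struct {
	logf func(format string, v ...interface{})
}

func (a *logfAdapter) Printf(format string, v ...interface{}) {
	a.logf(format, v...)
}

// serveWith is a stand-in for constructing the server with the logger option.
func serveWith(l Logger) {
	l.Printf("listening on %s", "0.0.0.0:67")
}

func main() {
	newLogger := &logfAdapter{
		logf: func(format string, v ...interface{}) {
			fmt.Printf(format+"\n", v...)
		},
	}
	serveWith(newLogger)
}
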