Don't block on member exits

The MemberList() function, which apparently tries to contact all of the
endpoints, was blocking right after a member exited, because we still had
a stale list of endpoints. While a member that exits safely will update
the endpoints list, the remaining member unavoidably races to receive
that message, and so it might call MemberList() with the now-stale
endpoint list. With this change, we immediately invalidate an endpoint we
know to be gone.

This also adds a simple test case to catch this scenario.
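
To make the fix concrete, here is a minimal sketch of the proactive-invalidation idea. The EmbdEtcdSketch type, its fields, and the modern import path are illustrative stand-ins (mgmt at the time vendored the coreos clientv3 package), not the project's actual code:

package etcdsketch

import (
	clientv3 "go.etcd.io/etcd/client/v3" // assumption: modern import path
)

// EmbdEtcdSketch is a hypothetical stand-in for mgmt's EmbdEtcd struct.
type EmbdEtcdSketch struct {
	endpoints map[string][]string // hostname -> client URLs
	client    *clientv3.Client
}

// invalidate forgets a departed member's endpoints immediately and pushes
// the shortened list into the client, instead of waiting for the (racy)
// endpoints-update event from the member that exited.
func (obj *EmbdEtcdSketch) invalidate(quitter string) {
	delete(obj.endpoints, quitter) // proactively drop the stale entry
	eps := []string{}
	for _, urls := range obj.endpoints {
		eps = append(eps, urls...)
	}
	obj.client.SetEndpoints(eps...) // clientv3: swap in the fresh endpoint set
}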
James Shubin
2016-07-26 04:03:50 -04:00
parent f6c1bba3b6
commit 24ba6abc6b
2 changed files with 39 additions and 2 deletions

etcd.go (19 changes)

@@ -1241,7 +1241,19 @@ func (obj *EmbdEtcd) volunteerCallback(re *RE) error {
 		if removed {
 			log.Printf("Etcd: Member Removed (forced): %v(%v)", quitter, mID)
 		}
-		return &CtxDelayErr{1 * time.Second, fmt.Sprintf("Member %s (%d) removed successfully!", quitter, mID)} // retry asap
+		// Remove the endpoint from our list to avoid blocking
+		// future MemberList calls which would try and connect
+		// to a missing endpoint... The endpoints should get
+		// updated from the member exiting safely if it doesn't
+		// crash, but if it did and/or since it's a race to see
+		// if the update event will get seen before we need the
+		// new data, just do it now anyways, then update the
+		// endpoint list and trigger a reconnect.
+		delete(obj.endpoints, quitter) // proactively delete it
+		obj.endpointCallback(nil)      // update!
+		log.Printf("Member %s (%d) removed successfully!", quitter, mID)
+		return &CtxReconnectErr{"a member was removed"} // retry asap and update endpoint list
 	} else {
 		// programming error
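
The return value changes from a CtxDelayErr (sleep, then retry) to a CtxReconnectErr (reconnect, so the retry uses the fresh endpoint list). A rough sketch of how a context-error handler could tell the two apart; both struct definitions here are hypothetical, since the real ones live elsewhere in etcd.go:

package etcdsketch

import (
	"fmt"
	"time"
)

// Hypothetical stand-ins for mgmt's sentinel error types.
type CtxDelayErr struct {
	Delta   time.Duration
	Message string
}

func (e *CtxDelayErr) Error() string {
	return fmt.Sprintf("CtxDelayErr(%v): %s", e.Delta, e.Message)
}

type CtxReconnectErr struct {
	Message string
}

func (e *CtxReconnectErr) Error() string {
	return fmt.Sprintf("CtxReconnectErr: %s", e.Message)
}

// handleCtxErr sketches the dispatch: a delay error just sleeps before the
// retry, while a reconnect error rebuilds the client connection first so
// the retry sees the updated endpoint list.
func handleCtxErr(err error, reconnect func() error) error {
	switch e := err.(type) {
	case *CtxDelayErr:
		time.Sleep(e.Delta) // wait, then let the caller retry
		return nil
	case *CtxReconnectErr:
		return reconnect() // rebuild the connection, then retry
	default:
		return err // not a retryable sentinel error
	}
}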
@@ -1852,7 +1864,10 @@ func EtcdMembers(obj *EmbdEtcd) (map[uint64]string, error) {
 			return nil, fmt.Errorf("Exiting...")
 		}
 		obj.rLock.RLock()
-		response, err = obj.client.MemberList(context.Background())
+		if TRACE {
+			log.Printf("Trace: Etcd: EtcdMembers(): Endpoints are: %v", obj.client.Endpoints())
+		}
+		response, err = obj.client.MemberList(ctx)
 		obj.rLock.RUnlock()
 		if err == nil {
 			break
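
Passing the surrounding ctx instead of context.Background() matters because a context with a deadline lets MemberList() fail fast on a dead endpoint instead of blocking. A standalone sketch against the clientv3 API (the endpoint URL and timeout values are arbitrary, and the import path is the modern one):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	client, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// A bounded context means MemberList can't hang forever on a stale
	// endpoint; it returns an error once the deadline expires.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := client.MemberList(ctx)
	if err != nil {
		log.Printf("member list failed: %v", err)
		return
	}
	for _, m := range resp.Members {
		fmt.Printf("member: %s (%d)\n", m.Name, m.ID)
	}
}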

test/shell/t8.sh (new executable file, 22 additions)

@@ -0,0 +1,22 @@
+#!/bin/bash -e
+# run empty graphs, we're just testing etcd clustering
+timeout --kill-after=120s 90s ./mgmt run --hostname h1 &
+pid1=$!
+sleep 5s	# let it start up
+timeout --kill-after=120s 90s ./mgmt run --hostname h2 --seeds http://127.0.0.1:2379 --client-urls http://127.0.0.1:2381 --server-urls http://127.0.0.1:2382 &
+pid2=$!
+sleep 5s
+$(sleep 5s && kill -SIGINT $pid2)&	# send ^C to exit 2nd mgmt
+wait $pid2
+e=$?
+if [ $e -ne 0 ]; then
+	exit $e
+fi
+$(sleep 5s && kill -SIGINT $pid1)&	# send ^C to exit 1st mgmt
+wait $pid1	# get exit status
+# if pid1 exits because of a timeout, then it blocked, and this is a bug!
+exit $?