diff --git a/internal/pkg/manager/manager.go b/internal/pkg/manager/manager.go
index ecdd3a0..fe316c9 100644
--- a/internal/pkg/manager/manager.go
+++ b/internal/pkg/manager/manager.go
@@ -9,8 +9,10 @@ import (
 	"time"
 )
 
+// MaxConnAttempts caps the number of connection attempts at 255 (0xFF)
+const MaxConnAttempts = 0xFF
+
 // basic manager for starting/stopping checks plus built in heartbeat for downtime detection
-// used across server/reactor
 type Connection struct {
 	Attempts float64 // float for pow
@@ -23,31 +25,43 @@ type Manager struct {
 	Active int32 // atomic checks
 }
 
-func New(maxCon int) *Manager {
+// errors
+var (
+	ErrInvalidMaxConn      = errors.New("invalid max connection attempts")
+	ErrManagerInactive     = errors.New("manager inactive")
+	ErrManagerActive       = errors.New("manager active")
+	ErrMaxAttemptsExceeded = errors.New("max connection attempts exceeded")
+)
+
+func New(maxConn int) (*Manager, error) {
+	if maxConn < 0 || maxConn > MaxConnAttempts {
+		return &Manager{}, ErrInvalidMaxConn
+	}
 
-	c := &Connection{MaxAttempts: maxCon}
+	c := &Connection{MaxAttempts: maxConn}
 	m := &Manager{
 		Connection: c,
 	}
-	return m
+	return m, nil
 }
 
 func (m *Manager) Start() error {
-	// atomically checks/updates status
 	if atomic.CompareAndSwapInt32(&m.Active, 0, 1) {
 		m.ResetConnections()
 		return nil
 	}
-	// already running
-	return errors.New("Manager already started!")
+
+	return ErrManagerActive
 }
 
-func (m *Manager) Exit() error {
+func (m *Manager) Stop() error {
 	if atomic.CompareAndSwapInt32(&m.Active, 1, 0) {
 		return nil
 	}
-	return errors.New("Manager not active!")
+
+	return ErrManagerInactive
 }
 
 func (m *Manager) IsActive() int {
@@ -85,12 +99,11 @@ func (c *Connection) Timeout() (time.Duration, error) {
 	c.Lock()
 	defer c.Unlock()
 	if int(c.Attempts) < c.MaxAttempts {
-		c.Attempts += 1
-		// 50, 100, 200...
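+		// backoff doubles each attempt: 50ms, 100ms, 200ms, ...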
 		to := time.Duration(50*math.Pow(2, c.Attempts)) * time.Millisecond
+		c.Attempts += 1
 		return to, nil
 	}
-	return 0, errors.New("Connection Failed")
+	return 0, ErrMaxAttemptsExceeded
 }
 
 func (c *Connection) ResetConnections() {
diff --git a/internal/pkg/manager/manager_test.go b/internal/pkg/manager/manager_test.go
new file mode 100644
index 0000000..5221a6b
--- /dev/null
+++ b/internal/pkg/manager/manager_test.go
@@ -0,0 +1,206 @@
+package manager
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// creating, starting and stopping tests
+
+// newManager is a test helper that builds a manager and checks the expected error
+func newManager(conn int, want error, t *testing.T) *Manager {
+	assert := assert.New(t)
+
+	manager, err := New(conn)
+	if err != want {
+		t.Fatalf("New(%d) = (%v, %v), want error %v", conn, manager, err, want)
+	}
+
+	assert.Equal(manager.IsActive(), 0, "manager should start inactive")
+
+	return manager
+}
+
+// TestEmptyManager creates a new manager with 0 max connections
+func TestEmptyManager(t *testing.T) {
+	conn := 0
+	newManager(conn, nil, t)
+}
+
+// TestPositiveManager creates a new manager with valid max connections
+func TestPositiveManager(t *testing.T) {
+	conn := rand.Intn(MaxConnAttempts)
+	newManager(conn, nil, t)
+}
+
+// TestNegativeManager creates a new manager with negative max connections
+func TestNegativeManager(t *testing.T) {
+	// +1 keeps conn strictly negative; rand.Intn can return 0
+	conn := -1 * (rand.Intn(MaxConnAttempts) + 1)
+	newManager(conn, ErrInvalidMaxConn, t)
+}
+
+// TestInvalidManager creates a new manager with max connections above MaxConnAttempts
+func TestInvalidManager(t *testing.T) {
+	conn := MaxConnAttempts + 0xf
+	newManager(conn, ErrInvalidMaxConn, t)
+}
+
+// TestManagerLifeCycle tests that a manager can start and stop several times
+func TestManagerLifeCycle(t *testing.T) {
+	assert := assert.New(t)
+
+	conn := rand.Intn(MaxConnAttempts)
+	manager := newManager(conn, nil, t)
+
+	cycles := 10
+
+	// starting and stopping sequentially
+	for i := 0; i < cycles; i++ {
+		assert.NoError(manager.Start(), "starting manager failed")
+		assert.Equal(manager.IsActive(), 1, "manager is inactive after start")
+
+		assert.NoError(manager.Stop(), "stopping manager failed")
+		assert.Equal(manager.IsActive(), 0, "manager is active after stop")
+	}
+}
+
+// TestManagerStopFail tests that stopping an inactive manager errors
+func TestManagerStopFail(t *testing.T) {
+	assert := assert.New(t)
+
+	conn := rand.Intn(MaxConnAttempts)
+	manager := newManager(conn, nil, t)
+
+	assert.NoError(manager.Start(), "starting manager failed")
+
+	// stopping sequentially
+	assert.NoError(manager.Stop(), "stopping manager failed")
+	assert.Error(manager.Stop(), "stopping inactive manager should fail")
+}
+
+// TestManagerStartFail tests that starting an active manager errors
+func TestManagerStartFail(t *testing.T) {
+	assert := assert.New(t)
+
+	conn := rand.Intn(MaxConnAttempts)
+	manager := newManager(conn, nil, t)
+
+	// starting sequentially
+	assert.NoError(manager.Start(), "starting manager failed")
+	assert.Error(manager.Start(), "starting active manager should fail")
+}
+
+// auxiliary tests
+
+// TestManagerTimeout checks that timeouts back off exponentially
+func TestManagerTimeout(t *testing.T) {
+	assert := assert.New(t)
+
+	conn := 10
+	manager := newManager(conn, nil, t)
+
+	assert.NoError(manager.Start(), "starting manager failed")
+
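+	// exercise the full backoff schedule: conn doubling timeouts, then ErrMaxAttemptsExceeded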
+	assert.Equal(manager.IsActive(), 1, "manager is inactive")
+
+	// first call uses attempt 1 and returns the 50ms base timeout
+	prevTimeout, err := manager.Timeout()
+	assert.NoError(err, "generating timeout failed")
+	assert.True(prevTimeout > 0, "invalid timeout")
+
+	for i := 1; i <= conn; i++ {
+		timeout, err := manager.Timeout()
+
+		if i == conn {
+			// attempt conn+1 must fail
+			assert.Error(err, "allowed exceeding max attempts")
+		} else {
+			assert.NoError(err, "generating timeout failed")
+			assert.True(
+				timeout == 2*prevTimeout,
+				"incorrect timeout %d, expected %d",
+				timeout, 2*prevTimeout,
+			)
+		}
+
+		prevTimeout = timeout
+	}
+}
+
+// TestManagerHB tests that the heartbeat channel opens and closes
+func TestManagerHB(t *testing.T) {
+	assert := assert.New(t)
+
+	conn := rand.Intn(MaxConnAttempts)
+	manager := newManager(conn, nil, t)
+
+	assert.NoError(manager.Start(), "starting manager failed")
+	assert.Equal(manager.IsActive(), 1, "manager is inactive")
+
+	ch := make(chan struct{})
+
+	go manager.HeartBeat(ch, 10, 0, time.Millisecond)
+
+	for range ch {
+		// stop on the first ping; HeartBeat should then close the channel
+		assert.NoError(manager.Stop(), "stopping manager failed")
+	}
+
+	assert.Equal(manager.IsActive(), 0, "manager is active")
+}
+
+// TestManagerHBTiming tests that the heartbeat timing is correct
+func TestManagerHBTiming(t *testing.T) {
+	assert := assert.New(t)
+
+	conn := rand.Intn(MaxConnAttempts)
+	manager := newManager(conn, nil, t)
+
+	assert.NoError(manager.Start(), "starting manager failed")
+	assert.Equal(manager.IsActive(), 1, "manager is inactive")
+
+	ch := make(chan struct{})
+	hb := 100
+	pings := 10
+
+	// expected elapsed time, with 5ms tolerance for scheduling noise
+	expected := time.Duration(pings*hb+5) * time.Millisecond
+
+	go manager.HeartBeat(ch, hb, 0, time.Millisecond)
+
+	iter := 0
+	start := time.Now()
+	for range ch {
+		// stop after exactly 10 pings; == avoids a second Stop if a ping races the close
+		iter++
+		if iter == pings {
+			assert.NoError(manager.Stop(), "stopping manager failed")
+		}
+	}
+	end := time.Now()
+
+	assert.Equal(manager.IsActive(), 0, "manager is active")
+	assert.WithinDuration(start, end, expected, "inaccurate heartbeat")
+}
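For reviewers, a minimal sketch of how the reworked API is intended to be used. This is illustrative only: the import path is hypothetical (the package lives under internal/), and HeartBeat, which this diff does not touch, is omitted.

```go
package main

import (
	"fmt"
	"log"
	"time"

	manager "example.com/internal/pkg/manager" // hypothetical import path
)

func main() {
	// New now validates the cap and returns an error instead of a bare *Manager
	m, err := manager.New(10)
	if err != nil {
		log.Fatal(err) // ErrInvalidMaxConn when maxConn < 0 or > MaxConnAttempts
	}

	if err := m.Start(); err != nil {
		log.Fatal(err) // ErrManagerActive if already started
	}
	defer m.Stop()

	// drive retries with the exponential backoff: 50ms, 100ms, 200ms, ...
	for {
		to, err := m.Timeout()
		if err != nil {
			fmt.Println("giving up:", err) // ErrMaxAttemptsExceeded
			return
		}
		time.Sleep(to)
		// ... attempt the connection here; break on success ...
	}
}
```

Callers that previously used New(maxCon) and Exit() need the two-value return and the Stop rename.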