working downstream
parent
efd91b1c90
commit
42ce886114
@ -1,25 +1,147 @@
|
||||
package device
|
||||
|
||||
// serves as a server side device coordinator to sync
|
||||
import (
|
||||
"FRMS/internal/pkg/I2C"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
"FRMS/internal/pkg/manager"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// assume has a server connection from DM
|
||||
// STEPS
|
||||
// 1) client loads web page
|
||||
// 2) DC pushes what it has to the client
|
||||
// 3) requests DM for what it doesnt
|
||||
// 4) DM responds
|
||||
// 5) DC forwards responses to client
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// What can happen
|
||||
// - Client can push something to client
|
||||
// - Client requests info
|
||||
// - If this happens, we can push it to client when we get it. Just need to know it was requests
|
||||
// Created by rlc to manage devices
|
||||
|
||||
// basic manager to embed
|
||||
// Manager is the minimal lifecycle contract the device coordinator
// embeds: start, stop, and periodic heartbeat generation.
type Manager interface {
	Start() error
	Exit() error
	// HeartBeat emits on the channel at intervals; the int/Duration
	// arguments configure the cadence (see manager package).
	HeartBeat(chan struct{}, int, int, time.Duration)
}
|
||||
|
||||
func NewManager() Manager {
|
||||
// dont need timeout functionality
|
||||
return manager.New(0)
|
||||
}
|
||||
|
||||
// I2C client for locking
|
||||
// I2CClient is the coordinator's view of the locking I2C bus client.
type I2CClient interface {
	// GetConnected reports the set of addresses currently on the bus.
	GetConnected() (map[int]bool, error)
	// SendCmd writes a command string to a device address and returns
	// its response.
	SendCmd(int, string) (string, error)
}
|
||||
|
||||
func NewI2CClient(config *viper.Viper) (I2CClient, error) {
|
||||
return I2C.NewClient(config)
|
||||
}
|
||||
|
||||
// device coordinator itself
|
||||
type DeviceCoordinator struct {
|
||||
NameChan chan string
|
||||
StatusChan chan string
|
||||
I2C I2CClient
|
||||
Manager
|
||||
Config *viper.Viper
|
||||
|
||||
managersMu sync.RWMutex
|
||||
Managers map[int]*DeviceManager
|
||||
}
|
||||
|
||||
func NewCoordinator(config *viper.Viper) *DeviceCoordinator {
|
||||
dm := make(map[int]*DeviceManager)
|
||||
m := NewManager()
|
||||
c := &DeviceCoordinator{
|
||||
Manager: m,
|
||||
Managers: dm,
|
||||
Config: config,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) Start() error {
|
||||
var err error
|
||||
|
||||
if err = c.Manager.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.I2C, err = NewI2CClient(c.Config); err != nil {
|
||||
return err
|
||||
}
|
||||
go c.Monitor()
|
||||
return err
|
||||
}
|
||||
|
||||
func ClientSetName() {
|
||||
// pass client set names to DM
|
||||
func (c *DeviceCoordinator) Monitor() {
|
||||
// monitor I2C for new devices
|
||||
ch := make(chan struct{})
|
||||
go c.HeartBeat(ch, 10, 0, time.Second)
|
||||
|
||||
for range ch {
|
||||
// on notification (10s)
|
||||
devs, err := c.I2C.GetConnected()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// update list
|
||||
go c.UpdateManagers(devs)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) UpdateManagers(active map[int]bool) {
|
||||
// updates managers
|
||||
c.managersMu.Lock()
|
||||
defer c.managersMu.Unlock()
|
||||
|
||||
for addr, dm := range c.Managers {
|
||||
_, ok := active[addr]
|
||||
|
||||
if ok && dm.IsActive() == 0 {
|
||||
// active and dm not
|
||||
if err := dm.Start(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else if dm.IsActive() == 1 {
|
||||
// not active and dm is
|
||||
if err := dm.Exit(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// remove from map
|
||||
delete(active, addr)
|
||||
}
|
||||
|
||||
for addr, _ := range active {
|
||||
// no manager, create one
|
||||
fmt.Printf("New device %d!\n", addr)
|
||||
|
||||
dm, err := NewDeviceManager(addr, c.Config, "", c.I2C)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := dm.Start(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c.Managers[addr] = dm
|
||||
}
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) GetDeviceInfo() ([]*pb.Device, error) {
|
||||
// gets device info for monitoring
|
||||
c.managersMu.RLock()
|
||||
defer c.managersMu.RUnlock()
|
||||
|
||||
var devices []*pb.Device
|
||||
|
||||
for addr, dm := range c.Managers {
|
||||
// looping over devices
|
||||
devices = append(devices, &pb.Device{
|
||||
Addr: int32(addr),
|
||||
Status: pb.Status(dm.IsActive()),
|
||||
})
|
||||
}
|
||||
|
||||
return devices, nil
|
||||
}
|
||||
|
@ -1,100 +0,0 @@
|
||||
package reactor
|
||||
|
||||
import (
|
||||
"FRMS/internal/pkg/device"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
"sync"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// DeviceManager is the reactor package's view of a per-device manager:
// lifecycle control, status reporting, and config loading.
type DeviceManager interface {
	Start() error
	Exit() error
	GetStatus() string
	LoadConfig() error
}
|
||||
|
||||
func NewDeviceManager(addr int, config *viper.Viper, prefix string) (DeviceManager, error) {
|
||||
return device.NewDeviceManager(addr, config, prefix)
|
||||
}
|
||||
|
||||
type DeviceCoordinator struct {
|
||||
Config *viper.Viper
|
||||
Managers map[int]DeviceManager
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func NewDeviceCoordinator(config *viper.Viper) *DeviceCoordinator {
|
||||
dm := &DeviceCoordinator{Config: config}
|
||||
dm.Managers = make(map[int]DeviceManager)
|
||||
return dm
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) UpdateDevices(config *viper.Viper, i2c I2CClient, active map[int]bool) error {
|
||||
// update device list
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
for addr, _ := range active {
|
||||
// loop over devs
|
||||
if _, ok := c.Managers[addr]; !ok {
|
||||
// no device, creating one
|
||||
|
||||
dm, err := NewDeviceManager(addr, c.Config, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// starting
|
||||
if err = dm.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// loading config
|
||||
if err = dm.LoadConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// update entry
|
||||
c.Managers[addr] = dm
|
||||
}
|
||||
}
|
||||
// all devs accounted for
|
||||
// I can rework this to rely on individual devices to keep track of status and only need above
|
||||
// for addr, dm := range c.Managers {
|
||||
// if active[addr] {
|
||||
// // active
|
||||
// if dm.IsActive() != 1 {
|
||||
// err = dm.Start()
|
||||
// }
|
||||
// } else {
|
||||
// if dm.IsActive() != 0 {
|
||||
// err = dm.Exit()
|
||||
// }
|
||||
// }
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) GetDevices() ([]*pb.Device, error) {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
var err error
|
||||
var devices []*pb.Device
|
||||
|
||||
for addr, dm := range c.Managers {
|
||||
status := pb.Status(pb.Status_value[dm.GetStatus()])
|
||||
devices = append(devices, &pb.Device{
|
||||
Addr: int32(addr),
|
||||
Status: status,
|
||||
})
|
||||
}
|
||||
|
||||
return devices, err
|
||||
}
|
@ -1,27 +0,0 @@
|
||||
package reactor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
//"FRMS/internal/pkg/logging"
|
||||
//"google.golang.org/grpc"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
)
|
||||
|
||||
// implements grpc handler and device data aggregater handler
|
||||
// grpc status update handler
|
||||
func (c *ReactorCoordinator) Ping() {
|
||||
// sends all device status to central coordinator
|
||||
fmt.Printf("Pinging coordinator\n")
|
||||
// get devices
|
||||
devices, err := c.GetDevices()
|
||||
if err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
|
||||
req := &pb.ReactorStatusPing{Id: int32(c.ID), Devices: devices}
|
||||
if _, err := c.MonitoringClient.ReactorStatusHandler(context.Background(), req); err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue