TUI working with reactor connects/disconnects. Fixed some other minor bugs, such as the manager not updating client info on new connections.

main
KeeganForelight 3 years ago
parent 24f7c39263
commit f64a9d81fc

Binary file not shown.

Binary file not shown.

@ -0,0 +1,2 @@
#!/bin/bash
env GOOS=linux GOARCH=arm GOARM=7 go build

@ -24,6 +24,7 @@ func main() {
w := flag.CommandLine.Output()
fmt.Fprintf(w,"Usage: %s port \n", os.Args[0])
}
iptr := flag.String("i","192.1.168.136","ip address of listener")
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
@ -36,8 +37,9 @@ func main() {
} else if err != nil {
log.Fatal(err)
}
ip := *iptr
ch := make(chan error)
t := NewTUI("192.1.168.136",port,ch)
t := NewTUI(ip,port,ch)
go t.Start()
err = <-ch
if err != nil {

Binary file not shown.

@ -3,18 +3,15 @@ module FRMS
go 1.18
require (
github.com/gdamore/tcell/v2 v2.4.1-0.20210905002822-f057f0a857a1
github.com/rivo/tview v0.0.0-20220610163003-691f46d6f500
google.golang.org/grpc v1.47.0
google.golang.org/protobuf v1.27.1
)
require (
github.com/chzyer/readline v1.5.0 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/gdamore/tcell/v2 v2.4.1-0.20210905002822-f057f0a857a1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 // indirect
github.com/ianlancetaylor/demangle v0.0.0-20220517205856-0058ec4f073c // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/rivo/uniseg v0.2.0 // indirect

@ -4,10 +4,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
github.com/chzyer/readline v1.5.0 h1:lSwwFrbNviGePhkewF1az4oLmcwqCZijQ2/Wi3BGHAI=
github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic=
github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@ -51,12 +47,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 h1:mpL/HvfIgIejhVwAfxBQkwEjlhP5o0O9RAeTAjpwzxc=
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/ianlancetaylor/demangle v0.0.0-20220517205856-0058ec4f073c h1:rwmN+hgiyp8QyBqzdEX43lTjKAxaqCrYHaU5op5P9J8=
github.com/ianlancetaylor/demangle v0.0.0-20220517205856-0058ec4f073c/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
@ -100,9 +92,7 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY=
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

@ -45,11 +45,11 @@ func (c *Coordinator) GetStatus() []*DeviceStatus {
for {
select{
case s:= <-statusChan:
fmt.Printf("%v is %v, ",s.Type,s.Status)
//fmt.Printf("%v is %v, ",s.Type,s.Status)
devs = append(devs,s)
wg.Done()
case <-allDone:
fmt.Printf("\n")
fmt.Printf("Devices scaned\n")
return devs
}
}
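For reference, the collection loop above is a fan-in: device goroutines send onto statusChan while a WaitGroup, decremented on each receive, closes the done channel once every device has reported. A self-contained sketch of the same pattern (illustrative names, not the repo's exact code):

package main

import (
	"fmt"
	"sync"
)

type DeviceStatus struct{ Type, Status string }

func collect(n int) []*DeviceStatus {
	statusChan := make(chan *DeviceStatus)
	allDone := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(n)
	go func() { wg.Wait(); close(allDone) }() // fires after n receives
	for i := 0; i < n; i++ {
		go func(i int) {
			statusChan <- &DeviceStatus{Type: fmt.Sprintf("dev%d", i), Status: "ACTIVE"}
		}(i)
	}
	var devs []*DeviceStatus
	for {
		select {
		case s := <-statusChan:
			devs = append(devs, s)
			wg.Done() // count the receive, not the send
		case <-allDone:
			return devs
		}
	}
}

func main() { fmt.Printf("%d devices scanned\n", len(collect(3))) }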

@ -10,7 +10,7 @@ type CreateManager interface {
}
type GeneralManager interface {
Start()
Start(*Client)
GetPort() int
}
@ -42,22 +42,25 @@ func (c *Coordinator) Start() {
// on each new connection we want to check its id against our mapping
}
func (c *Coordinator) ClientHandler(cl *Client) {
func (c *Coordinator) ClientHandler(cl *Client) int {
// (creates and) notifies manager of client connection
m := c.GetManager(cl)
go m.Start(cl)
return m.GetPort()
}
func (c *Coordinator) GetManager(cl *Client) GeneralManager {
c.Managers.Lock()
defer c.Managers.Unlock()
if m, exists := c.Managers.Directory[cl.Id]; exists {
var exists bool
var m GeneralManager
if m, exists = c.Managers.Directory[cl.Id]; !exists {
// manager in memory
go m.Start()
} else {
// create channel and manager
m := c.NewManager(cl, c.Sys, c.Err)
m = c.NewManager(cl, c.Sys, c.Err)
c.Managers.Directory[cl.Id] = m
go m.Start()
}
return m
}
// tui port grabber
// reactor coordinator
type reactorCoordinator struct {
// empty unexported struct; exists only to carry methods
@ -75,7 +78,6 @@ func NewReactorCoordinator(sys *System, err chan error) *Coordinator {
type tuiCoordinator struct {
//can add fields as needed
Ip string
Port map[uint32]int
}
func (t *tuiCoordinator) NewManager(cl *Client, sys *System, err chan error) GeneralManager {
@ -83,11 +85,5 @@ func (t *tuiCoordinator) NewManager(cl *Client, sys *System, err chan error) Gen
}
func NewTUICoordinator(ip string, sys *System, err chan error) *Coordinator {
p := make(map[uint32]int)
return NewCoordinator(&tuiCoordinator{Ip:ip,Port:p}, sys, err)
}
func (c *Coordinator) GetTUIPort(cl *Client) int {
m := c.Managers.Directory[cl.Id]
return m.GetPort()
return NewCoordinator(&tuiCoordinator{Ip:ip}, sys, err)
}
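The reworked GetManager above is a get-or-create under a single lock: look up the client's id, build and register a manager only on a miss, and hand the (possibly new) manager back to ClientHandler, which now also returns the manager's port. A minimal standalone sketch of that pattern (hypothetical types, not the repo's exact ones):

package main

import (
	"fmt"
	"sync"
)

type Manager struct{ id uint32 }

type Registry struct {
	sync.Mutex
	directory map[uint32]*Manager
}

// GetOrCreate returns the existing manager for id, creating and
// registering one first if this is a new client.
func (r *Registry) GetOrCreate(id uint32) *Manager {
	r.Lock()
	defer r.Unlock()
	m, exists := r.directory[id]
	if !exists {
		m = &Manager{id: id}
		r.directory[id] = m
	}
	return m
}

func main() {
	r := &Registry{directory: make(map[uint32]*Manager)}
	fmt.Println(r.GetOrCreate(42) == r.GetOrCreate(42)) // true: same manager
}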

@ -1,6 +1,7 @@
package server
import (
"log"
"fmt"
"net"
"context"
@ -56,7 +57,7 @@ func (l *Listener) Start() {
l.Err <- err
}
// listener started and grpc handler registered
fmt.Printf("Started listener on %v:%v\n",l.Ip,l.Port)
log.Printf("Started listener on %v:%v\n",l.Ip,l.Port)
}
func (l *Listener) Register() error {
@ -75,8 +76,7 @@ func (l *Listener) Register() error {
func (l *Listener) ReactorClientDiscoveryHandler(ctx context.Context, ping *pb.ReactorClientRequest) (*pb.ReactorClientResponse, error) {
// incoming reactor ping; may need to spawn a coordinator
c := &Client{Ip:ping.GetClientIp(),Model:ping.GetClientModel(),Type:"reactor",Port:int(ping.GetClientPort()),Id:ping.GetClientId()}
fmt.Printf("%v Client %v has connected from %v:%v\n",c.Type,c.Id,c.Ip,c.Port)
log.Printf("%v %v has connected from %v:%v\n",c.Type,c.Id,c.Ip,c.Port)
coord, ok := l.Coordinators["reactor"]
if !ok {
coord = NewReactorCoordinator(l.Sys, l.Err)
@ -90,14 +90,16 @@ func (l *Listener) ReactorClientDiscoveryHandler(ctx context.Context, ping *pb.R
func (l *Listener) TUIClientDiscoveryHandler(ctx context.Context, ping *pb.TUIClientRequest) (*pb.TUIClientResponse, error) {
t := &Client{Type:"tui",Id:ping.GetClientId()}
coord, ok := l.Coordinators["tui"]
var coord *Coordinator
var ok bool
coord, ok = l.Coordinators["tui"]
if !ok {
coord := NewTUICoordinator(l.Ip, l.Sys, l.Err)
coord = NewTUICoordinator(l.Ip, l.Sys, l.Err)
l.Coordinators["tui"] = coord
coord.Start()
}
go coord.ClientHandler(t)
port := coord.GetTUIPort(t)
port := coord.ClientHandler(t)
log.Printf("%v %v has connected from %v:%v\n",t.Type,t.Id,l.Ip,port)
r := &pb.TUIClientResponse{ClientId:t.Id,ServerIp:l.Ip,ServerPort:int32(port)}
return r, nil
}

@ -24,15 +24,15 @@ type active struct{
int
}
func NewManager(c *Client, err chan error) *Manager {
hb := time.Duration(1) //hb to
func NewManager(err chan error) *Manager {
hb := time.Duration(1 * time.Second) // heartbeat interval
m := &Manager{Hb:hb,Err:err}
m.Client = c
return m
}
func (m *Manager) Start() {
func (m *Manager) Start(cl *Client) {
// establish connection with client and start pinging at set intervals
m.Client = cl
if !m.Activate() {
// manager already running
m.Err <-errors.New("Manager already running!")
@ -55,6 +55,7 @@ func (m *Manager) IsActive() bool {
}
func (m *Manager) Activate() bool {
// returns true only if this call performed the activation
m.Active.Lock()
defer m.Active.Unlock()
alive := m.Active.bool
@ -68,6 +69,7 @@ func (m *Manager) Activate() bool {
}
func (m *Manager) Deactivate() bool {
// returns true only if this call performed the deactivation
m.Active.Lock()
defer m.Active.Unlock()
alive := m.Active.bool
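Activate and Deactivate are a mutex-guarded test-and-set over the embedded bool: the caller learns whether it was the one to flip the state, which is how Start detects a manager that is already running. A compact sketch of the same idea (field names are illustrative):

package main

import (
	"fmt"
	"sync"
)

type active struct {
	sync.Mutex
	running bool
}

// tryActivate reports whether this call performed the transition;
// it returns false if the manager was already running.
func (a *active) tryActivate() bool {
	a.Lock()
	defer a.Unlock()
	if a.running {
		return false
	}
	a.running = true
	return true
}

func main() {
	var a active
	fmt.Println(a.tryActivate(), a.tryActivate()) // true false
}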

@ -28,20 +28,26 @@ type Devices struct {
func NewReactorManager(c *Client,sys *System,err chan error) GeneralManager {
d := new(Devices)
r := &ReactorManager{Devs:d}
r.Manager = NewManager(c, err)
r.Manager = NewManager(err)
r.System = sys
return r
}
func (r *ReactorManager) Start() {
r.Manager.Start()
func (r *ReactorManager) Start(cl *Client) {
r.Manager.Start(cl)
conn := r.Connect()
empty := &grpc.ClientConn{}
if conn != empty {
go r.UpdateReactor(r.Id,true)
go r.Monitor(conn)
}
}
func (r *ReactorManager) Exit() {
r.Manager.Exit()
go r.UpdateReactor(r.Id,false)
}
func (r *ReactorManager) GetPort() int {
return 0
}
@ -84,19 +90,18 @@ func (r *ReactorManager) Connect() *grpc.ClientConn {
func (r *ReactorManager) Monitor(conn *grpc.ClientConn) {
defer conn.Close()
client := pb.NewMonitoringClient(conn)
r.UpdateReactor(r.Id,true)
for r.IsActive() {
req := &pb.ReactorStatusRequest{Id:r.Id}
resp, err := client.GetReactorStatus(context.Background(),req)
code := status.Code(err)
if code != 0 { // if != OK
go r.UpdateReactor(r.Id,false)
fmt.Printf("Reactor %v down! ", r.Id)
fmt.Printf("Reactor %v down! Code: %v\n", r.Id,code)
r.Exit()
break;
}
for _,v := range resp.GetDevices() {
go r.System.UpdateReactorDevice(r.Id, int(v.GetAddr()), v.GetType(), v.GetStatus(), v.GetData())
}
time.Sleep(r.Hb * time.Second) // time between sensor pings
time.Sleep(r.Hb) // time between sensor pings
}
}
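A note on the heartbeat change above: the old code stored a bare time.Duration(1) (one nanosecond) and re-applied the unit at every call site with r.Hb * time.Second, which multiplies two Durations and only works by accident of the underlying int64. Attaching the unit once at construction and sleeping on the Duration directly is the idiomatic form. A standalone illustration (not repo code):

package main

import (
	"fmt"
	"time"
)

func main() {
	bare := time.Duration(1)        // 1ns: the unit was never attached
	hb := 1 * time.Second           // unit attached once, at construction
	fmt.Println(bare, hb)           // prints "1ns 1s"
	fmt.Println(bare * time.Second) // 1s numerically, but Duration*Duration has no sensible units
}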

@ -2,6 +2,7 @@ package server
import (
"sync"
_ "fmt"
)
// package will create and maintain a concurrent system structure

@ -2,7 +2,7 @@ package server
import (
"fmt"
"time"
_ "time"
_ "sync"
"net"
"log"
@ -17,23 +17,29 @@ type TUIManager struct {
*Manager // embedded manager for access to methods and client
*System
Ip string
Port int
Port *port
Err chan error
*pb.UnimplementedManagementServer
}
type port struct {
Chan chan int
int
}
func NewTUIManager(ip string, c *Client, sys *System, err chan error) GeneralManager {
m := NewManager(c, err)
m := NewManager(err)
t := &TUIManager{Err: err}
t.Port = &port{Chan:make(chan int)}
t.Manager = m
t.System = sys
t.Ip = ip
return t
}
func (t *TUIManager) Start() {
func (t *TUIManager) Start(cl *Client) {
//
t.Manager.Start()
t.Manager.Start(cl)
go t.Register() // begin tui server to respond to tui client reqs
//go t.Monitor(conn)
}
@ -43,22 +49,23 @@ func (t *TUIManager) Register() {
if err != nil {
log.Fatal(err)
}
t.Port = lis.Addr().(*net.TCPAddr).Port
grpcServer := grpc.NewServer()
pb.RegisterManagementServer(grpcServer,t)
go grpcServer.Serve(lis)
log.Printf("TUI %v Endpoint active on %v:%v\n",t.Id, t.Ip, t.Port)
// send port now that server is up
t.Port.int = lis.Addr().(*net.TCPAddr).Port
go func(ch chan int,p int) {
ch <-p
}(t.Port.Chan, t.Port.int)
// up and running
}
func (t *TUIManager) GetPort() int {
for t.Port == 0 {
time.Sleep(10 * time.Millisecond)
}
return t.Port
port := <-t.Port.Chan
return port
}
func (t *TUIManager) GetReactors(ctx context.Context, req *pb.GetReactorsRequest) (*pb.GetReactorsResponse, error) {
//
reactors := []*pb.Reactor{}
resp := &pb.GetReactorsResponse{ClientId:t.Id,Reactors:reactors}
r := t.System.GetReactors()
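The new port struct above swaps the old polling loop (for t.Port == 0 { sleep }) for a blocking handoff: Register publishes the OS-assigned port on a channel once the listener is bound, and GetPort simply receives. A minimal sketch of the pattern with hypothetical names:

package main

import (
	"fmt"
	"log"
	"net"
)

type server struct {
	portReady chan int
}

func (s *server) register() {
	lis, err := net.Listen("tcp", ":0") // port 0: the OS picks a free port
	if err != nil {
		log.Fatal(err)
	}
	// publish the bound port; the gRPC server would be started here too
	go func() { s.portReady <- lis.Addr().(*net.TCPAddr).Port }()
}

func (s *server) getPort() int {
	return <-s.portReady // blocks until register has bound a port
}

func main() {
	s := &server{portReady: make(chan int)}
	s.register()
	fmt.Println("endpoint active on port", s.getPort())
}

One caveat carried over from the diff: the port is sent exactly once, so only the first GetPort call returns; any later caller would block forever.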

@ -56,6 +56,22 @@ func NewHWinfo() (*HWinfo, error) {
return h, err
}
func GetId(eth string) (uint32, error) {
maccmd := fmt.Sprintf("ifconfig %v | awk '/ether / {print $2}'", eth)
var stderr bytes.Buffer
var out bytes.Buffer
cmd := exec.Command("bash","-c",maccmd)
cmd.Stdout = &out
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
return 0, err
}
hash := fnv.New32a()
hash.Write(out.Bytes())
id := hash.Sum32()
return id, nil
}
func GetIp(eth string) (string,error) {
ipcmd := fmt.Sprintf("ifconfig %v | awk '/inet / {print $2}'",eth)
var stderr bytes.Buffer
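The new GetId above shells out to ifconfig for the interface's MAC address and hashes it with FNV-1a to get a stable uint32 client id. The derivation itself is just a hash over bytes, e.g. (standalone sketch, not the repo's exact helper):

package main

import (
	"fmt"
	"hash/fnv"
)

func idFromMAC(mac string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(mac)) // hash.Hash's Write never returns an error
	return h.Sum32()
}

func main() {
	fmt.Println(idFromMAC("b8:27:eb:12:34:56")) // same input, same id, every run
}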

@ -7,6 +7,7 @@ import (
"time"
"math"
"context"
"FRMS/internal/pkg/system"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
"google.golang.org/grpc/credentials/insecure"
@ -16,9 +17,10 @@ import (
// this package will interact with the server to get system status
type TUIClient struct {
Id uint32
Ip string
Port int
client pb.ManagementClient
ClientConn *grpc.ClientConn
Active
}
type Active struct {
@ -28,7 +30,11 @@ type Active struct {
}
func NewTUIClient(ip string, port int) *TUIClient {
t := &TUIClient{Ip:ip,Port:port}
id, err := system.GetId("eth2")
if err != nil {
log.Fatal(err)
}
t := &TUIClient{Id:id,Ip:ip,Port:port}
return t
}
@ -75,11 +81,12 @@ func (t *TUIClient) Connect() {
}
// handle handshake logic here
client := pb.NewHandshakeClient(conn)
req := &pb.TUIClientRequest{ClientId:10120}
req := &pb.TUIClientRequest{ClientId:t.Id}
resp, err := client.TUIClientDiscoveryHandler(context.Background(),req)
if err != nil {
log.Fatal(err)
}
conn.Close() // closing old connection
// setting up server connection with provided port
t.Ip = resp.GetServerIp()
t.Port = int(resp.GetServerPort())
@ -98,15 +105,19 @@ func (t *TUIClient) Connect() {
log.Fatal("Central server currently unavailable")
}
}
t.client = pb.NewManagementClient(conn)
t.ClientConn = conn
break;
}
}
// going to redo this idk wtf i was thinking
func (t *TUIClient) GetReactors() (map[uint32]*Reactor, error) {
req := &pb.GetReactorsRequest{}
r := make(map[uint32]*Reactor)
resp, err := t.client.GetReactors(context.Background(), req)
client := pb.NewManagementClient(t.ClientConn)
resp, err := client.GetReactors(context.Background(), req)
if err != nil {
return r, err
}
@ -120,7 +131,8 @@ func (t *TUIClient) GetReactors() (map[uint32]*Reactor, error) {
func (t *TUIClient) GetReactorDevices(id uint32) (map[int]*Device, error) {
req := &pb.GetReactorDevicesRequest{}
r := make(map[int]*Device)
resp, err := t.client.GetReactorDevices(context.Background(), req)
client := pb.NewManagementClient(t.ClientConn)
resp, err := client.GetReactorDevices(context.Background(), req)
if err != nil {
return r, nil
}
@ -132,12 +144,14 @@ func (t *TUIClient) GetReactorDevices(id uint32) (map[int]*Device, error) {
func (t *TUIClient) DeleteReactor(id uint32) error {
req := &pb.DeleteReactorRequest{}
_, err := t.client.DeleteReactor(context.Background(), req)
client := pb.NewManagementClient(t.ClientConn)
_, err := client.DeleteReactor(context.Background(), req)
return err
}
func (t *TUIClient) DeleteReactorDevice(id uint32, addr int) error {
req := &pb.DeleteReactorDeviceRequest{}
_, err := t.client.DeleteReactorDevice(context.Background(), req)
client := pb.NewManagementClient(t.ClientConn)
_, err := client.DeleteReactorDevice(context.Background(), req)
return err
}
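The client changes above stop caching a pb.ManagementClient and instead keep only the *grpc.ClientConn, constructing a stub per call. That is cheap by design: generated gRPC stubs are thin wrappers, while the ClientConn holds the actual (expensive, multiplexed) connection. A sketch of the split (hypothetical address; pb stands in for the generated package):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial once; the ClientConn multiplexes every RPC over one connection.
	conn, err := grpc.Dial("127.0.0.1:2000",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Per-call stubs over the shared conn are essentially free:
	//   client := pb.NewManagementClient(conn)
	//   resp, err := client.GetReactors(ctx, req)
}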

@ -8,24 +8,21 @@ import (
"github.com/rivo/tview"
_ "github.com/gdamore/tcell/v2"
)
// gonna start from scratch :/
type TUI struct {
*Display
*LocalView
//*LocalView
*TUIClient
SelectedReactor <-chan uint32
Err chan error
}
func NewTUI(ip string, port int, ch chan error) *TUI {
r := make(map[uint32]*Reactor)
//r := make(map[uint32]*Reactor)
t := &TUI{}
l := new(LocalView)
l.Reactors = r
t.LocalView = l
c := make(chan uint32)
t.Display = NewDisplay(c)
t.SelectedReactor = c
//l := new(LocalView)
//l.Reactors = r
//t.LocalView = l
t.Err = ch
client := NewTUIClient(ip, port)
t.TUIClient = client
@ -34,25 +31,22 @@ func NewTUI(ip string, port int, ch chan error) *TUI {
func (t *TUI) Start() {
// setup tview app and wait for user connection in standin modal
t.Display.Start()
t.Connect()
if err := t.TUIClient.Start(); err != nil {
t.Err <- err
}
go t.Monitor()
go t.Listen()
t.CreateDisplay()
t.Display.Start()
//go t.Refresh()
}
func (t *TUI) Refresh() {
for {
//
}
}
func (t *TUI) Listen() {
for {
select {
case <-t.SelectedReactor:
//blah
}
}
func (t *TUI) CreateDisplay() {
c := make(chan uint32)
t.Display = NewDisplay(c)
t.SelectedReactor = c
t.ReactorList.AddItem("REACTOR IS ONLINE"," ",0,nil)
t.Flex.AddItem(t.ReactorList,0,1,true).
AddItem(t.DeviceList,0,2,false)
}
func (t *TUI) Monitor() {
@ -74,7 +68,9 @@ func (t *TUI) Monitor() {
//t.DisplayReactorDevices(devs)
case <-timer:
// time to ping for status
go t.UpdateReactors()
t.App.QueueUpdateDraw(func() {
t.UpdateReactors()
})
}
}
}
@ -85,9 +81,9 @@ func (t *TUI) UpdateDevices(r uint32) {
if err != nil {
log.Fatal(err)
}
for a, d := range devs {
go t.LocalView.UpdateReactorDevices(r,a,d)
}
//for a, d := range devs {
// go t.LocalView.UpdateReactorDevices(r,a,d)
//}
t.DisplayReactorDevices(devs)
}
@ -97,10 +93,10 @@ func (t *TUI) UpdateReactors() {
if err != nil {
log.Fatal(err)
}
for id, r := range reactors {
go t.LocalView.UpdateReactors(id, r)
}
t.DisplayReactors(reactors)
//for id, r := range reactors {
// go t.LocalView.UpdateReactors(id, r)
//}
}
@ -119,21 +115,22 @@ func NewDisplay(ch chan uint32) *Display {
d.Flex = tview.NewFlex()
d.ReactorList = tview.NewList().ShowSecondaryText(false)
d.DeviceList = tview.NewList().ShowSecondaryText(false)
d.ReactorList.SetTitle("Reactors").SetBorder(true)
d.DeviceList.SetTitle("Devices").SetBorder(true)
d.SelectedReactor = ch
d.ReactorList.SetSelectedFunc(d.SelectReactor)
return d
}
func (d *Display) Start() {
d.Flex.AddItem(d.ReactorList.SetBorder(true).SetTitle("Reactors"),0,1,true)
d.Flex.AddItem(d.DeviceList.SetBorder(true).SetTitle("Devices"),0,3,false)
if err := d.App.SetRoot(d.Flex, true).Run(); err != nil {
d.App.Stop()
log.Fatal(err)
}
}
func (d *Display) DisplayReactors(r map[uint32]*Reactor) {
// function to display reactor list to table
d.ReactorList.Clear()
for id, reactor := range r {
var status string
if reactor.Status {
@ -141,7 +138,7 @@ func (d *Display) DisplayReactors(r map[uint32]*Reactor) {
} else {
status = "[red]OFFLINE"
}
txt := fmt.Sprintf("Reactor %v is %v", id, status)
txt := fmt.Sprintf("%v is %v", id, status)
d.ReactorList.AddItem(txt,string(id),0,nil)
}
}
@ -157,7 +154,7 @@ func (d *Display) SelectReactor(index int, main, id string, r rune) {
func (d *Display) DisplayReactorDevices(devs map[int]*Device) {
//Function that displays devices to the table
d.DeviceList.Clear()
//d.DeviceList.Clear()
for addr, dev := range devs {
var status string
if dev.Status == "ACTIVE" {
@ -168,4 +165,5 @@ func (d *Display) DisplayReactorDevices(devs map[int]*Device) {
txt := fmt.Sprintf("%v is %v at %x",dev.Type,status,addr)
d.DeviceList.AddItem(txt,"",0,nil)
}
d.App.Draw()
}

notes (+29 lines)

@ -675,3 +675,32 @@ relies on
- server for up to date reactor and sensor data
TUI TIME
coordinator/listener/reactor seem to be stable
- will have to add exiting for the tui manager
need to create the actual TUI at this point
separate it into two logical aspects
- the client that gets the system info from the server
- the tui which actually displays this info to the user
how to share the data between the client and the tui? (a sketch of the methods option follows this list)
- structs
  - pros
    - very efficient
  - cons
    - risky
- chan
  - pros
    - synchronizes access
  - cons
    - hard to set up and finicky
- methods
  - pros
    - synchronized
    - easy to implement
  - cons
    - need to store/embed a struct
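A minimal sketch of the methods option, assuming a shared view struct whose every access goes through a synchronized method (names are illustrative):

package main

import (
	"fmt"
	"sync"
)

type Reactor struct{ Status bool }

// LocalView sits between the client (writer) and the tui (reader).
type LocalView struct {
	mu       sync.Mutex
	reactors map[uint32]*Reactor
}

func (v *LocalView) UpdateReactor(id uint32, r *Reactor) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.reactors[id] = r
}

// Snapshot copies the map so the tui can render without holding the lock.
func (v *LocalView) Snapshot() map[uint32]*Reactor {
	v.mu.Lock()
	defer v.mu.Unlock()
	out := make(map[uint32]*Reactor, len(v.reactors))
	for id, r := range v.reactors {
		out[id] = r
	}
	return out
}

func main() {
	v := &LocalView{reactors: make(map[uint32]*Reactor)}
	v.UpdateReactor(1, &Reactor{Status: true})
	fmt.Println(len(v.Snapshot()), "reactor(s) in view")
}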
