in process of adding listener for tui clients

main
Keegan 2 years ago
parent 6109b79215
commit 58d4c08ce4

@@ -3,10 +3,12 @@ env GOOS=linux GOARCH=arm GOARM=7 go build -o bin/reactor_linux_arm cmd/reactor/
env GOOS=linux GOARCH=arm64 go build -o bin/reactor_linux_arm64 cmd/reactor/main.go
env GOOS=linux GOARCH=arm GOARM=7 go build -o bin/tui_linux_arm cmd/tui/main.go
env GOOS=linux GOARCH=arm64 go build -o bin/tui_linux_arm64 cmd/tui/main.go
env GOOS=linux GOARCH=amd64 go build -o bin/tui_linux_amd64 cmd/tui/main.go
env GOOS=linux GOARCH=amd64 go build -o bin/server_linux_amd64 cmd/server/main.go
tar -czf pireactor.tar.gz bin/reactor_linux_arm64
tar -czf bbreactor.tar.gz bin/reactor_linux_arm
tar -czf tui.tar.gz bin/tui_linux_amd64
tar -czf server.tar.gz bin/server_linux_amd64
tar -czvf pireactor.tar.gz -C bin reactor_linux_arm64
tar -czvf bbreactor.tar.gz -C bin reactor_linux_arm
tar -czvf server.tar.gz -C bin server_linux_amd64
tar -czvf tui.tar.gz -C bin tui_linux_amd64 tui_linux_arm tui_linux_arm64

@@ -6,7 +6,6 @@ import (
_ "fmt"
_ "log"
"os/exec"
"os/user"
"bytes"
"strings"
"sync"

@@ -29,6 +29,17 @@ type InfoStream struct {
// NewListener will add the statusmon to the list of devs to echo to
Stream chan *DeviceInfo
Layout *syslayout
*listeners
}
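// listeners is the echo registry; each lischan pairs a subscribed
// StatusMonitor with a WaitGroup so in-flight sends can drain on removal.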
type listeners struct {
sync.RWMutex
Listeners map[uint32]*lischan
}
type lischan struct {
sync.WaitGroup
*StatusMonitor
}
type syslayout struct {
@@ -42,6 +53,7 @@ func NewInfoStream() *InfoStream {
s := &InfoStream{Stream:dch}
m := make(map[uint32]*DeviceInfo)
s.Layout = &syslayout{Devs:m}
s.listeners = &listeners{Listeners:make(map[uint32]*lischan)}
return s
}
@@ -59,6 +71,7 @@ func (s *InfoStream) Listener() {
for {
deviceInfo := <-s.Stream
go s.Update(deviceInfo)
go s.Echo(deviceInfo)
}
}
@@ -74,23 +87,49 @@ func (s *InfoStream) Update(d *DeviceInfo) {
go s.Echo(d)
}
func (s *InfoStream) Echo(d *DeviceInfo) {
s.Listeners.RLock()
defer s.Listeners.RUnlock()
func (l *listeners) Echo(d *DeviceInfo) {
l.RLock()
defer l.RUnlock()
// read only lock
for _, lis := range s.Listeners {
go func(){
lis <-d
}()
for _, lis := range l.Listeners {
lis.Add(1)
go func(listener *lischan, dev *DeviceInfo){
defer listener.Done()
listener.StatusChan <-dev
}(lis,d)
}
}
func (s *InfoStream) AddListner(ch chan *DeviceInfo) {
s.Listeners.Lock()
defer s.Listeners.Unlock()
s.Listeners = append(s.Listeners, ch)
func (s *InfoStream) AddListener(id uint32) (map[uint32]*DeviceInfo, *StatusMonitor) {
	s.listeners.Lock()
	s.Layout.Lock()
	defer s.listeners.Unlock()
	defer s.Layout.Unlock()
	if lis, ok := s.listeners.Listeners[id]; ok {
		// listener already exists; reuse it so each client id maps to a single monitor
		return s.Layout.Devs, lis.StatusMonitor
	}
	ch := make(chan *DeviceInfo)
	sm := NewStatusMonitor(ch)
	s.listeners.Listeners[id] = &lischan{StatusMonitor: sm}
	return s.Layout.Devs, sm
}
func (l *listeners) RemoveListener(id uint32) error {
	l.Lock()
	defer l.Unlock()
	lis, ok := l.Listeners[id]
	if !ok {
		return errors.New("listener doesn't exist")
	}
	lis.Wait()            // let in-flight echoes drain first
	close(lis.StatusChan) // channels are closed with the builtin, not a method
	delete(l.Listeners, id)
	return nil
}
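The WaitGroup half of lischan is what makes removal safe: Echo marks every in-flight send, and RemoveListener waits for those sends to drain before closing the channel. A minimal lifecycle sketch against the API above (the watch helper and its printing are illustrative, not part of this commit, and assume fmt is imported):

func watch(s *InfoStream, clientId uint32) {
	// returns the layout known before we subscribed, plus our monitor
	devs, sm := s.AddListener(clientId)
	for _, d := range devs {
		fmt.Printf("existing: %v\n", d)
	}
	// live echoes; the loop ends when RemoveListener closes StatusChan
	for d := range sm.StatusChan {
		fmt.Printf("update: %v\n", d)
	}
}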
//func (s *InfoStream) GetLayout() map[uint32]*DeviceInfo {
//s.Layout.RLock()
//defer s.Layout.RUnlock()
@@ -102,6 +141,12 @@ type StatusMonitor struct {
// serve as base to embed in managers to send/receive device info
TransactionId chan uint32 // monotonically increases to track outdated reqs
StatusChan chan *DeviceInfo // sending reactor info in same fmt
Buffer *buf
}
type buf struct {
Buffer map[uint32]*DeviceInfo //directory of changes since last req
sync.Mutex
}
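As the comment reads, Buffer is meant to coalesce changes between client requests: the newest DeviceInfo per device wins, and a poll drains the map. A sketch of the two operations that implies; the method names and the DeviceInfo.Id field are assumptions, not code from this commit:

func (b *buf) put(d *DeviceInfo) {
	b.Lock()
	defer b.Unlock()
	b.Buffer[d.Id] = d // newer info for a device replaces the older entry
}

func (b *buf) flush() []*DeviceInfo {
	b.Lock()
	defer b.Unlock()
	out := make([]*DeviceInfo, 0, len(b.Buffer))
	for _, d := range b.Buffer {
		out = append(out, d)
	}
	b.Buffer = make(map[uint32]*DeviceInfo) // reset the changes-since-last-req directory
	return out
}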
func (s *StatusMonitor) Start() {
@@ -133,6 +178,7 @@ type SystemViewer struct {
type ds struct {
Reactors map[uint32]*InfoStream //map from reactor id to its device info stream
Clients map[uint32]uint32 // maps tui ids to checked out reactor dev ids
sync.Mutex
}
@@ -140,7 +186,8 @@ func NewSystemViewer() *SystemViewer {
rs := NewInfoStream()
s := &SystemViewer{ReactorStream:rs}
m := make(map[uint32]*InfoStream)
s.DeviceStream = &ds{Reactors:m}
c := make(map[uint32]uint32)
s.DeviceStream = &ds{Reactors:m,Clients:c}
return s
}
@@ -165,6 +212,20 @@ func (s *SystemViewer) AddDeviceSender(reactorId uint32) *StatusMonitor {
return ds.AddSender()
}
func (s *SystemViewer) AddReactorListener(tid uint32) (map[uint32]*DeviceInfo, *StatusMonitor) {
	// adds a status monitor as a listener and returns any reactors loaded before our channel went active
	// tid serves as the client id and limits each client to one reactor listener
	return s.ReactorStream.AddListener(tid)
}
func (s *SystemViewer) AddDeviceListener(rid, tid uint32) (map[uint32]*DeviceInfo, *StatusMonitor) {
	// same idea for a single reactor's device stream; assumes AddDeviceSender
	// already created the stream for this reactor
	s.DeviceStream.Lock()
	ds := s.DeviceStream.Reactors[rid]
	s.DeviceStream.Unlock()
	return ds.AddListener(tid)
}
/*
func (s *SystemViewer) GetReactorStatus() map[uint32]DeviceInfo {
devs := s.ReactorStream.GetLayout()
ret := make(map[uint32]DeviceInfo)
@@ -185,3 +246,4 @@ func (s *SystemViewer) GetDeviceStatus(reactorId uint32) map[uint32]DeviceInfo {
}
return ret
}
*/

@@ -115,6 +115,24 @@ func (t *TUIManager) GetPort() int {
return port
}
// tui client requests and logic will be down here
func (t *TUIManager) ReactorListener() {
	// called on start: drain the buffered reactor state, then stream live updates
	buffer, monitor := t.SystemViewer.AddReactorListener(t.Id)
	for _, dev := range buffer {
		go t.UpdateReactor(dev)
	}
	for dev := range monitor.StatusChan {
		go t.UpdateReactor(dev)
	}
}
func (t *TUIManager) DeviceListener(id uint32) {
	// id here is the reactor whose device stream the client is checking out
	buffer, monitor := t.SystemViewer.AddDeviceListener(id, t.Id)
	for _, dev := range buffer {
		go t.UpdateDevices(dev)
	}
	for dev := range monitor.StatusChan {
		go t.UpdateDevices(dev)
	}
}
func (t *TUIManager) GetDevices(ctx context.Context, req *pb.GetDevicesRequest) (*pb.GetDevicesResponse, error) {
go t.PingReset()
rid := req.GetReactorId()

notes

@@ -847,3 +847,20 @@ TUI - TUI Client
every debug message will be of format
topic, devcode: id
alright time to get this damn tui updating working
general implementation details
- library card method
  - only allow clients to check out 1 device stream at a time (besides the reactor stream, obviously)
  - close the old stream on the call that opens a new one (see the sketch after these notes)
- should we even store the reactor devices locally?
  - or we could request them when a reactor is selected and then let the stream take care of itself
  - simplifies a few things
- same setup for reactors/devices
  - just create device listeners dynamically and reactor listeners at the start
  - only check out the reactor stream and 1 device stream at a time
- a request for devices gets you the current state and adds your listener to the echo chain so that you receive any updates
- need to ensure sends can complete even if the manager is dead
  - close the channel?
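A minimal sketch of how that library-card checkout could sit on the ds map from the diff above; the Checkout method, its error string, and the errors import are assumptions, not code from this commit:

// a tui client may hold at most one device stream: checking out a new
// reactor first returns the old stream, then records the new loan.
func (d *ds) Checkout(tid, rid uint32) (*InfoStream, error) {
	d.Lock()
	defer d.Unlock()
	if old, ok := d.Clients[tid]; ok {
		if err := d.Reactors[old].RemoveListener(tid); err != nil {
			return nil, err
		}
	}
	stream, ok := d.Reactors[rid]
	if !ok {
		return nil, errors.New("no device stream for that reactor")
	}
	d.Clients[tid] = rid
	return stream, nil
}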
