merging config

main
spinach 2 years ago
commit 1caa6c78c5

@ -1,8 +1,13 @@
*
# excluding everything and only allowing directly relevant stuff
!cmd/server/main.go
!cmd/reactor/main.go
!internal
!tokens
!configs
!*.yaml
!go.mod
!go.sum
!server
!reactor
!.env

.gitignore

@ -21,6 +21,15 @@ bin
*.tar.gz
# logs
*.log
# binaries generated in testing
cmd/server/server
cmd/reactor/reactor
cmd/tui/tui
tokens
logs
# task related
.task
# machine dependent
tokens/
logs/
influxdb/config

@ -0,0 +1 @@
b43ecff1fe53e18c4c9b756b32d38078

@ -0,0 +1,22 @@
# syntax=docker/dockerfile:1
FROM --platform=$BUILDPLATFORM golang:1.18-alpine as builder
WORKDIR /app
COPY . .
RUN go mod download
ARG TARGETOS TARGETARCH TARGETVARIANT
RUN if [ "$TARGETVARIANT" = "v7" ]; \
then \
export GOARM=7; \
fi; \
CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o /reactor ./cmd/reactor/main.go
FROM alpine
COPY --from=builder /reactor .
ENTRYPOINT [ "./reactor" ]

@ -12,7 +12,6 @@ RUN CGO_ENABLED=0 go build -o /server ./cmd/server/main.go
FROM alpine
COPY --from=builder /server .
COPY --from=builder /app/tokens/ ./tokens
EXPOSE 2022
EXPOSE 2023

@ -0,0 +1,46 @@
version: '3'
tasks:
clean:
desc: "clean all of the old binaries"
cmds:
- rm -v bin/* 2>/dev/null
all:
desc: "cleans and builds all"
deps: [clean, bb, server]
bb:
desc: "Builds and sends to the beaglebone"
cmds:
- task: go-build
vars:
GOARM: 7
GOARCH: "arm"
GOOS: "linux"
BUILD_DIR: "reactor"
- scp bin/reactor_linux_arm debian:~/
server:
desc: "Builds server binary"
cmds:
- task: go-build
vars:
BUILD_DIR: "server"
GOOS: "{{OS}}"
GOARCH: "{{ARCH}}"
go-build:
internal: true
cmds:
- go build -o bin/{{.BUILD_DIR}}_{{.GOOS}}_{{.GOARCH}} cmd/{{.BUILD_DIR}}/main.go
sources:
- internal/pkg/**/*.go
- cmd/{{.BUILD_DIR}}/main.go
generates:
- bin/{{.BUILD_DIR}}_{{.GOOS}}_{{.GOARCH}}
env:
GOARM: "{{.GOARM}}"
GOARCH: "{{.GOARCH}}"
GOOS: "{{.GOOS}}"

@ -1,24 +1,149 @@
#!/bin/bash
echo "Purging old builds"
# adding commands
usage() {
# how to use this build script
cat <<EOF
usage: $0 [-c|-f] [-l] [-s] [-h] s1 [s2....]
s1, s2, etc. the systems to build for (see -l)
Options:
-c, --clean cleans the bin folder of any existing builds
-f, --force same as clean but skips prompt
-l, --list list available systems to build for
-s, --scp will attempt to scp to applicable devices
-h, --help display this message
EOF
}
list_systems() {
# list available systems to build for
cat <<EOF
Name (shorthand) SCP available? (y/n)
$0 Name or $0 (shorthand) will build for the device
RaspberryPi (rpi) y
BeagleBone (bb) y
Desktop (d) n
Server (s) n
EOF
}
clean_builds() {
# cleans old builds
if [[ "$FORCE" = true ]] ; then
printf 'Cleaning old builds... \n'
rm -v bin/* 2>/dev/null
else
read -p "Clean old builds?(y/n) " -n 1 -r
if [[ $REPLY =~ ^[Yy]$ ]] ; then
rm -v bin/* 2>/dev/null
fi
fi
printf 'Clean!\n'
}
create_build() {
# create build for $1
case $1 in
'rpi' )
printf 'Building for Raspberry Pi!\n'
GARCH="arm64"
PLATFORM="reactor"
;;
'bb')
printf 'Building for BeagleBone!\n'
GARCH="arm"
GARM="GOARM=7"
PLATFORM="reactor"
;;
's')
printf 'Building for Server!\n'
GARCH="amd64"
PLATFORM="server"
;;
'd')
printf 'Building for Desktop!\n'
GARCH="amd64"
PLATFORM="server"
;;
* )
printf 'ERROR: %s type unrecognized!\n' "$1"
usage
exit 1
;;
esac
# setting up build
OUTFILE=$(printf '%s_linux_%s' "$PLATFORM" "$GARCH")
INFILE=$(printf '%s/main.go' "$PLATFORM")
# building
env GOOS=linux GOARCH="$GARCH" $GARM go build -o bin/"$OUTFILE" cmd/"$INFILE"
echo "Finished"
if [[ "$SCP" = true ]] ; then
printf 'Attempting to transfer to %s\n' "$2"
if [[ "$1" == "bb" ]] ; then
printf 'Copying to %s\n' "192.168.100.90"
scp "$HOME/FRMS/bin/$OUTFILE" debian:~/
else
printf 'SCP Not available!\n'
fi
fi
}
echo "Removing Logs"
rm -v bin/log/* 2>/dev/null
# handle long form
for arg in "$@"; do
shift
case "$arg" in
'--help') set -- "$@" "-h" ;;
'--list') set -- "$@" "-l" ;;
'--scp') set -- "$@" "-s" ;;
'--clean') set -- "$@" "-c" ;;
'--force') set -- "$@" "-f" ;;
*) set -- "$@" "$arg" ;;
esac
done
echo "Building reactor binaries"
env GOOS=linux GOARCH=arm GOARM=7 go build -o bin/reactor_linux_arm cmd/reactor/main.go
env GOOS=linux GOARCH=arm64 go build -o bin/reactor_linux_arm64 cmd/reactor/main.go
# handle args
while getopts "lcsfh" opt ; do
case "$opt" in
'h' )
usage
exit 0
;;
'c' )
clean_builds
;;
'f' )
FORCE=true
clean_builds
;;
's' )
SCP=true
;;
'l')
list_systems
;;
'?' )
usage
exit 1
;;
esac
done
echo "Building tui binaries"
env GOOS=linux GOARCH=arm GOARM=7 go build -o bin/tui_linux_arm cmd/tui/main.go
env GOOS=linux GOARCH=arm64 go build -o bin/tui_linux_arm64 cmd/tui/main.go
env GOOS=linux GOARCH=amd64 go build -o bin/tui_linux_amd64 cmd/tui/main.go
shift $(($OPTIND - 1))
echo "Building server binary"
env GOOS=linux GOARCH=amd64 go build -o bin/server_linux_amd64 cmd/server/main.go
for dev in "$@"; do
case "$dev" in
'RaspberryPi') dev='rpi' ;;
'BeagleBone') dev='bb' ;;
'Server') dev='s' ;;
'Desktop') dev='d' ;;
esac
create_build "$dev"
done
printf 'Nothing else to do!\n'
echo "Compressing binaries for distribution"
tar -czf pireactor.tar.gz -C bin reactor_linux_arm64
tar -czf bbreactor.tar.gz -C bin reactor_linux_arm
tar -czf server.tar.gz -C bin server_linux_amd64
tar -czf tui.tar.gz -C bin tui_linux_amd64 tui_linux_arm tui_linux_arm64
# echo "Compressing binaries for distribution"
# tar -czf pireactor.tar.gz -C bin reactor_linux_arm64
# tar -czf bbreactor.tar.gz -C bin reactor_linux_arm
# tar -czf server.tar.gz -C bin server_linux_amd64
# tar -czf tui.tar.gz -C bin tui_linux_amd64 tui_linux_arm tui_linux_arm64

@ -1,2 +0,0 @@
#!/bin/bash
env GOOS=linux GOARCH=arm GOARM=7 go build -o ../../bin/

@ -1,59 +1,57 @@
package main
import (
"FRMS/internal/pkg/config"
"FRMS/internal/pkg/logging"
"FRMS/internal/pkg/reactor"
"fmt"
"os"
"flag"
"log"
"strconv"
"FRMS/internal/pkg/reactor"
"FRMS/internal/pkg/logging"
"syscall"
"os/signal"
"github.com/spf13/viper"
)
type coordinator interface {
type reactorCoordinator interface {
Start()
}
func NewCoordinator(ip string,port int,ch chan error) coordinator {
func NewReactorCoordinator(config *viper.Viper, ch chan error) reactorCoordinator {
// allows interface checking as opposed to calling directly
return reactor.NewCoordinator(ip,port,ch)
return reactor.NewCoordinator(config, ch)
}
func NewConfig(fname string) *viper.Viper {
return config.LoadConfig(fname)
}
func main() {
var ip string
var port int
// shutdown
gracefulShutdown := make(chan os.Signal, 1)
signal.Notify(gracefulShutdown, syscall.SIGINT, syscall.SIGTERM)
flag.Usage = func() {
w := flag.CommandLine.Output()
fmt.Fprintf(w, "Usage: %s port \n",os.Args[0])
}
iptr := flag.String("i","192.168.100.2","ip address of server")
//iptr := flag.String("i","192.1.168.136","ip address of laptop")
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
os.Exit(1)
}
args := flag.Args()
// load any stored configs
conf := NewConfig("reactor")
if p, err := strconv.Atoi(args[0]);p < 1024 || p > 65535 {
flag.Usage()
log.Fatal("Port must be between [1023,65535]")
} else if err != nil {
log.Fatal(err)
}
ip = *iptr
port, err := strconv.Atoi(args[0])
if err != nil {
log.Fatal(err)
}
ch := make(chan error)
rlc := NewCoordinator(ip,port,ch) // host port
rlc := NewReactorCoordinator(conf, ch) // passing conf and err
go rlc.Start()
logging.Debug(logging.DStart, "Reactor Started")
err = <-ch
// check for errors
select {
case err := <-ch:
if err != nil {
log.Fatal(err)
conf.WriteConfig() // save changes
panic(err)
}
case <-gracefulShutdown:
// sigint
fmt.Printf("\nStoring config to %s\n", conf.ConfigFileUsed())
if err := conf.WriteConfig(); err != nil {
panic(err)
}
os.Exit(0)
}
}

@ -1,2 +0,0 @@
#!/bin/bash
go build -race -o ../../bin/server_$GOOS_$GOARCH

@ -1,65 +1,68 @@
package main
import (
_"net/http"
_ "net/http/pprof"
//"flag"
"log"
"os"
"fmt"
"FRMS/internal/pkg/logging"
"os/signal"
"syscall"
"FRMS/internal/pkg/config"
"FRMS/internal/pkg/logging"
"FRMS/internal/pkg/server"
"FRMS/internal/pkg/websocket"
"os"
"github.com/spf13/viper"
)
type listener interface {
type coordinator interface {
Start()
}
func NewListener(ch chan error, port int) listener {
return server.NewListener(ch, port)
func NewCoordinator(config *viper.Viper, ch chan error) coordinator {
return server.NewCentralCoordinator(config, ch)
}
func NewConfig(fname string) *viper.Viper {
return config.LoadConfig(fname)
}
type dbconfig interface {
GetUrl() string
GetOrg() string
GetBucket() string
GetToken() string
type ws interface {
Start()
}
func ReadConfig() dbconfig {
return config.ReadServerConfig()
func NewWebSocket() ws {
return websocket.New()
}
func main() {
// lets get this bread
// all we need to do is call the reactor coordinator and thats it
// removing os flags in favor of env vars
// go func() {
// fmt.Println(http.ListenAndServe("localhost:6060",nil))
// }()
ch := make(chan error)
// creating listener
var lport int
//var dbport int
if port := os.Getenv("gRPC_PORT"); port == "" {
lport = 2022 // default docker port
}
//if port := os.Getenv("DATABASE_PORT"); port == "" {
//dbport = 8086
//}
//fmt.Printf("DBPORT %d\n", dbport)
conf := ReadConfig()
fmt.Printf("Found %v %v %v %v\n",conf.GetUrl(),conf.GetBucket(),conf.GetOrg(),conf.GetToken())
fmt.Printf("Listening on %v\n", lport)
l := NewListener(ch,lport)
gracefulShutdown := make(chan os.Signal, 1)
signal.Notify(gracefulShutdown, syscall.SIGINT, syscall.SIGTERM)
// config file
conf := NewConfig("server")
//db := os.Getenv("DATABASE_URL") // database url
errCh := make(chan error)
go l.Start()
logging.Debug(logging.DStart, "CCO 01 Server started")
err := <-ch // blocking to wait for any errors and keep alive otherwise
if err != nil {
log.Fatal(err)
c := NewCoordinator(conf, errCh)
go c.Start()
logging.Debug(logging.DStart, "CCO 01 Server %s started", conf.Get("name"))
// starting websocket server
w := NewWebSocket()
go w.Start()
select {
case err := <-errCh: // blocking to wait for any errors and keep alive otherwise
panic(err)
case <-gracefulShutdown:
// Shutdown via INT
// storing config
fmt.Printf("\nStoring config to %s\n", conf.ConfigFileUsed())
if err := conf.WriteConfig(); err != nil {
panic(err)
}
fmt.Println("Stored config successfully. Exiting...")
os.Exit(0)
}
}

@ -1,2 +0,0 @@
#!/bin/bash
env GOOS=linux GOARCH=arm GOARM=7 go build -o ../../bin/

@ -1,2 +0,0 @@
#!/bin/bash
go build -o ../../bin/

@ -1,52 +0,0 @@
package main
import (
"fmt"
"flag"
"log"
"os"
"strconv"
"FRMS/internal/pkg/tui"
"FRMS/internal/pkg/logging"
)
type TUI interface {
Start()
}
func NewTUI(ip string, port int, ifconfig string, ch chan error) TUI {
return tui.NewTUI(ip, port, ifconfig, ch)
}
func main() {
var port int
var err error
flag.Usage = func() {
w := flag.CommandLine.Output()
fmt.Fprintf(w,"Usage: %s port [eth*, wlan*, etc.]\n", os.Args[0])
}
iptr := flag.String("i","192.168.100.2","ip address of listener")
//iptr := flag.String("i","192.1.168.136","ip address of laptop")
flag.Parse()
if flag.NArg() != 2 {
flag.Usage()
os.Exit(1)
}
args := flag.Args()
if port, err = strconv.Atoi(args[0]); port < 1024 || port > 65536 {
flag.Usage()
log.Fatal("Port must be between [1023,65535]")
} else if err != nil {
log.Fatal(err)
}
ifconfig := string(args[1])
ip := *iptr
ch := make(chan error)
t := NewTUI(ip,port,ifconfig,ch)
go t.Start()
logging.Debug(logging.DStart, "Started TUI Client")
err = <-ch
if err != nil {
log.Fatal(err)
}
}

@ -8,11 +8,16 @@ services:
ports:
- "2022:2022"
- "2023:2023"
- "2024:2024"
volumes:
- ./logs:/app/log
- ./logs:/log
- server-config:/etc/frms/config
environment:
- LOGTYPE=SERVER
- VERBOSE=1
- LIS_PORT=2022
- REACTOR_PORT=2023
- TUI_PORT=2024
depends_on:
- db
db:
@ -21,20 +26,28 @@ services:
- "8086:8086"
volumes:
- influx-data:/var/lib/influxdb2
- influx-config:/etc/influxdb2
- ./influxdb/startup:/docker-entrypoint-initdb.d
- server-config:/configs
- grafana-provisioning:/grafana
env_file:
- ./internal/configs/db.env
environment:
- DOCKER_INFLUXDB_INIT_MODE=setup
- DOCKER_INFLUXDB_INIT_USERNAME=${INFLUXDB_USERNAME}
- DOCKER_INFLUXDB_INIT_PASSWORD=${INFLUXDB_PASSWORD}
- DOCKER_INFLUXDB_INIT_ORG=${INFLUXDB_ORG}
- DOCKER_INFLUXDB_INIT_BUCKET=${INFLUXDB_BUCKET}
- DOCKER_INFLUXDB_INIT_USERNAME=admin
- DOCKER_INFLUXDB_INIT_PASSWORD=F0r3l1ght
- DOCKER_INFLUXDB_INIT_ORG=ForeLight
- DOCKER_INFLUXDB_INIT_BUCKET=test
grafana:
image: grafana/grafana-oss:latest
ports:
- "3000:3000"
volumes:
- grafana-provisioning:/etc/grafana/provisioning
- grafana-data:/var/lib/grafana
depends_on:
- db
volumes:
grafana-data:
grafana-provisioning:
influx-data:
influx-config:
server-config:

@ -16,6 +16,7 @@ require (
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect

@ -143,6 +143,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=

@ -0,0 +1,14 @@
#!/bin/bash
#DB_URL=$(cat "$INFLUX_CONFIGS_PATH" | awk '/url/ {print $3}' | head -n 1)
DB_URL="frms-db-1:8086"
TOKEN=$(influx auth list --user ${DOCKER_INFLUXDB_INIT_USER_ID} --hide-headers | cut -f 3)
ORG=$(influx org list | grep ${DOCKER_INFLUXDB_INIT_ORG_ID} | awk '{print $2}')
# creating starting server YAML
echo -e "server:\n db-url: ${DB_URL}\n db-org: ${ORG}\n db-token: ${TOKEN}" >/configs/server.yaml;
# creating grafana yaml
influx user create -n grafana -o ${ORG}
GRAFANA_TOKEN=$(influx auth list --user grafana --hide-headers | cut -f 3)
echo -e "apiVersion: 1\n\ndeleteDatasources:\n\ndatasources:\n - name: INFLUXDB\n type: influxdb\n access: proxy\n url: ${DB_URL}\n jsonData:\n httpMode: GET\n httpHeaderName1: 'Authorization'\n secureJsonData:\n httpHeaderValue1: 'Token ${GRAFANA_TOKEN}'" >/grafana/datasources/datasource.yaml

@ -0,0 +1,6 @@
---
# ${gen_statement}
server:
db-url: "${db_url}"
db-org: "${db_org}"
db-token: "${db_token}"

@ -1,4 +0,0 @@
model: raspberrypi
bus: 1
model: beagleboard
bus: 2

@ -0,0 +1,5 @@
INFLUXDB_USERNAME=admin
INFLUXDB_PASSWORD=admin
INFLUXDB_ORG=ForeLight
INFLUXDB_BUCKET=default

@ -0,0 +1,11 @@
devices:
address: 112
name: DO Sensor
reactor:
heartbeat: 5
id: 2166136261
model: ""
name: Dummy Reactor
server:
ip: 192.168.100.2
port: 2022

@ -0,0 +1,26 @@
db:
org: ForeLight
url: http://192.168.100.2:8086
ports_db: 2022
ports_lis: 2022
reactors:
"10002123":
db:
bucket: test
token: ""
name: Beaglebone Black
"2062445129":
devices:
"97":
name: DO Sensor
"99":
name: pH Sensor
"102":
name: RTD Sensor
server:
name: Rack Server
ports:
db: 8086
lis: 2022
reactor: 2023
tui: 2024

@ -0,0 +1,71 @@
*Time for a coherent plan of attack*
### Current Issues:
- There is a lot of redundancy between the managers/coordinators when it comes to basic checks
- the package separation kind of makes sense, but it needs to be better fleshed out
- I need to enforce better separation of responsibilities. It's unclear why state is being kept centrally in the coordinator for no apparent reason.
### Solution:
- Go through the packages and consolidate
- Reduce the state we have to keep centrally, push responsibility to the other packages
### Plan of attack:
- Outline core information flow
- Examine what interfaces are necessary to make this work
- Stop looking at the server/reactor as separate entities
*I need to put the whole docker thing on the back burner for now. It isn't that important when it comes to immediate goals.*
#### 12/05 TODO
- Cleanup server side config stuff to make it coherent
- Reflect changes to reactor side startup
- Boil down interface to address core issues
- Config outline:
1) Startup and load the existing config
2) Overwrite any previous settings with the flags
3) Intelligently translate config into action
4) launch coordinator and start up existing reactor managers
- Config Structure:
- Wrap viper functions in config struct methods to be used through interfaces (see the sketch below)
- minimize the reliance on viper so we can sub in other methods
- is it even important to launch reactor managers? Won't they just be started on connection?
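A minimal sketch of that wrapper idea, assuming a `Config` struct that hides viper behind plain methods (the names `Load`, `Get`, `Set`, `Save` are illustrative, not a settled API):

```go
package config

import "github.com/spf13/viper"

// Config hides viper behind a small method set so callers can depend
// on an interface instead of on viper itself.
type Config struct {
	v *viper.Viper
}

func Load(name string) (*Config, error) {
	v := viper.New()
	v.SetConfigName(name)
	v.SetConfigType("yaml")
	v.AddConfigPath("$HOME/FRMS/internal/configs")
	if err := v.ReadInConfig(); err != nil {
		return nil, err
	}
	return &Config{v: v}, nil
}

func (c *Config) Get(key string) any      { return c.v.Get(key) }
func (c *Config) Set(key string, val any) { c.v.Set(key, val) }
func (c *Config) Save() error             { return c.v.WriteConfig() }
```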
#### 12/06 TODO
- I think I can completely remove the old config way and just pass the viper object directly. It's not worth the hassle of trying to keep track of a million interfaces
#### 12/07 TODO
- I concede, I will just remove flags as most people will never use them anyway and instead rely on env vars and config files. To hell with the flags.
- I am ripping out all of the TUI and status manager stuff; it's convoluted and harder than just pulling info from the database.
- I can eventually rework the TUI to pull from the DB, which is fine: there will never be that many clients anyway, and a lot of them are one-time calls with refreshes that aren't that slow.
- alright I gutted the tui and system viewer, reworking sub coord to launch at start. That way there is a listener active
- time to boil down to functionality a LOT, right now it's clumsy and inefficient, there needs to be a better way to keep everything straight
- Moving the DB responsibilities to the reactor itself seems to be the best way to do it in the short term. Reduce network load and overall keep things efficient. May lead to duplicate copies of data? Not the end of the world, the logging system can make sure we are maintaining entries.
**IDEA**
Reactors log data themselves and send periodic status updates over gRPC to enable monitoring faster than the sample rate (sketched after this outline).
*This could work!*
Outline:
- Reactors reach out to server on boot to get DB info
- compare this against what they have internally to ensure they are up to date and allow for migrations
- Maybe not even save the db info because we don't need to??
- Reactors also receive the port for their specific manager
- Can be dynamically given out to allow for spread out load
- Reactors then reach out with sensor and device info periodically (5s?) which can be used for live monitoring
- RM responds with any potential updates for the device settings i.e. change pwm duty on web interface, pass on to reactor
- Allows for a live view with current readings as well as historical data at differing intervals via Grafana (i.e. 5s live view with a 10 min sample interval)
Need to differentiate sensors vs devices that can be changed
- Sensors have a variable sample rate and eventually name/address
- Devices have more and widely varying parameters: could be PWM with freq/duty/on-off, or a pH pump with on, time, or off, etc.
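A hedged sketch of that periodic push; `Snapshot` and `StatusClient` are stand-in types (the real ones would come from the gRPC layer once the proto settles), and the 5s interval is the one floated above:

```go
package reactor

import "time"

// Snapshot and StatusClient are hypothetical placeholders.
type Snapshot struct{ Devices map[int]string }

type StatusClient interface{ Send(Snapshot) error }

type Reactor struct{ devices map[int]string }

func (r *Reactor) Snapshot() Snapshot { return Snapshot{Devices: r.devices} }

// pushStatus sends sensor/device info every 5s until stop closes,
// letting the server monitor faster than the sample rate.
func pushStatus(c StatusClient, r *Reactor, stop <-chan struct{}) {
	t := time.NewTicker(5 * time.Second)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			_ = c.Send(r.Snapshot()) // a failed send is treated as a disconnect by the RM
		case <-stop:
			return
		}
	}
}
```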
#### 12/09 TODO
- Alright I have a baseline! I want to start to integrate atlas type stuff so that I have some mock data/sensors to work with. I am going to try to flesh out the "atlas" interface/struct to implement some of the more basic commands.
#### 1/11 TODO
Plan of attack for websocket stuff and things
**Questions**
- What to do about the reactor to user comms
- Websockets? GRPC? smoke signals?

@ -876,3 +876,126 @@ Refactoring server code now
is there ever a situation where I would need to run this not on docker?
- can i just hardcode for docker and then rely on nginx for routing etc?
ALRIGHT TIME TO LOCK TF IN
#TODO 8/1
Time to set up proper config loading and storing
Doing it all through interfaces and tagged structs
On start up
Server needs to load up its own config
- take action on that config
wait for client connections
- load client config and reply with associated data
on client disconnect
- store any updates and return to idle state
restructuring away from "listener" and coordinator and stuff
going to just have central server
with an embedded listener
and database and shit
so
SERVER will call NewServer which will take care of subsequents
# TODO 8/5
Config storing time
going to probably have to add an admin database client (aka server client, which makes 0 sense)
can abstract all operations through interface and plugable package
I just realized I coupled my mechanism to Influx's token thing because it works well, but I am going to have to completely rebuild that if it's proprietary or we transition to a new DB
- hopefully a moot point due to the nature of OSS and time series
CONFIG (and by extension DB)
config.UpdateReactor(id, key, value)
config.UpdateSelf(key, value)
should just be a basic way to update a given entry for a reactor (see the sketch below)
separating server and reactor methods should lead to less finicky behaviour
should also probably wrap these in a separate struct
- methods?
- who can call?
- who stores ptr?
- do we even need a ptr? can configs be stored and loaded statically or is that a bitch on the FS
does it make more sense to load different configs for each entity or just have one monolithic config (probably load for each one and then let it update itself)
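Reusing the wrapper sketched earlier (redeclared here so the snippet stands alone), the update helpers could look like this; the key layout mirrors the `reactors.<id>.<key>` structure in the server YAML:

```go
package config

import (
	"fmt"

	"github.com/spf13/viper"
)

type Config struct{ v *viper.Viper }

// UpdateReactor touches a single entry under one reactor's subtree.
func (c *Config) UpdateReactor(id uint32, key string, value any) {
	c.v.Set(fmt.Sprintf("reactors.%d.%s", id, key), value)
}

// UpdateSelf touches the server's own settings.
func (c *Config) UpdateSelf(key string, value any) {
	c.v.Set(fmt.Sprintf("server.%s", key), value)
}
```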
going to have the init script set up the
Welcome back
#TODO 8/31
Goals:
- Add a config parser to load/store device manager struct
- start figuring out what a generic config package looks like
- figure out how to load different sensor functions dynamically
Basic reactor workflow overview (sketched after the list)
1) On boot, scan I2C bus to find active devices
2) For every device shown as active, spawn a sensor manager from the associated config
3) on disconnect, shut the dm down and save current settings to config
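A rough shape of that boot workflow; `i2c.Scan(bus)` returning a `map[int]bool` is an assumption modeled on the old `I2CBus.Scan`, while `device.New` is the factory added in this commit:

```go
package reactor

import (
	"FRMS/internal/pkg/device"
	"FRMS/internal/pkg/i2c"

	"github.com/spf13/viper"
)

// bootDevices spawns a manager for every active address on the bus.
func bootDevices(bus int, conf *viper.Viper) error {
	for addr, active := range i2c.Scan(bus) {
		if !active {
			continue
		}
		dev, err := device.New(bus, addr, conf)
		if err != nil {
			continue // unrecognized address; skip rather than abort boot
		}
		if err := dev.Start(); err != nil {
			return err
		}
	}
	return nil
}
```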
implementation time
#TODO 9/4
Might be dying nbd
- i think its just freshman flu but could be clot who knows
on to code
Need to have a functional BETA by 9/15 at the latest
pref 9/8 with a week to test
What do we NEED out of FRMS v0.1.0 (pre-alpha;
as an aside, v1.#.#-alpha then v1.#.#-beta for versions)
Needs:
- Connect and disconnect at will
- set sample and log rate
- set name
- live view data
- export experiment data to CSV
Notes:
- all sensors will be atlas
- can leverage for a unified library
- can use grafana for the UI
- can bash script to export data for a given time range into respective sheets, aka a sheet of DO measurements etc.
- can set up the format pretty easily and probably just printf the query worst case; I mean, it's 3 data points at probably 1 sample per minute at worst
Architecture planning phase
What would each need require software-wise?
Need: Connect and disconnect at will
- directory of which device manager to load
- a way to store and load settings
- a way to monitor the i2c lines for new devices
Config interface
At its core
Load()
- load keys, config and env
- prompt for any updates
- store said updates
- store any future requests
functions both server and reactor will use:
- load config
- load keys
- diff keys
- load env
- diff env
order of ops (sketched below)
load config
load keys and env to overwrite config
store updates
have config with methods to get/set values
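That order of ops, as one hedged snippet using viper directly:

```go
package config

import "github.com/spf13/viper"

// Load follows the order above: read the file, let keys/env overwrite
// values on lookup, then persist any updates made along the way.
func Load(name string) (*viper.Viper, error) {
	v := viper.New()
	v.SetConfigName(name)
	v.SetConfigType("yaml")
	v.AddConfigPath("$HOME/FRMS/internal/configs")
	if err := v.ReadInConfig(); err != nil { // 1) load config
		return nil, err
	}
	v.AutomaticEnv()          // 2) env vars overwrite file values on lookup
	return v, v.WriteConfig() // 3) store updates
}
```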

@ -0,0 +1,4 @@
## Weekly Planning
[Jan 16-20](weekly/Jan-16-20.md)
[Jan 23-27](weekly/Jan-23-27.md)

@ -0,0 +1,149 @@
# Jan 18
### Planning
**Monitoring Changes**
I want to refactor the reactor stuff to be less method oriented as far as data collection goes. For example, the monitoring stuff is all about events that happen pretty infrequently. It makes sense, then, to use a channel on the device side to feed relevant status updates back to the reactor. I think this makes the most sense because it will synchronize updates and leverage the rarity of events to cut down on errant calls (see the sketch after the pros/cons).
- pros
- less repetitive method calls needed
- less device locking
- localize the information to different packages
- cons
- extra memory for channels and duplicate storage info
- could just remove status from dm?
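A minimal sketch of the channel-based flow; `StatusUpdate` is a stand-in event type, and the single consumer keeps the map synchronized without per-device locking:

```go
package reactor

// StatusUpdate is illustrative; real events would carry whatever the
// device managers need to report (connects, kills, renames).
type StatusUpdate struct {
	Addr   int
	Online bool
}

type coordinator struct {
	devices map[int]bool
	updates chan StatusUpdate
}

// listen is the single consumer: device managers push rare events into
// the channel instead of being polled, so no device locking happens here.
func (c *coordinator) listen() {
	for u := range c.updates {
		c.devices[u.Addr] = u.Online
	}
}
```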
**New Idea**
I can leverage wireguard to do server -> reactor connections even beyond the testing phase
Changes:
1) move device coordinator into device package
2) expose relevant methods to reactor interface
3) clarify individual package responsibilities
4) add stuff server side to create/destroy grpc connections as the information is rendered client side
- this might be scuffed but oh well
### Package Separation
**Reactor**
- coordinator
- creates initial link to the server
- creates database client
- creates and starts a device coordinator
**Device**
- coordinator
- searches i2c bus for connected devices
- spins up managers to control the connected devices
- relays information back up to the reactor coordinator
- manager
- control over singular device
- has the core information that will be needed across any type of device (name, status, address etc)
- sub-manager
- fine grained struct with methods specific to the device
**Server**
Going to ignore for now because I am lazy
- central coordinator starts up database connection config etc
- reactor coordinator
### TODO
**Monitoring Changes**
- [ ] change methods to channel-based
- [ ] internal methods with spins
- [ ] pass structs with interface for methods
# Jan 19
### Organizational changes
What structure makes the most sense for the devices?
#### Top-Down
Ex) DeviceManager -> SensorManager -> DOManager -> Manager
**Pros**
- probably a less complex interface layout?
**Cons**
- annoying to keep/pass state
- i.e. atlas needs the address to pass to the I2C but right now the devicemanager is storing that. Have to pass down via start which doesn't make a ton of sense
#### Bottom-Up
Ex) DOManager -> SensorManager -> DeviceManager -> Manager
**Pros**
- top level manager has access to common info
- i.e. address, name etc
- can easily define common functions and use this to pass info upwards
- still don't have to import device manager as interfaces can handle getting/setting stuff
**Cons**
- might get ugly with interfaces
- there might have to be a bunch of interfaces in the device package to handle nesting the manager itself
- this might not be true though as the device coordinator dictates what interfaces are needed, and already it doesn't really use any of the dm functionality
**What would it look like?**
Device coordinator would call NewDeviceManager and hand the result to the specific manager (see the sketch below).
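A simplified sketch of that bottom-up embedding, with pared-down versions of the structs (field sets here are illustrative):

```go
package device

// Common fields live once in DeviceManager and propagate up via
// embedding, so DOManager gets Address and Name for free.
type DeviceManager struct {
	Address int
	Name    string
}

type SensorManager struct {
	*DeviceManager
	SampleRate int // common to all sensors
}

type DOManager struct {
	*SensorManager
}

func NewDOManager(addr int) *DOManager {
	dm := &DeviceManager{Address: addr, Name: "DO Sensor"}
	return &DOManager{&SensorManager{DeviceManager: dm, SampleRate: 10000}}
}
```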
### Outline of functionality
Hopefully by going over what is expected of each manager, it will become clear what the layout should look like
**Device Coordinator**
- responsibilities
- starting/stopping device managers as devices connect/disconnect
- maintaining a map of the devices and their status
- updating the server with this information at set intervals
- pass the I2C client to the device managers
**Device Manager**
- responsibilities
- struct to store information that is used by any type of device
- i.e. Address, Name, Config(prefix and file)? Status?
- probably don't need status as this can be determined via IsActive()
- config might be helpful to have, could pass up to managers via a Get function
- start/stop as requested by the device coordinator
- serves
- broad functions such as SetName(), GetName(), etc.
**Sensor/Controller Manager**
- responsibilities
- provide corresponding broad struct that will be consistent across types of each
- i.e. sensors all have sample rate
- provide methods all will use such as TakeReading()
- serves
- more specific functions such as GetSampleRate(), Set...
**Specific Managers**
- responsibilities
- provides specific functions that a certain sensor/controller might need
- i.e. pwm will need setFreq, DO might need a conversion etc.
- broadly will need access to I2C for comms
- serves
- Hyper Specific functions such as SetFreq() etc.
### Trying Bottom-Up
Right now, I am using some hybrid format which doesn't really make any sense. It goes
DeviceManager -> DOManager -> SensorManager -> Manager
This just feels *wrong*
**Changes**
- Going to use the specific -> broad because it seems intuitive
- the most common methods/information is at the base and propagates up through the more specific managers
- should make it simpler to define
- maybe go back to the unified package? Not quite clear what the purpose of separate is beyond convenience
- although... the idea of the device manager as a reusable piece makes enough sense to potentially keep it as a separate package
- I'll stick with the separate for now and keep it unless it becomes unworkable
### I2C Changes
The i2c bus is locked at the device level, so I am going to rewrite the bs to just use a function with no struct and remove the whole passing of structs garbage
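The function-with-no-struct version could be as small as this; it mirrors the `i2c.SendCmd(bus, addr, cmd)` calls the atlas helpers make, with the `i2ctransfer` invocation being an assumption about the implementation:

```go
package i2c

import (
	"bytes"
	"fmt"
	"os/exec"
	"strconv"
	"sync"
)

var mu sync.Mutex // one lock, since the bus itself is the shared resource

// SendCmd writes cmd to the device at addr (empty cmd = read back),
// shelling out to i2ctransfer like the old wrappers did.
func SendCmd(bus, addr int, cmd string) (string, error) {
	mu.Lock()
	defer mu.Unlock()
	b := strconv.Itoa(bus)
	a := fmt.Sprintf("0x%x", addr)
	var args []string
	if cmd == "" {
		args = []string{"-y", b, fmt.Sprintf("r40@%s", a)}
	} else {
		args = []string{"-y", b, fmt.Sprintf("w%d@%s", len(cmd), a)}
		for _, c := range []byte(cmd) {
			args = append(args, fmt.Sprintf("0x%x", c))
		}
	}
	out, err := exec.Command("i2ctransfer", args...).Output()
	return string(bytes.TrimSpace(out)), err
}
```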
#### For tomorrow
What I have now works, but it is still pretty backwards. Need further improvements and need to start thinking about what a websocket might look like in the current model

@ -0,0 +1,49 @@
# Jan 23
### Connecting Clients to reactors
**Client -> Server -> Reactor**
I can take advantage of the private network created via wireguard to allow the server to connect back to individual reactors and then initiate gRPC calls.
**Pros**
- This *VASTLY* simplifies the implementation as I can now connect back to the reactors themselves
- from there, I can implement various functions I will need server side
- i.e. GetName() SetName() etc.
**Cons**
- I will eventually need to build the wireguard implementation
- although because it's all local network for now, I can plug and play down the road
### TODO
- refactor packages to provide a cleaner interface via simple commands as opposed to the convoluted passing structure that was present with the old I2C library
- start working on the interface between the websocket and the reactor
- react side this is the actual content that will be rendered by the client
- server side this will be a connection to a reactor with the gRPC calls
- moving monitoring functionality to the reactor
- refactoring to use streaming functionality to avoid needing to re-initiate requests
- have server connect each reactor manager to the rlc
- have the reactor manager ping for server info
- handle disconnects via exit
- sets up cleaner device handling via multiplexing
# Jan 24
### Proto changes
It's time to refactor the current protobuf stuff to make more sense from the server's perspective. In this sense, I am going to have the reactor provide connection details to the server on connect, and then the server can connect/disconnect at will.
### Outline
- Update the server to connect to the reactor itself for the information
- Decide what information is important enough to send to the server consistently, vs what only is needed upon "further inspection"
- need reactor information on connect
- need basic device information such as address and status
- when selected
- need specific device breakouts with advanced functions per device
- this can be multiplexed over the same gRPC connection and can be fulfilled by the device coordinator
- dc will catch all incoming requests and forward to the correct DM based on address
### TODO
- reverse monitoring stuff
- make it so reactor manager has a timeout/ recognizes disconnects gracefully
- convert monitoring to a stream as opposed to consistent calls (sketched below)
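A hedged sketch of the streaming direction; the `stream` interface below stands in for whatever server-stream handle the refactored proto generates, so no real pb types are assumed:

```go
package server

import "time"

// Status is a stand-in for the generated message type.
type Status struct{ Devices map[int]string }

// stream matches the shape of a generated server-stream handle.
type stream interface{ Send(*Status) error }

// monitor pushes updates until Send fails, which doubles as the
// graceful-disconnect signal mentioned above.
func monitor(s stream, poll func() *Status) error {
	t := time.NewTicker(5 * time.Second)
	defer t.Stop()
	for range t.C {
		if err := s.Send(poll()); err != nil {
			return err // client gone; tear down instead of re-calling
		}
	}
	return nil
}
```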

@ -1,138 +0,0 @@
package I2C
// file has general wrappers to interact with i2c-tools
import (
"fmt"
_ "log"
"encoding/hex"
"os/exec"
"bytes"
"strings"
"sync"
"strconv"
"FRMS/internal/pkg/logging"
)
type I2CBus struct {
int
sync.Mutex
}
func NewBus(bus int) *I2CBus {
b := &I2CBus{}
b.int = bus
return b
}
func (b *I2CBus) Scan() map[int]bool {
/*
Returns all the connected devices
*/
b.Lock()
defer b.Unlock()
bus := strconv.Itoa(b.int)
cmd := exec.Command("i2cdetect", "-y", "-r", bus)
var out bytes.Buffer
var errs bytes.Buffer
cmd.Stderr = &errs
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
logging.Debug(logging.DError, "I2C error performing scan. %v", errs.String())
}
outString := out.String()
// could split by \n too
split := strings.SplitAfter(outString,":")
// 1st entry is garbage headers and ending is always \n##:
split = split[1:]
// create empty slice for all the devices
//var devices []i2cdev
devices := map[int]bool{} //maps device addresses to active bool
for i,v := range split {
lst := strings.Index(v,"\n")
trimmed := v[:lst]
trimmed = strings.Trim(trimmed," ")
// trimmed now holds just possible sensor addresses
count := strings.Split(trimmed," ")
for j,d := range count {
// the first row has to be offset by 3 but after its just i*16 + j
offset := 0
if i == 0 {
offset = 3
}
addr := i*16 + j + offset
if strings.Contains(d,"--") || strings.Contains(d,"UU") {
// address is unconnected or reserved
//devices = append(devices, I2Cdev{Addr:addr,Active:false})
devices[addr] = false
} else {
//devices = append(devices, I2Cdev{Addr:addr,Active:true,LastSeen:now})
devices[addr] = true
}
}
}
return devices
}
func (b *I2CBus) GetStatus(addr int) bool {
b.Lock()
defer b.Unlock()
bus := strconv.Itoa(b.int)
a := strconv.Itoa(addr)
cmd := exec.Command("i2cdetect","-y","-r",bus,a,a)
var out bytes.Buffer
var errs bytes.Buffer
cmd.Stderr = &errs
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
logging.Debug(logging.DError,"I2C error getting status! %v", errs.String())
}
outString := out.String()
split := strings.SplitAfter(outString,":")
split = split[1:] // remove garbage header
val := int(addr/16) // if addr = 90 90/16 = int(5.6) = 5 will be in 5th row
dev := split[val]
lst := strings.Index(dev,"\n")
dev = dev[:lst]
trimmed := strings.Trim(dev," \n")
if strings.Contains(trimmed,"--") {
return false
} else {
return true
}
}
func (b *I2CBus) GetData(addr int) string {
b.Lock()
defer b.Unlock()
bus := strconv.Itoa(b.int)
a := strconv.FormatInt(int64(addr),16)
cmd := exec.Command("i2ctransfer","-y",bus,fmt.Sprintf("r40@0x%s",a))
var out bytes.Buffer
var errs bytes.Buffer
cmd.Stderr = &errs
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
logging.Debug(logging.DError,"I2C error getting data! %v", errs.String())
}
outString := out.String()
split := strings.SplitAfter(outString," ") //getting chars 0x12 0x2f etc
var final string
for _,v := range split {
trimmed := strings.TrimLeft(v, "0x ") // trimming extra bs in front of num
trimmed = strings.TrimRight(trimmed," \n") // trimming back
if trimmed != "ff" {
final += trimmed
}
}
ret, err := hex.DecodeString(final)
if err != nil {
panic(err)
}
return string(ret)
}

@ -1,79 +0,0 @@
package I2C
import (
"fmt"
"sync"
"time"
)
type I2CDevice struct {
*I2CBus // embeds bus
bool // stores whether dev is currently connected
int // addr
Data *data
}
type data struct {
string
bool
sync.Mutex
}
func (d I2CDevice) String() string {
t := map[int]string{97:"DO Sensor",99:"pH Sensor",102:"Temperature Sensor",64:"DHT11 Sensor"}
return t[d.int]
}
func NewDevice(addr int,bus *I2CBus) *I2CDevice {
d := &I2CDevice{}
d.I2CBus = bus
d.int = addr
d.Data = &data{}
return d
}
func (d *I2CDevice) GetAddr() int {
return d.int
}
func (d *I2CDevice) GetStatus() string {
// TODO
s := d.I2CBus.GetStatus(d.int)
if s {
d.Data.Active()
return "[green]ACTIVE[white]"
} else {
d.Data.Killed()
return "[red]KILLED[white]"
}
}
func (d *I2CDevice) GetType() string {
// TODO
return fmt.Sprint(d)
}
func (d *I2CDevice) GetData() string {
d.Data.Lock()
defer d.Data.Unlock()
d.Data.string = d.I2CBus.GetData(d.int)
return d.Data.string
}
func (d *data) Active() {
d.Lock()
defer d.Unlock()
if !d.bool {
d.string = ""
d.bool = true
}
}
func (d *data) Killed() {
d.Lock()
defer d.Unlock()
if d.bool {
d.string = time.Now().Format("Mon at 03:04:05pm MST")
d.bool = false
}
}

@ -1,97 +0,0 @@
package I2C
import (
"time"
_ "fmt"
"sync"
)
/*
i2c monitor implements a long running monitor responsible for sending active devices to the rlc
*/
type I2CMonitor struct {
*I2CBus
Devices *devs
DevChan chan int
}
type devs struct {
sync.Mutex
m map[int]*I2CDevice
}
func NewMonitor(bus int,ch chan int) *I2CMonitor {
m := &I2CMonitor{}
b := NewBus(bus)
m.I2CBus = b
d := make(map[int]*I2CDevice)
m.Devices = &devs{m:d}
m.DevChan = ch
return m
}
func (m *I2CMonitor) Update() {
/*
scans bus and adds new active devices
*/
devs := m.Scan()
chng := m.Devices.Parse(m.I2CBus,devs)
for _, d := range chng {
go m.ConnectDevice(d)
}
}
func (m *I2CMonitor) Monitor() {
// functon that updates the device list and notifies rlc of any changes to sensor composition
s := make(chan struct{})
t := 5 * time.Second
go func(signal chan struct{},to time.Duration) { // simple signal func to init scan
for {
signal <-struct{}{}
time.Sleep(to)
}
}(s,t)
for {
<-s
m.Update()
}
}
func (m *I2CMonitor) ConnectDevice(addr int) {
m.DevChan <-addr
}
func (m *I2CMonitor) GetDevice(addr int) interface{ GetAddr() int; GetData() string; GetStatus() string; GetType() string } {
m.Devices.Lock()
defer m.Devices.Unlock()
return m.Devices.m[addr]
}
func (d *devs) Parse(bus *I2CBus,devices map[int]bool) []int {
d.Lock()
defer d.Unlock()
newdevs := []int{}
for addr, status := range devices {
if dev, exists := d.m[addr]; exists {
// device seen
if status != dev.bool { // if device state changed
dev.bool = status
if status {
newdevs = append(newdevs,dev.GetAddr())
}
}
} else {
// device not seen yet
if status {
// active
newd := NewDevice(addr,bus)
newd.bool = status
d.m[addr] = newd
newdevs = append(newdevs,newd.GetAddr())
}
}
}
return newdevs
}

@ -0,0 +1,46 @@
package config
/*
Load.go contains methods to load values from config, flags and env.
*/
import (
"FRMS/internal/pkg/logging"
"fmt"
"github.com/spf13/viper"
)
func LoadConfig(fname string) *viper.Viper {
// reads the named YAML config into a fresh viper instance
// and returns it, creating an empty file if none exists
config := viper.New()
configPath := "$HOME/FRMS/internal/configs"
logging.Debug(logging.DStart, "Loading config for %s", fname)
config.SetConfigName(fname)
config.SetConfigType("yaml")
//viper.AddConfigPath("/etc/frms/config")
config.AddConfigPath(configPath)
// struct and env vars
// Sets env vars
config.AutomaticEnv()
// reading
if err := config.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
// no config file found
fmt.Printf("No config file found! Creating an empty one at %s/%s.yaml.\n", configPath, fname)
if err = config.WriteConfigAs(fmt.Sprintf("%s/%s.yaml", configPath, fname)); err != nil {
panic(err)
}
} else {
panic(err)
}
}
logging.Debug(logging.DStart, "CON Loaded configs from %v", config.ConfigFileUsed())
// returning config object
return config
}

@ -1,70 +0,0 @@
package config
// package serves to store/load config files for server
import (
_ "fmt"
"github.com/spf13/viper"
"FRMS/internal/pkg/logging"
"log"
"os/exec"
"bytes"
"strings"
)
type serverconfig struct {
URL string
Token string
Bucket string
Orginization string
}
func ReadServerConfig() *serverconfig {
viper.SetConfigName("database")
viper.SetConfigType("yaml")
viper.AddConfigPath("./internal/configs")
viper.SetDefault("Orginization","ForeLight")
viper.SetDefault("URL","http://localhost:8086")
var C serverconfig
err := viper.Unmarshal(&C)
if err != nil {
logging.Debug(logging.DError,"Cannot unmarshal! %v",err)
log.Fatal(err)
}
if C.Token == "" {
// token unset
logging.Debug(logging.DClient,"CON Grabbing adming token")
cmd := exec.Command("cat","tokens/admin_token")
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
logging.Debug(logging.DError,"CON Error grabbing token %v",err)
log.Fatal(err)
}
outstring := out.String()
C.Token = strings.Trim(outstring," \n")
viper.Set("token",C.Token)
viper.WriteConfig()
}
return &C
}
func (s *serverconfig) GetUrl() string {
return s.URL
}
func (s *serverconfig) GetOrg() string {
return s.Orginization
}
func (s *serverconfig) GetBucket() string {
return s.Bucket
}
func (s *serverconfig) GetToken() string {
return s.Token
}

@ -0,0 +1,85 @@
package device
import (
"FRMS/internal/pkg/i2c"
"encoding/hex"
"errors"
"strconv"
"strings"
"time"
)
// atlas helpers to fulfill sensor manager functions
type Atlas struct {
// delays passed by caller
CalDelay int
ReadDelay int
}
func (a *Atlas) Calibrate(bus, addr int, cal string) error {
// calibrate sensor
if a.CalDelay == 0 {
return errors.New("Cal delay unset, please check config")
}
if _, err := i2c.SendCmd(bus, addr, cal); err != nil {
return err
}
time.Sleep(time.Duration(a.CalDelay) * time.Millisecond) // sleep
_, err := i2c.SendCmd(bus, addr, "") // read for success
// return the err if there is any
return err
}
var ErrReadFail = errors.New("atlas read failure")
func (a *Atlas) TakeReading(bus, addr int) (float64, error) {
// take reading function
if _, err := i2c.SendCmd(bus, addr, "R"); err != nil {
// read command
return 0, err
}
if a.ReadDelay == 0 {
return 0, errors.New("Read Delay unset, please check config")
}
sleep := time.Duration(a.ReadDelay) * time.Millisecond
time.Sleep(sleep) // sleep between reads
data, err := i2c.SendCmd(bus, addr, "")
if err != nil {
return 0, ErrReadFail
}
// fmt data from 0x... to proper
var final string
split := strings.Split(data, " ")
for i, v := range split {
// loop over chars
if i == 0 && v != "0x01" {
// reading failed
return 0, ErrReadFail
}
// trimming bs
trimmed := strings.TrimLeft(v, "0x ")
trimmed = strings.TrimRight(trimmed, " \n")
if trimmed != "ff" && i != 0 {
final += trimmed
}
}
// return as a float
var b []byte
if b, err = hex.DecodeString(final); err != nil {
return 0, err
}
return strconv.ParseFloat(string(b), 32)
}
// for config
func (a *Atlas) GetCalDelay() int {
return a.CalDelay
}
func (a *Atlas) GetReadDelay() int {
return a.ReadDelay
}

@ -0,0 +1,22 @@
package device
import (
"sync"
)
// base controller manager
type ControllerManager struct {
*DeviceManager
sync.Mutex
Enabled bool // turn controller on or off
}
func NewControllerManager() *ControllerManager {
return &ControllerManager{}
}
func (c *ControllerManager) SetDeviceManager(d *DeviceManager) {
c.DeviceManager = d
}

@ -0,0 +1,41 @@
package device
// do sensor and methods
import (
"sync"
)
type DOManager struct {
// do sensor manager
*SensorManager
*Atlas
sync.RWMutex
}
func NewDOManager() *DOManager {
// atlas delays
a := &Atlas{
CalDelay: 1300,
ReadDelay: 600,
}
sm := NewSensorManager()
m := &DOManager{
Atlas: a,
SensorManager: sm,
}
return m
}
func (m *DOManager) Start() error {
// start sensor manager
return m.SensorManager.Start(m.Atlas.TakeReading)
}
func (m *DOManager) String() string {
// TODO
return ""
}

@ -0,0 +1,71 @@
package device
import (
pb "FRMS/internal/pkg/grpc" // assumed path for the generated pb types used below
"FRMS/internal/pkg/manager"
"fmt"
"sync"
"time"
"github.com/spf13/viper"
)
// base device manager
type Manager interface {
// core manager
Start() error
Exit() error
IsActive() int
HeartBeat(chan struct{}, int, int, time.Duration)
}
func NewManager() Manager {
// no timeouts needed
return manager.New(0)
}
type DeviceManager struct {
// for device agnostic fields/methods
Address int `mapstructure:"address"`
Bus int // i2c bus
// mutable
infoMu sync.RWMutex
Name string `mapstructure:"name"`
defaultName string
// base manager
Manager
// config
Config *viper.Viper
// gRPC server
pb.UnimplementedDeviceServer
}
func NewDeviceManager(bus, addr int, config *viper.Viper, defaultName string) *DeviceManager {
// new base dm
m := NewManager()
dm := &DeviceManager{
Address: addr,
Bus: bus,
defaultName: defaultName,
Manager: m,
Config: config,
}
return dm
}
func (m *DeviceManager) LoadConfig() error {
// setting default name
mainKey := fmt.Sprintf("devices.%d", m.Address)
nameKey := fmt.Sprintf("%s.name", mainKey)
if !m.Config.IsSet(nameKey) {
m.Config.Set(nameKey, m.defaultName)
}
return nil
}
func (m *DeviceManager) Start() error {
// start
return m.Manager.Start()
}

@ -0,0 +1,50 @@
package device
import (
"fmt"
"github.com/spf13/viper"
)
// Returns the correct manager for sensor/controller
type Device interface {
Start() error
Exit() error
IsActive() int
SetDeviceManager(*DeviceManager)
}
func New(bus, addr int, config *viper.Viper) (Device, error) {
// returns correct device manager by ID
var err error
var defaultName string
var m Device
switch addr {
case 97:
// DO
defaultName = "DO Sensor"
m = NewDOManager()
case 99:
// pH
defaultName = "pH Sensor"
m = NewPHManager()
case 102:
// RTD
defaultName = "RTD Sensor"
m = NewRTDManager()
case 256:
// PWM
defaultName = "PWM Controller"
m = NewPWMManager()
default:
// unrecognized id; err is checked below before any manager calls
err = fmt.Errorf("device id %d unrecognized", addr)
}
if err != nil {
// bail before calling methods on a nil manager
return nil, err
}
// setting device manager
dm := NewDeviceManager(bus, addr, config, defaultName)
m.SetDeviceManager(dm)
// setting up gRPC server functionality
return m, err
}

@ -0,0 +1,40 @@
package device
// ph sensor and methods
import (
"sync"
)
type PHManager struct {
// ph sensor manager
*SensorManager
*Atlas
sync.RWMutex
}
func NewPHManager() *PHManager {
// atlas delays
a := &Atlas{
CalDelay: 900,
ReadDelay: 900,
}
sm := NewSensorManager()
m := &PHManager{
Atlas: a,
SensorManager: sm,
}
return m
}
func (m *PHManager) Start() error {
// start sensor manager
return m.SensorManager.Start(m.Atlas.TakeReading)
}
func (m *PHManager) String() string {
// TODO
return ""
}

@ -0,0 +1,32 @@
package device
// pwm controller and methods
import (
"sync"
)
type PWMManager struct {
// pwm controller manager
*ControllerManager
sync.RWMutex
Frequency int
DutyCycle int
}
func NewPWMManager() *PWMManager {
cm := NewControllerManager()
return &PWMManager{ControllerManager: cm}
}
// freq changing
func (m *PWMManager) GetFrequency() (int, error) {
m.Lock()
defer m.Unlock()
return m.Frequency, nil
}
func (m *PWMManager) String() string {
// TODO
return ""
}

@ -0,0 +1,38 @@
package device
// rtd sensor and methods
import (
"sync"
)
type RTDManager struct {
// rtd sensor manager
*Atlas
*SensorManager
sync.RWMutex
}
func NewRTDManager() *RTDManager {
// atlas delays
a := &Atlas{
CalDelay: 600,
ReadDelay: 600,
}
sm := NewSensorManager()
m := &RTDManager{
Atlas: a,
SensorManager: sm,
}
return m
}
func (m *RTDManager) Start() error {
return m.SensorManager.Start(m.Atlas.TakeReading)
}
func (m *RTDManager) String() string {
// TODO
return ""
}

@ -0,0 +1,93 @@
package device
import (
pb "FRMS/internal/pkg/grpc" // assumed path for the generated pb types used below
"errors"
"fmt"
"sync"
"time"
)
type SensorManager struct {
SampleRate int `mapstructure:"sample_rate"` // in (ms)
// sampling
sampleMu sync.RWMutex
LatestSample float32
SampleTimestamp int64
*DeviceManager `mapstructure:",squash"`
// gRPC server
pb.UnimplementedSensorServer
}
func NewSensorManager() *SensorManager {
s := &SensorManager{}
return s
}
func (s *SensorManager) SetDeviceManager(d *DeviceManager) {
s.DeviceManager = d
}
type takeReading func(int, int) (float64, error)
func (s *SensorManager) Start(f takeReading) error {
// loading config
if err := s.LoadConfig(); err != nil {
return err
}
// starting
if err := s.DeviceManager.Start(); err != nil {
return err
}
// starting monitoring
go s.Monitor(f)
return nil
}
func (s *SensorManager) LoadConfig() error {
// setting keys
mainKey := fmt.Sprintf("devices.%d", s.Address)
sampleKey := fmt.Sprintf("%s.sample_rate", mainKey)
if !s.Config.IsSet(sampleKey) {
// no sample rate, default to 10s
s.Config.Set(sampleKey, 10000)
}
// loading lower
s.DeviceManager.LoadConfig()
s.Config.UnmarshalKey(mainKey, s)
return nil
}
func (s *SensorManager) Monitor(f takeReading) {
ch := make(chan struct{}) // hb chan
go s.HeartBeat(ch, s.SampleRate, 2000, time.Millisecond)
var reading float64
var err error
for range ch {
if reading, err = f(s.Bus, s.Address); err != nil {
if !errors.Is(err, ErrReadFail) {
// unknown error, panic
panic(err)
}
fmt.Printf("Reading failed, skipping!\n")
}
// update sample
if !errors.Is(err, ErrReadFail) {
fmt.Printf("Got %f\n", reading)
s.sampleMu.Lock()
s.LatestSample = float32(reading)
s.SampleTimestamp = time.Now().Unix()
s.sampleMu.Unlock()
}
}
}

@ -0,0 +1,42 @@
syntax = "proto3";
package grpc;
option go_package = "internal/pkg/grpc";
service device {
// groups basic device interactions
// get/set name based on request
rpc Name(NameRequest) returns (NameResponse);
}
message NameRequest {
// empty for future expansion
string Name = 1;
}
message NameResponse {
string Name = 1;
}
service sensor {
// sensor specific functions
rpc Reading(ReadingRequest) returns (ReadingResponse);
rpc SampleRate(SampleRateRequest) returns (SampleRateResponse);
}
message ReadingRequest {
// empty
}
message ReadingResponse {
string Reading = 1; // formatted reading "9.7 pH"
int64 Timestamp = 2; // when the reading was taken
}
message SampleRateRequest {
int32 SampleRate = 1; // 0 to return current sample rate, value in seconds
}
message SampleRateResponse {
int32 SampleRate = 1; // returns the set sample rate
}

@ -1,700 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.6.1
// source: internal/pkg/grpc/management.proto
package grpc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type GetDevicesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"` // if unspecified, don't return any devs
Refresh bool `protobuf:"varint,3,opt,name=refresh,proto3" json:"refresh,omitempty"`
}
func (x *GetDevicesRequest) Reset() {
*x = GetDevicesRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GetDevicesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetDevicesRequest) ProtoMessage() {}
func (x *GetDevicesRequest) ProtoReflect() protoreflect.Message {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetDevicesRequest.ProtoReflect.Descriptor instead.
func (*GetDevicesRequest) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{0}
}
func (x *GetDevicesRequest) GetClientId() uint32 {
if x != nil {
return x.ClientId
}
return 0
}
func (x *GetDevicesRequest) GetReactorId() uint32 {
if x != nil {
return x.ReactorId
}
return 0
}
func (x *GetDevicesRequest) GetRefresh() bool {
if x != nil {
return x.Refresh
}
return false
}
type GetDevicesResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
Devices []*Dev `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices,omitempty"`
}
func (x *GetDevicesResponse) Reset() {
*x = GetDevicesResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GetDevicesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetDevicesResponse) ProtoMessage() {}
func (x *GetDevicesResponse) ProtoReflect() protoreflect.Message {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetDevicesResponse.ProtoReflect.Descriptor instead.
func (*GetDevicesResponse) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{1}
}
func (x *GetDevicesResponse) GetClientId() uint32 {
if x != nil {
return x.ClientId
}
return 0
}
func (x *GetDevicesResponse) GetReactorId() uint32 {
if x != nil {
return x.ReactorId
}
return 0
}
func (x *GetDevicesResponse) GetDevices() []*Dev {
if x != nil {
return x.Devices
}
return nil
}
type DeleteReactorRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
}
func (x *DeleteReactorRequest) Reset() {
*x = DeleteReactorRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DeleteReactorRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteReactorRequest) ProtoMessage() {}
func (x *DeleteReactorRequest) ProtoReflect() protoreflect.Message {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteReactorRequest.ProtoReflect.Descriptor instead.
func (*DeleteReactorRequest) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{2}
}
func (x *DeleteReactorRequest) GetClientId() uint32 {
if x != nil {
return x.ClientId
}
return 0
}
func (x *DeleteReactorRequest) GetReactorId() uint32 {
if x != nil {
return x.ReactorId
}
return 0
}
type DeleteReactorResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
Success bool `protobuf:"varint,3,opt,name=success,proto3" json:"success,omitempty"`
}
func (x *DeleteReactorResponse) Reset() {
*x = DeleteReactorResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DeleteReactorResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteReactorResponse) ProtoMessage() {}
func (x *DeleteReactorResponse) ProtoReflect() protoreflect.Message {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteReactorResponse.ProtoReflect.Descriptor instead.
func (*DeleteReactorResponse) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{3}
}
func (x *DeleteReactorResponse) GetClientId() uint32 {
if x != nil {
return x.ClientId
}
return 0
}
func (x *DeleteReactorResponse) GetReactorId() uint32 {
if x != nil {
return x.ReactorId
}
return 0
}
func (x *DeleteReactorResponse) GetSuccess() bool {
if x != nil {
return x.Success
}
return false
}
type DeleteReactorDeviceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
DevAddr int32 `protobuf:"varint,3,opt,name=devAddr,proto3" json:"devAddr,omitempty"`
}
func (x *DeleteReactorDeviceRequest) Reset() {
*x = DeleteReactorDeviceRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DeleteReactorDeviceRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteReactorDeviceRequest) ProtoMessage() {}
func (x *DeleteReactorDeviceRequest) ProtoReflect() protoreflect.Message {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteReactorDeviceRequest.ProtoReflect.Descriptor instead.
func (*DeleteReactorDeviceRequest) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{4}
}
func (x *DeleteReactorDeviceRequest) GetClientId() uint32 {
if x != nil {
return x.ClientId
}
return 0
}
func (x *DeleteReactorDeviceRequest) GetReactorId() uint32 {
if x != nil {
return x.ReactorId
}
return 0
}
func (x *DeleteReactorDeviceRequest) GetDevAddr() int32 {
if x != nil {
return x.DevAddr
}
return 0
}
type DeleteReactorDeviceResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
DevAddr int32 `protobuf:"varint,3,opt,name=devAddr,proto3" json:"devAddr,omitempty"`
Success bool `protobuf:"varint,4,opt,name=success,proto3" json:"success,omitempty"`
}
func (x *DeleteReactorDeviceResponse) Reset() {
*x = DeleteReactorDeviceResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DeleteReactorDeviceResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteReactorDeviceResponse) ProtoMessage() {}
func (x *DeleteReactorDeviceResponse) ProtoReflect() protoreflect.Message {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteReactorDeviceResponse.ProtoReflect.Descriptor instead.
func (*DeleteReactorDeviceResponse) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{5}
}
func (x *DeleteReactorDeviceResponse) GetClientId() uint32 {
if x != nil {
return x.ClientId
}
return 0
}
func (x *DeleteReactorDeviceResponse) GetReactorId() uint32 {
if x != nil {
return x.ReactorId
}
return 0
}
func (x *DeleteReactorDeviceResponse) GetDevAddr() int32 {
if x != nil {
return x.DevAddr
}
return 0
}
func (x *DeleteReactorDeviceResponse) GetSuccess() bool {
if x != nil {
return x.Success
}
return false
}
type Dev struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` // either reactor id or dev addr
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` // ["reactor","__ sensor",...]
Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` // set by RLC/SM
Data string `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` // set by RLC/SM
Index uint32 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` // set by infostream to keep consistency
}
func (x *Dev) Reset() {
*x = Dev{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Dev) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Dev) ProtoMessage() {}
func (x *Dev) ProtoReflect() protoreflect.Message {
mi := &file_internal_pkg_grpc_management_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Dev.ProtoReflect.Descriptor instead.
func (*Dev) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{6}
}
func (x *Dev) GetId() uint32 {
if x != nil {
return x.Id
}
return 0
}
func (x *Dev) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *Dev) GetStatus() string {
if x != nil {
return x.Status
}
return ""
}
func (x *Dev) GetData() string {
if x != nil {
return x.Data
}
return ""
}
func (x *Dev) GetIndex() uint32 {
if x != nil {
return x.Index
}
return 0
}
var File_internal_pkg_grpc_management_proto protoreflect.FileDescriptor
var file_internal_pkg_grpc_management_proto_rawDesc = []byte{
0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67,
0x72, 0x70, 0x63, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x67, 0x72, 0x70, 0x63, 0x22, 0x67, 0x0a, 0x11, 0x47, 0x65,
0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x72,
0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09,
0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x66,
0x72, 0x65, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x66, 0x72,
0x65, 0x73, 0x68, 0x22, 0x73, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72,
0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f,
0x72, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x03,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x76, 0x52,
0x07, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x22, 0x50, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65,
0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09,
0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x22, 0x6b, 0x0a, 0x15, 0x44, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12,
0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a,
0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x70, 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74,
0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49,
0x64, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x12,
0x18, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x41, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
0x52, 0x07, 0x64, 0x65, 0x76, 0x41, 0x64, 0x64, 0x72, 0x22, 0x8b, 0x01, 0x0a, 0x1b, 0x44, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44, 0x65, 0x76, 0x69, 0x63,
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72,
0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f,
0x72, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x41, 0x64, 0x64, 0x72, 0x18, 0x03,
0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x64, 0x65, 0x76, 0x41, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a,
0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x6b, 0x0a, 0x03, 0x44, 0x65, 0x76, 0x12, 0x0e,
0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12,
0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79,
0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14,
0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69,
0x6e, 0x64, 0x65, 0x78, 0x32, 0xf3, 0x01, 0x0a, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d,
0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
0x73, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69,
0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70,
0x63, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65,
0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c,
0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52,
0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a,
0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44,
0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c,
0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44,
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44, 0x65, 0x76, 0x69,
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x13, 0x5a, 0x11, 0x69, 0x6e,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_internal_pkg_grpc_management_proto_rawDescOnce sync.Once
file_internal_pkg_grpc_management_proto_rawDescData = file_internal_pkg_grpc_management_proto_rawDesc
)
func file_internal_pkg_grpc_management_proto_rawDescGZIP() []byte {
file_internal_pkg_grpc_management_proto_rawDescOnce.Do(func() {
file_internal_pkg_grpc_management_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_pkg_grpc_management_proto_rawDescData)
})
return file_internal_pkg_grpc_management_proto_rawDescData
}
var file_internal_pkg_grpc_management_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_internal_pkg_grpc_management_proto_goTypes = []interface{}{
(*GetDevicesRequest)(nil), // 0: grpc.GetDevicesRequest
(*GetDevicesResponse)(nil), // 1: grpc.GetDevicesResponse
(*DeleteReactorRequest)(nil), // 2: grpc.DeleteReactorRequest
(*DeleteReactorResponse)(nil), // 3: grpc.DeleteReactorResponse
(*DeleteReactorDeviceRequest)(nil), // 4: grpc.DeleteReactorDeviceRequest
(*DeleteReactorDeviceResponse)(nil), // 5: grpc.DeleteReactorDeviceResponse
(*Dev)(nil), // 6: grpc.Dev
}
var file_internal_pkg_grpc_management_proto_depIdxs = []int32{
6, // 0: grpc.GetDevicesResponse.devices:type_name -> grpc.Dev
0, // 1: grpc.management.GetDevices:input_type -> grpc.GetDevicesRequest
2, // 2: grpc.management.DeleteReactor:input_type -> grpc.DeleteReactorRequest
4, // 3: grpc.management.DeleteReactorDevice:input_type -> grpc.DeleteReactorDeviceRequest
1, // 4: grpc.management.GetDevices:output_type -> grpc.GetDevicesResponse
3, // 5: grpc.management.DeleteReactor:output_type -> grpc.DeleteReactorResponse
5, // 6: grpc.management.DeleteReactorDevice:output_type -> grpc.DeleteReactorDeviceResponse
4, // [4:7] is the sub-list for method output_type
1, // [1:4] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_internal_pkg_grpc_management_proto_init() }
func file_internal_pkg_grpc_management_proto_init() {
if File_internal_pkg_grpc_management_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_internal_pkg_grpc_management_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetDevicesRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_pkg_grpc_management_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetDevicesResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_pkg_grpc_management_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteReactorRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_pkg_grpc_management_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteReactorResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_pkg_grpc_management_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteReactorDeviceRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_pkg_grpc_management_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteReactorDeviceResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_internal_pkg_grpc_management_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Dev); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_pkg_grpc_management_proto_rawDesc,
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_internal_pkg_grpc_management_proto_goTypes,
DependencyIndexes: file_internal_pkg_grpc_management_proto_depIdxs,
MessageInfos: file_internal_pkg_grpc_management_proto_msgTypes,
}.Build()
File_internal_pkg_grpc_management_proto = out.File
file_internal_pkg_grpc_management_proto_rawDesc = nil
file_internal_pkg_grpc_management_proto_goTypes = nil
file_internal_pkg_grpc_management_proto_depIdxs = nil
}

@ -1,54 +0,0 @@
syntax = "proto3";
package grpc;
option go_package = "internal/pkg/grpc";
service management {
rpc GetDevices(GetDevicesRequest) returns (GetDevicesResponse);
rpc DeleteReactor(DeleteReactorRequest) returns (DeleteReactorResponse);
rpc DeleteReactorDevice(DeleteReactorDeviceRequest) returns (DeleteReactorDeviceResponse);
}
message GetDevicesRequest {
uint32 clientId = 1;
uint32 reactorId = 2; // if unspecified, don't return any devs
bool refresh = 3;
}
message GetDevicesResponse {
uint32 clientId = 1;
uint32 reactorId = 2;
repeated Dev devices = 3;
}
message DeleteReactorRequest {
uint32 clientId = 1;
uint32 reactorId = 2;
}
message DeleteReactorResponse {
uint32 clientId = 1;
uint32 reactorId = 2;
bool success = 3;
}
message DeleteReactorDeviceRequest {
uint32 clientId = 1;
uint32 reactorId = 2;
int32 devAddr = 3;
}
message DeleteReactorDeviceResponse {
uint32 clientId = 1;
uint32 reactorId = 2;
int32 devAddr = 3;
bool success = 4;
}
message Dev {
uint32 id = 1; // either reactor id or dev addr
string type = 2; // ["reactor","__ sensor",...]
string status = 3; // set by RLC/SM
string data = 4; // set by RLC/SM
uint32 index = 5; // set by infostream to keep consistency
}

@ -1,177 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.6.1
// source: internal/pkg/grpc/management.proto
package grpc
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// ManagementClient is the client API for Management service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ManagementClient interface {
GetDevices(ctx context.Context, in *GetDevicesRequest, opts ...grpc.CallOption) (*GetDevicesResponse, error)
DeleteReactor(ctx context.Context, in *DeleteReactorRequest, opts ...grpc.CallOption) (*DeleteReactorResponse, error)
DeleteReactorDevice(ctx context.Context, in *DeleteReactorDeviceRequest, opts ...grpc.CallOption) (*DeleteReactorDeviceResponse, error)
}
type managementClient struct {
cc grpc.ClientConnInterface
}
func NewManagementClient(cc grpc.ClientConnInterface) ManagementClient {
return &managementClient{cc}
}
func (c *managementClient) GetDevices(ctx context.Context, in *GetDevicesRequest, opts ...grpc.CallOption) (*GetDevicesResponse, error) {
out := new(GetDevicesResponse)
err := c.cc.Invoke(ctx, "/grpc.management/GetDevices", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managementClient) DeleteReactor(ctx context.Context, in *DeleteReactorRequest, opts ...grpc.CallOption) (*DeleteReactorResponse, error) {
out := new(DeleteReactorResponse)
err := c.cc.Invoke(ctx, "/grpc.management/DeleteReactor", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managementClient) DeleteReactorDevice(ctx context.Context, in *DeleteReactorDeviceRequest, opts ...grpc.CallOption) (*DeleteReactorDeviceResponse, error) {
out := new(DeleteReactorDeviceResponse)
err := c.cc.Invoke(ctx, "/grpc.management/DeleteReactorDevice", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ManagementServer is the server API for Management service.
// All implementations must embed UnimplementedManagementServer
// for forward compatibility
type ManagementServer interface {
GetDevices(context.Context, *GetDevicesRequest) (*GetDevicesResponse, error)
DeleteReactor(context.Context, *DeleteReactorRequest) (*DeleteReactorResponse, error)
DeleteReactorDevice(context.Context, *DeleteReactorDeviceRequest) (*DeleteReactorDeviceResponse, error)
mustEmbedUnimplementedManagementServer()
}
// UnimplementedManagementServer must be embedded to have forward compatible implementations.
type UnimplementedManagementServer struct {
}
func (UnimplementedManagementServer) GetDevices(context.Context, *GetDevicesRequest) (*GetDevicesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetDevices not implemented")
}
func (UnimplementedManagementServer) DeleteReactor(context.Context, *DeleteReactorRequest) (*DeleteReactorResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteReactor not implemented")
}
func (UnimplementedManagementServer) DeleteReactorDevice(context.Context, *DeleteReactorDeviceRequest) (*DeleteReactorDeviceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteReactorDevice not implemented")
}
func (UnimplementedManagementServer) mustEmbedUnimplementedManagementServer() {}
// UnsafeManagementServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ManagementServer will
// result in compilation errors.
type UnsafeManagementServer interface {
mustEmbedUnimplementedManagementServer()
}
func RegisterManagementServer(s grpc.ServiceRegistrar, srv ManagementServer) {
s.RegisterService(&Management_ServiceDesc, srv)
}
func _Management_GetDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetDevicesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagementServer).GetDevices(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpc.management/GetDevices",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagementServer).GetDevices(ctx, req.(*GetDevicesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Management_DeleteReactor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteReactorRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagementServer).DeleteReactor(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpc.management/DeleteReactor",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagementServer).DeleteReactor(ctx, req.(*DeleteReactorRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Management_DeleteReactorDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteReactorDeviceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagementServer).DeleteReactorDevice(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/grpc.management/DeleteReactorDevice",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagementServer).DeleteReactorDevice(ctx, req.(*DeleteReactorDeviceRequest))
}
return interceptor(ctx, in, info, handler)
}
// Management_ServiceDesc is the grpc.ServiceDesc for Management service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Management_ServiceDesc = grpc.ServiceDesc{
ServiceName: "grpc.management",
HandlerType: (*ManagementServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetDevices",
Handler: _Management_GetDevices_Handler,
},
{
MethodName: "DeleteReactor",
Handler: _Management_DeleteReactor_Handler,
},
{
MethodName: "DeleteReactorDevice",
Handler: _Management_DeleteReactorDevice_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "internal/pkg/grpc/management.proto",
}

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.6.1
// protoc v3.12.4
// source: internal/pkg/grpc/monitoring.proto
package grpc
@ -20,12 +20,61 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Status int32
const (
Status_DEAD Status = 0
Status_ALIVE Status = 1
Status_UNKOWN Status = 2
)
// Enum value maps for Status.
var (
Status_name = map[int32]string{
0: "DEAD",
1: "ALIVE",
2: "UNKOWN",
}
Status_value = map[string]int32{
"DEAD": 0,
"ALIVE": 1,
"UNKOWN": 2,
}
)
func (x Status) Enum() *Status {
p := new(Status)
*p = x
return p
}
func (x Status) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Status) Descriptor() protoreflect.EnumDescriptor {
return file_internal_pkg_grpc_monitoring_proto_enumTypes[0].Descriptor()
}
func (Status) Type() protoreflect.EnumType {
return &file_internal_pkg_grpc_monitoring_proto_enumTypes[0]
}
func (x Status) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Status.Descriptor instead.
func (Status) EnumDescriptor() ([]byte, []int) {
return file_internal_pkg_grpc_monitoring_proto_rawDescGZIP(), []int{0}
}
type ReactorStatusResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *ReactorStatusResponse) Reset() {
@ -60,7 +109,7 @@ func (*ReactorStatusResponse) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_monitoring_proto_rawDescGZIP(), []int{0}
}
func (x *ReactorStatusResponse) GetId() uint32 {
func (x *ReactorStatusResponse) GetId() int32 {
if x != nil {
return x.Id
}
@ -72,7 +121,8 @@ type ReactorStatusPing struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
// new devices
Devices []*Device `protobuf:"bytes,2,rep,name=devices,proto3" json:"devices,omitempty"`
}
@ -108,7 +158,7 @@ func (*ReactorStatusPing) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_monitoring_proto_rawDescGZIP(), []int{1}
}
func (x *ReactorStatusPing) GetId() uint32 {
func (x *ReactorStatusPing) GetId() int32 {
if x != nil {
return x.Id
}
@ -127,10 +177,8 @@ type Device struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Addr int32 `protobuf:"varint,1,opt,name=addr,proto3" json:"addr,omitempty"`
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
Data string `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
Addr int32 `protobuf:"varint,1,opt,name=addr,proto3" json:"addr,omitempty"` // i2c addr
Status Status `protobuf:"varint,2,opt,name=status,proto3,enum=grpc.Status" json:"status,omitempty"` // most recent status
}
func (x *Device) Reset() {
@ -172,25 +220,11 @@ func (x *Device) GetAddr() int32 {
return 0
}
func (x *Device) GetType() string {
if x != nil {
return x.Type
}
return ""
}
func (x *Device) GetStatus() string {
func (x *Device) GetStatus() Status {
if x != nil {
return x.Status
}
return ""
}
func (x *Device) GetData() string {
if x != nil {
return x.Data
}
return ""
return Status_DEAD
}
var File_internal_pkg_grpc_monitoring_proto protoreflect.FileDescriptor
@ -200,26 +234,27 @@ var file_internal_pkg_grpc_monitoring_proto_rawDesc = []byte{
0x72, 0x70, 0x63, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x67, 0x72, 0x70, 0x63, 0x22, 0x27, 0x0a, 0x15, 0x52, 0x65,
0x61, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
0x02, 0x69, 0x64, 0x22, 0x4b, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74,
0x61, 0x74, 0x75, 0x73, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x69,
0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x69,
0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x67, 0x72, 0x70, 0x63,
0x2e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73,
0x22, 0x5c, 0x0a, 0x06, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64,
0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x12, 0x12,
0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79,
0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, 0x5a,
0x0a, 0x0a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x4c, 0x0a, 0x14,
0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x61, 0x6e,
0x64, 0x6c, 0x65, 0x72, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x63,
0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x69, 0x6e, 0x67, 0x1a, 0x1b, 0x2e,
0x67, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74,
0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x13, 0x5a, 0x11, 0x69, 0x6e,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x22, 0x42, 0x0a, 0x06, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64,
0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x12, 0x24,
0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c,
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74,
0x61, 0x74, 0x75, 0x73, 0x2a, 0x29, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x08,
0x0a, 0x04, 0x44, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x56,
0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x4b, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x32,
0x5a, 0x0a, 0x0a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x4c, 0x0a,
0x14, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x61,
0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61,
0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x69, 0x6e, 0x67, 0x1a, 0x1b,
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x13, 0x5a, 0x11, 0x69,
0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63,
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -234,21 +269,24 @@ func file_internal_pkg_grpc_monitoring_proto_rawDescGZIP() []byte {
return file_internal_pkg_grpc_monitoring_proto_rawDescData
}
var file_internal_pkg_grpc_monitoring_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_internal_pkg_grpc_monitoring_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_internal_pkg_grpc_monitoring_proto_goTypes = []interface{}{
(*ReactorStatusResponse)(nil), // 0: grpc.ReactorStatusResponse
(*ReactorStatusPing)(nil), // 1: grpc.ReactorStatusPing
(*Device)(nil), // 2: grpc.Device
(Status)(0), // 0: grpc.Status
(*ReactorStatusResponse)(nil), // 1: grpc.ReactorStatusResponse
(*ReactorStatusPing)(nil), // 2: grpc.ReactorStatusPing
(*Device)(nil), // 3: grpc.Device
}
var file_internal_pkg_grpc_monitoring_proto_depIdxs = []int32{
2, // 0: grpc.ReactorStatusPing.devices:type_name -> grpc.Device
1, // 1: grpc.monitoring.ReactorStatusHandler:input_type -> grpc.ReactorStatusPing
0, // 2: grpc.monitoring.ReactorStatusHandler:output_type -> grpc.ReactorStatusResponse
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
3, // 0: grpc.ReactorStatusPing.devices:type_name -> grpc.Device
0, // 1: grpc.Device.status:type_name -> grpc.Status
2, // 2: grpc.monitoring.ReactorStatusHandler:input_type -> grpc.ReactorStatusPing
1, // 3: grpc.monitoring.ReactorStatusHandler:output_type -> grpc.ReactorStatusResponse
3, // [3:4] is the sub-list for method output_type
2, // [2:3] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_internal_pkg_grpc_monitoring_proto_init() }
@ -299,13 +337,14 @@ func file_internal_pkg_grpc_monitoring_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_pkg_grpc_monitoring_proto_rawDesc,
NumEnums: 0,
NumEnums: 1,
NumMessages: 3,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_internal_pkg_grpc_monitoring_proto_goTypes,
DependencyIndexes: file_internal_pkg_grpc_monitoring_proto_depIdxs,
EnumInfos: file_internal_pkg_grpc_monitoring_proto_enumTypes,
MessageInfos: file_internal_pkg_grpc_monitoring_proto_msgTypes,
}.Build()
File_internal_pkg_grpc_monitoring_proto = out.File

@ -8,17 +8,22 @@ service monitoring {
}
message ReactorStatusResponse {
uint32 id = 1;
int32 id = 1;
}
message ReactorStatusPing {
uint32 id = 1;
int32 id = 1;
// new devices
repeated Device devices = 2;
}
enum Status {
DEAD = 0;
ALIVE = 1;
UNKOWN = 2;
}
message Device {
int32 addr = 1;
string type = 2;
string status = 3;
string data = 4;
int32 addr = 1; // i2c addr
Status status = 2; // most recent status
}
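Since Device now reports the shared Status enum instead of free-form strings, a ping is just (addr, status) pairs. A minimal sketch of building one, assuming the generated package is imported as pb "FRMS/internal/pkg/grpc"; the helper name, id, and address are illustrative:

// buildPing is a hypothetical helper; 0x68 is an illustrative i2c address.
func buildPing(id int32) *pb.ReactorStatusPing {
	return &pb.ReactorStatusPing{
		Id: id,
		Devices: []*pb.Device{
			// a device manager's 0/1 active flag maps directly onto DEAD/ALIVE
			{Addr: 0x68, Status: pb.Status_ALIVE},
		},
	}
}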

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.6.1
// - protoc v3.12.4
// source: internal/pkg/grpc/monitoring.proto
package grpc

@ -82,6 +82,7 @@ type ClientResponse struct {
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
ServerPort uint32 `protobuf:"varint,2,opt,name=serverPort,proto3" json:"serverPort,omitempty"`
Database *Database `protobuf:"bytes,3,opt,name=database,proto3" json:"database,omitempty"`
}
func (x *ClientResponse) Reset() {
@ -130,6 +131,84 @@ func (x *ClientResponse) GetServerPort() uint32 {
return 0
}
func (x *ClientResponse) GetDatabase() *Database {
if x != nil {
return x.Database
}
return nil
}
type Database struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
URL string `protobuf:"bytes,1,opt,name=URL,proto3" json:"URL,omitempty"`
ORG string `protobuf:"bytes,2,opt,name=ORG,proto3" json:"ORG,omitempty"`
Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"`
Bucket string `protobuf:"bytes,4,opt,name=bucket,proto3" json:"bucket,omitempty"`
}
func (x *Database) Reset() {
*x = Database{}
if protoimpl.UnsafeEnabled {
mi := &file_internal_pkg_grpc_server_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Database) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Database) ProtoMessage() {}
func (x *Database) ProtoReflect() protoreflect.Message {
mi := &file_internal_pkg_grpc_server_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Database.ProtoReflect.Descriptor instead.
func (*Database) Descriptor() ([]byte, []int) {
return file_internal_pkg_grpc_server_proto_rawDescGZIP(), []int{2}
}
func (x *Database) GetURL() string {
if x != nil {
return x.URL
}
return ""
}
func (x *Database) GetORG() string {
if x != nil {
return x.ORG
}
return ""
}
func (x *Database) GetToken() string {
if x != nil {
return x.Token
}
return ""
}
func (x *Database) GetBucket() string {
if x != nil {
return x.Bucket
}
return ""
}
var File_internal_pkg_grpc_server_proto protoreflect.FileDescriptor
var file_internal_pkg_grpc_server_proto_rawDesc = []byte{
@ -140,18 +219,27 @@ var file_internal_pkg_grpc_server_proto_rawDesc = []byte{
0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54,
0x79, 0x70, 0x65, 0x22, 0x4c, 0x0a, 0x0e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73,
0x79, 0x70, 0x65, 0x22, 0x78, 0x0a, 0x0e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49,
0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x6f, 0x72,
0x74, 0x32, 0x50, 0x0a, 0x09, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x43,
0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72,
0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e,
0x67, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x42, 0x13, 0x5a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x74, 0x12, 0x2a, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62,
0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, 0x5c, 0x0a,
0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x4c,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x55, 0x52, 0x4c, 0x12, 0x10, 0x0a, 0x03, 0x4f,
0x52, 0x47, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4f, 0x52, 0x47, 0x12, 0x14, 0x0a,
0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f,
0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x04, 0x20,
0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x32, 0x50, 0x0a, 0x09, 0x68,
0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x43, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c,
0x65, 0x72, 0x12, 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x43,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x13, 0x5a,
0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72,
0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -166,19 +254,21 @@ func file_internal_pkg_grpc_server_proto_rawDescGZIP() []byte {
return file_internal_pkg_grpc_server_proto_rawDescData
}
var file_internal_pkg_grpc_server_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_internal_pkg_grpc_server_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_internal_pkg_grpc_server_proto_goTypes = []interface{}{
(*ClientRequest)(nil), // 0: grpc.ClientRequest
(*ClientResponse)(nil), // 1: grpc.ClientResponse
(*Database)(nil), // 2: grpc.Database
}
var file_internal_pkg_grpc_server_proto_depIdxs = []int32{
0, // 0: grpc.handshake.ClientDiscoveryHandler:input_type -> grpc.ClientRequest
1, // 1: grpc.handshake.ClientDiscoveryHandler:output_type -> grpc.ClientResponse
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
2, // 0: grpc.ClientResponse.database:type_name -> grpc.Database
0, // 1: grpc.handshake.ClientDiscoveryHandler:input_type -> grpc.ClientRequest
1, // 2: grpc.handshake.ClientDiscoveryHandler:output_type -> grpc.ClientResponse
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_internal_pkg_grpc_server_proto_init() }
@ -211,6 +301,18 @@ func file_internal_pkg_grpc_server_proto_init() {
return nil
}
}
file_internal_pkg_grpc_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Database); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
@ -218,7 +320,7 @@ func file_internal_pkg_grpc_server_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_internal_pkg_grpc_server_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumMessages: 3,
NumExtensions: 0,
NumServices: 1,
},

@ -10,6 +10,8 @@ service handshake {
message ClientRequest {
uint32 clientId = 1;
string clientType = 2;
string ip = 3; // client ip
uint32 port = 4; // client port for gRPC server
}
message ClientResponse {
@ -22,4 +24,5 @@ message Database {
string URL = 1;
string ORG = 2;
string token = 3;
string bucket = 4;
}
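With the new fields, one handshake round trip hands a client everything it needs to reach InfluxDB. A minimal sketch of the consuming side, assuming an established *grpc.ClientConn and the generated package imported as pb; the helper name and error policy are assumptions:

// fetchDatabase is a hypothetical helper; assumes imports of "context",
// "google.golang.org/grpc", and pb "FRMS/internal/pkg/grpc".
func fetchDatabase(conn *grpc.ClientConn, id uint32) (*pb.Database, error) {
	client := pb.NewHandshakeClient(conn)
	req := &pb.ClientRequest{ClientId: id, ClientType: "reactor"}
	resp, err := client.ClientDiscoveryHandler(context.Background(), req)
	if err != nil {
		return nil, err
	}
	// URL, ORG, token and bucket now travel together in the response
	return resp.GetDatabase(), nil
}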

@ -0,0 +1,88 @@
package i2c
// file has general wrappers to interact with i2c-tools
import (
"FRMS/internal/pkg/logging"
"bytes"
"fmt"
_ "log"
"os/exec"
"strconv"
"strings"
)
func GetConnected(b int) (map[int]bool, error) {
// Returns all the connected devices by address
// might just do this in bash and make it easier
bus := strconv.Itoa(b)
devices := make(map[int]bool) // only keys
cmd := exec.Command("i2cdetect", "-y", "-r", bus)
var out bytes.Buffer
var errs bytes.Buffer
cmd.Stderr = &errs
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
logging.Debug(logging.DError, "I2C error performing scan. %v", errs.String())
return devices, err
}
outString := out.String()
// could split by \n too
split := strings.SplitAfter(outString, ":")
// 1st entry is garbage headers and ending is always \n##:
split = split[1:]
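	// illustrative i2cdetect grid after the header chunk is dropped:
	// 00:          -- -- -- -- -- 08 -- -- -- -- -- -- --
	// 10: -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --
	// a cell at row i, column j maps to address i*16 + j (row 0 starts at 0x03)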
	// walk each row of the grid, collecting live addresses
for i, v := range split {
lst := strings.Index(v, "\n")
trimmed := v[:lst]
trimmed = strings.Trim(trimmed, " ")
// trimmed now holds just possible sensor addresses
count := strings.Split(trimmed, " ")
for j, d := range count {
			// the first row is offset by 3 (i2cdetect starts at 0x03); after that it's just i*16 + j
offset := 0
if i == 0 {
offset = 3
}
addr := i*16 + j + offset
if !strings.Contains(d, "--") && !strings.Contains(d, "UU") {
// active
devices[addr] = true
}
}
}
return devices, nil
}
func SendCmd(b, addr int, command string) (string, error) {
	// sends an arbitrary command over the specified bus to the device at addr;
	// an empty command falls back to a 20-byte read
	// might make a base script for this too
	var cmd *exec.Cmd
	bus := strconv.Itoa(b)
	operation := "r20" // default read
	if command != "" {
		// non-empty command, do a write instead
		operation = fmt.Sprintf("w%d", len(command))
		args := []string{"-y", bus, fmt.Sprintf("%s@0x%x", operation, addr)}
		// i2ctransfer expects each data byte as its own 0xNN argument
		for _, char := range command {
			args = append(args, fmt.Sprintf("0x%x", char))
		}
		cmd = exec.Command("i2ctransfer", args...)
	} else {
		// reading
		cmd = exec.Command("i2ctransfer", "-y", bus, fmt.Sprintf("%s@0x%x", operation, addr))
	}
// exec command
var out bytes.Buffer
var errs bytes.Buffer
cmd.Stderr = &errs
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
logging.Debug(logging.DError, "I2C error getting data! %v", err)
return "", err
}
return out.String(), nil
}
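A minimal usage sketch of the two wrappers above; the bus number is an assumption, and reading every scanned device is purely illustrative:

package main

import (
	"fmt"

	"FRMS/internal/pkg/i2c"
)

func main() {
	devs, err := i2c.GetConnected(1) // scan bus 1
	if err != nil {
		panic(err)
	}
	for addr := range devs {
		// an empty command performs the default 20-byte read
		data, err := i2c.SendCmd(1, addr, "")
		if err != nil {
			continue // device may have dropped off between scan and read
		}
		fmt.Printf("0x%x: %s\n", addr, data)
	}
}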

@ -1,6 +1,81 @@
package influxdb
import (
"fmt"
"github.com/influxdata/influxdb-client-go/v2"
_ "fmt"
_ "github.com/influxdata/influxdb-client-go/v2"
"github.com/spf13/viper"
)
type DBInfo struct {
URL string `mapstructure:"url"`
	Org    string `mapstructure:"org,omitempty"`
Bucket string `mapstructure:"bucket,omitempty"`
Token string `mapstructure:"token,omitempty"`
// Client *influxdb2.Client
}
type DBAdmin struct {
// struct for admin methods
*DBInfo
Config *viper.Viper
}
type DBClient struct {
// struct for client methods
*DBInfo
Config *viper.Viper
}
func NewDBInfo(config *viper.Viper) (*DBInfo, error) {
db := &DBInfo{}
// grabbing config vals
err := config.UnmarshalKey("db", db)
return db, err
}
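// NewDBInfo expects the connection info nested under a top-level "db" key;
// a sketch of the matching YAML (org/bucket values taken from elsewhere in
// this commit, the rest are placeholders):
//
//	db:
//	  url: http://localhost:8086
//	  org: ForeLight
//	  bucket: bb
//	  token: replace-with-a-real-token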
func NewDBClient(config *viper.Viper) (*DBClient, error) {
client := &DBClient{Config: config}
// grabbing config vals
var err error
client.DBInfo, err = NewDBInfo(config)
return client, err
}
func NewDBAdmin(config *viper.Viper) (*DBAdmin, error) {
admin := &DBAdmin{Config: config}
var err error
// creating client
admin.DBInfo, err = NewDBInfo(config)
return admin, err
}
// base level funcs
func (d *DBInfo) Start() error {
// connect to DB based w/ info
return nil
}
func (d *DBAdmin) GetReactorClient(id int) (url, bucket, org, token string, err error) {
	// given an id, returns
	// (url, bucket, org, token, error) for said id, matching the signature
/*
client := influxdb2.NewClient(d.URL, d.Token)
defer client.Close()
bucket, err := client.BucketsAPI().FindBucketByName(context.Background(), id)
if err != nil {
return "", "", err
}
if d.ReactorExists(id) {
// get corresponding reactor token and bucket
}
*/
url = d.URL
org = d.Org
token = ""
bucket = ""
//err = errors.New("Unimpl")
err = nil
return
}

@ -0,0 +1,100 @@
package manager
import (
"errors"
"math"
"math/rand"
"sync"
"sync/atomic"
"time"
)
// basic manager for starting/stopping checks plus a built-in heartbeat for downtime detection
// used across server/reactor
type Connection struct {
Attempts float64 // float for pow
MaxAttempts int // max allowed
sync.Mutex
}
type Manager struct {
*Connection // embedded for timeout stuff
Active int32 // atomic checks
}
func New(maxCon int) *Manager {
c := &Connection{MaxAttempts: maxCon}
m := &Manager{
Connection: c,
}
return m
}
func (m *Manager) Start() error {
// atomically checks/updates status
if atomic.CompareAndSwapInt32(&m.Active, 0, 1) {
m.ResetConnections()
return nil
}
// already running
	return errors.New("manager already started")
}
func (m *Manager) Exit() error {
if atomic.CompareAndSwapInt32(&m.Active, 1, 0) {
return nil
}
	return errors.New("manager not active")
}
func (m *Manager) IsActive() int {
return int(atomic.LoadInt32(&m.Active))
}
// Heartbeat tracker
func (m *Manager) HeartBeat(ping chan struct{}, hb, interval int, scale time.Duration) {
	// pings channel roughly every (hb ± interval) * scale
// can be used anywhere a heartbeat is needed
// closes channel on exit
if interval > 0 {
rand.Seed(time.Now().UnixNano())
}
for atomic.LoadInt32(&m.Active) > 0 {
		// atomic read may cause memory leak, can revisit
		ping <- struct{}{} // empty struct, zero-size payload
sleep := time.Duration(hb-interval) * scale
if interval > 0 {
sleep += time.Duration(rand.Intn(2*interval)) * scale
}
time.Sleep(sleep)
}
// exited, close chan
close(ping)
}
// connection timeout generator
func (c *Connection) Timeout() (time.Duration, error) {
// exponential backoff
c.Lock()
defer c.Unlock()
if int(c.Attempts) < c.MaxAttempts {
c.Attempts += 1
		// 100ms, 200ms, 400ms, ... (50 * 2^attempts)
to := time.Duration(50*math.Pow(2, c.Attempts)) * time.Millisecond
return to, nil
}
	return 0, errors.New("connection failed")
}
func (c *Connection) ResetConnections() {
c.Lock()
defer c.Unlock()
c.Attempts = 0
}
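A minimal sketch of how the two halves compose: a caller drains the heartbeat channel for downtime checks and draws from Timeout for connection retries (the intervals and attempt count here are illustrative):

package main

import (
	"fmt"
	"time"

	"FRMS/internal/pkg/manager"
)

func main() {
	m := manager.New(3) // allow up to 3 connection attempts
	if err := m.Start(); err != nil {
		panic(err)
	}

	// downtime detection: fires roughly every 5s ± 1s until Exit() is called
	ping := make(chan struct{})
	go m.HeartBeat(ping, 5, 1, time.Second)
	go func() {
		for range ping {
			fmt.Println("heartbeat")
		}
	}()

	// connection retry: sleeps 100ms, 200ms, 400ms, then errors out
	for {
		to, err := m.Timeout()
		if err != nil {
			fmt.Println("giving up:", err)
			break
		}
		time.Sleep(to)
	}
	m.Exit()
}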

@ -0,0 +1,251 @@
package reactor
// file describes reactor level coordinator and associated implementation
import (
pb "FRMS/internal/pkg/grpc"
"FRMS/internal/pkg/influxdb"
"FRMS/internal/pkg/logging"
"FRMS/internal/pkg/manager"
"FRMS/internal/pkg/system"
"context"
"fmt"
"time"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
)
// basic manager
// I don't think I actually need this interface, package manager has a point
type Manager interface {
Start() error
Exit() error
Timeout() (time.Duration, error)
HeartBeat(chan struct{}, int, int, time.Duration) // creates a hb
}
func NewManager(max int) Manager {
return manager.New(max)
}
// db client
type DBClient interface {
Start() error
}
func NewDBClient(config *viper.Viper) (DBClient, error) {
return influxdb.NewDBClient(config)
}
type Server struct {
Ip string `mapstructure:"ip"`
Port int `mapstructure:"port"`
}
type ReactorInfo struct {
Name string `mapstructure:"name,omitempty"`
ID int `mapstructure:"id,omitempty"`
Model string `mapstructure:"model,omitempty"`
HB int `mapstructure:"heartbeat"`
Bus int `mapstructure:"bus"`
Server
}
type ReactorCoordinator struct {
Manager // base manager
Config *viper.Viper // config
ReactorInfo `mapstructure:",squash"`
Database DBClient
pb.MonitoringClient // grpc embedding
*DeviceCoordinator // struct for locking
Err chan error
}
func NewCoordinator(config *viper.Viper, errCh chan error) *ReactorCoordinator {
m := NewManager(6) // max 6 attempts
dc := NewDeviceCoordinator(config)
c := &ReactorCoordinator{
Manager: m,
Config: config,
DeviceCoordinator: dc,
Err: errCh,
}
// this is going to be scuffed
//c.DB = &DB{Bucket: "bb", Org: "ForeLight", URL: url, Token: "S1UZssBu6KPfHaQCt34pZFpyc5lzbH9XanYJWCkOI5FqLY7gq205C6FTH-CmugiPH6o2WoKlTkEuPgIfaJjAhw=="}
return c
}
func (c *ReactorCoordinator) Start() {
// should discover hwinfo and sensors on its own
// now setting up sensor managers
var err error
if err = c.Manager.Start(); err != nil {
c.Err <- err
}
// load config
if err = c.LoadConfig(); err != nil { // loads info
c.Err <- err
}
if err = c.DeviceCoordinator.Start(c.ReactorInfo.Bus); err != nil {
c.Err <- err
}
// loading clients
if c.Database, err = NewDBClient(c.Config); err != nil {
c.Err <- err
}
go c.Discover()
go c.Database.Start()
}
func (c *ReactorCoordinator) LoadConfig() error {
var err error
// get hb
if !c.Config.IsSet("reactor.heartbeat") {
// default to 5 seconds
c.Config.Set("reactor.heartbeat", 5)
}
// check id
if !c.Config.IsSet("reactor.id") {
// get from hw
var id int
if id, err = system.GetId("eth0"); err != nil {
return err
}
c.Config.Set("reactor.id", id)
}
// check Model
if !c.Config.IsSet("reactor.model") {
// get from hw
var model string
if model, err = system.GetModel(); err != nil {
return err
}
c.Config.Set("reactor.model", model)
}
// check i2c bus
if !c.Config.IsSet("reactor.bus") {
// get from hw
var bus int
if bus, err = system.GetBus(); err != nil {
return err
}
c.Config.Set("reactor.bus", bus)
}
	// all good, unmarshaling
	err = c.Config.UnmarshalKey("reactor", c)
return err
}
func (c *ReactorCoordinator) Monitor() {
// periodically grabs connected devs and updates list
ch := make(chan struct{})
go c.HeartBeat(ch, c.HB, 0, time.Second)
for range ch {
// check devs and ping
logging.Debug(logging.DClient, "RLC Pinging server")
// ping central server with status
go c.Ping()
}
}
func (c *ReactorCoordinator) Discover() {
	// sets up connection to central coordinator
conn, err := c.Connect(c.Ip, c.Port)
if err != nil {
c.Err <- err
}
defer conn.Close()
client := pb.NewHandshakeClient(conn)
req := &pb.ClientRequest{ClientId: uint32(c.ID), ClientType: "reactor"}
resp, err := client.ClientDiscoveryHandler(context.Background(), req)
if err != nil {
c.Err <- err
}
c.Port = int(resp.GetServerPort()) // updating server port
logging.Debug(logging.DClient, "RLC Central server reached, supplied port %v", c.Port)
// connecting to manager now
clientConn, err := c.Connect(c.Ip, c.Port)
if err != nil {
c.Err <- err
}
c.MonitoringClient = pb.NewMonitoringClient(clientConn)
// manager
go c.Monitor()
}
func (c *ReactorCoordinator) Connect(ip string, port int) (*grpc.ClientConn, error) {
// function connects to central server and passes hwinfo
var opts []grpc.DialOption
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
var conn *grpc.ClientConn
var err error
for {
conn, err = grpc.Dial(fmt.Sprintf("%v:%v", ip, port), opts...)
		code := status.Code(err)
		if code != 0 { // != OK
			if code == 5 || code == 14 { // NotFound or Unavailable, i.e. service temp down
var to time.Duration
if to, err = c.Timeout(); err != nil {
// from manager
return &grpc.ClientConn{}, err
}
logging.Debug(logging.DClient, "Server currently unavailable, retrying in %v", to)
time.Sleep(to)
} else {
return &grpc.ClientConn{}, err
}
}
break
}
return conn, nil
}
func (c *ReactorCoordinator) Ping() {
// send device info to central coordinator
fmt.Printf("Pinging server\n")
var devices []*pb.Device
var err error
if devices, err = c.GetDeviceInfo(); err != nil {
c.Err <- err
}
// create request
req := &pb.ReactorStatusPing{
Id: int32(c.ID),
Devices: devices,
}
// ping server
if _, err = c.ReactorStatusHandler(context.Background(), req); err != nil {
c.Err <- err
}
}
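For context, a minimal sketch of how cmd/reactor/main.go might drive this coordinator; the config name, path, and fatal-error policy are assumptions:

package main

import (
	"log"

	"FRMS/internal/pkg/reactor"

	"github.com/spf13/viper"
)

func main() {
	config := viper.New()
	config.SetConfigName("reactor") // hypothetical config file name
	config.AddConfigPath("configs")
	config.SetConfigType("yaml")
	if err := config.ReadInConfig(); err != nil {
		log.Fatal(err)
	}
	errCh := make(chan error)
	rlc := reactor.NewCoordinator(config, errCh)
	go rlc.Start()
	log.Fatal(<-errCh) // treat any coordinator error as fatal
}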

@ -0,0 +1,134 @@
package reactor
import (
"FRMS/internal/pkg/device"
pb "FRMS/internal/pkg/grpc"
"FRMS/internal/pkg/i2c"
"fmt"
"sync"
"time"
"github.com/spf13/viper"
)
// Created by rlc to manage devices
// device manager
type DeviceManager interface {
Start() error
Exit() error
IsActive() int
}
func NewDeviceManager(bus, addr int, config *viper.Viper) (DeviceManager, error) {
return device.New(bus, addr, config)
}
// device coordinator itself
type DeviceCoordinator struct {
// base level manager for heartbeat
Bus int // i2c bus
Manager
Config *viper.Viper
managersMu sync.RWMutex
DeviceManagers map[int]DeviceManager
}
func NewDeviceCoordinator(config *viper.Viper) *DeviceCoordinator {
dm := make(map[int]DeviceManager)
m := NewManager(0)
c := &DeviceCoordinator{
Manager: m,
DeviceManagers: dm,
Config: config,
}
return c
}
func (c *DeviceCoordinator) Start(bus int) error {
var err error
if err = c.Manager.Start(); err != nil {
return err
}
// i2c bus
c.Bus = bus
go c.Monitor()
return err
}
func (c *DeviceCoordinator) Monitor() {
// monitor I2C for new devices
ch := make(chan struct{})
go c.HeartBeat(ch, 10, 0, time.Second)
for range ch {
// on notification (10s)
devs, err := i2c.GetConnected(c.Bus)
if err != nil {
panic(err)
}
// update list
go c.UpdateManagers(devs)
}
}
func (c *DeviceCoordinator) UpdateManagers(active map[int]bool) {
	// reconciles running device managers against the set of connected addresses
c.managersMu.Lock()
defer c.managersMu.Unlock()
for addr, dm := range c.DeviceManagers {
_, ok := active[addr]
if ok && dm.IsActive() == 0 {
// active and dm not
if err := dm.Start(); err != nil {
panic(err)
}
} else if !ok && dm.IsActive() == 1 {
// not active and dm is
if err := dm.Exit(); err != nil {
panic(err)
}
}
// remove from map
delete(active, addr)
}
	for addr := range active {
// no manager, create one
fmt.Printf("New device %d!\n", addr)
dm, err := NewDeviceManager(c.Bus, addr, c.Config)
if err != nil {
panic(err)
}
if err := dm.Start(); err != nil {
panic(err)
}
c.DeviceManagers[addr] = dm
}
}
func (c *DeviceCoordinator) GetDeviceInfo() ([]*pb.Device, error) {
// gets device info for monitoring
c.managersMu.RLock()
defer c.managersMu.RUnlock()
var devices []*pb.Device
for addr, dm := range c.DeviceManagers {
// looping over devices
devices = append(devices, &pb.Device{
Addr: int32(addr),
Status: pb.Status(dm.IsActive()),
})
}
return devices, nil
}

@ -1,107 +0,0 @@
package reactor
import (
"sync"
"context"
"strings"
"github.com/influxdata/influxdb-client-go/v2"
"strconv"
"time"
//"log"
//"fmt"
//"net"
//"FRMS/internal/pkg/logging"
//"google.golang.org/grpc"
pb "FRMS/internal/pkg/grpc"
)
// implements grpc handler and device data aggregator handler
type DeviceStatus struct {
Addr int
Status string
Type string
Data string
}
// get reactor/device status
func (c *Coordinator) DevStatus(ch chan *DeviceStatus, a int, dm DeviceManager) {
d := &DeviceStatus{Addr:a}
d.Type = dm.GetType()
d.Status = dm.GetStatus()
d.Data = dm.GetData()
ch <-d
}
func (c *Coordinator) GetStatus(client influxdb2.Client) []*pb.Device {
// db stuff
api := client.WriteAPIBlocking(c.Org,c.Bucket)
var wg sync.WaitGroup
devs := []*pb.Device{}
statusChan := make(chan *DeviceStatus)
c.Devices.Lock()
for a,dm := range c.Devices.Managers {
wg.Add(1)
go c.DevStatus(statusChan,a,dm)
}
c.Devices.Unlock()
allDone := make(chan struct{})
go func(){
wg.Wait()
allDone <-struct{}{}
	}() // once all the statuses are sent we send all done on the chan
for {
select{
case s:= <-statusChan:
//fmt.Printf("%v is %v\n",s.Type,s.Status)
data := strings.Split(s.Data,",") // T:10C,H:102% -> T:10C H:10%
for _, m := range data {
var meas string
splt := strings.Split(m,":") // T 10C or H 10%
if splt[0] == "T" {
meas = "Temperature"
} else if splt[0] == "H" {
meas = "Humidity"
}
val, err := strconv.ParseFloat(strings.Trim(splt[1]," %C\n"), 64)
if err != nil {
panic(err)
}
p := influxdb2.NewPoint("measurements",map[string]string{"type":meas},map[string]interface{}{"val":val},time.Now())
if err := api.WritePoint(context.Background(), p); err != nil {
panic(err)
}
}
devs = append(devs,&pb.Device{Addr:int32(s.Addr),Type:s.Type,Status:s.Status,Data:s.Data})
wg.Done()
case <-allDone:
return devs
}
}
}
// grpc status update handler
func (c *Coordinator) Ping(client influxdb2.Client) {
// sends all device status to central coordinator
devs := c.GetStatus(client)
req := &pb.ReactorStatusPing{Id:c.Id,Devices:devs}
_, err := c.MonitoringClient.ReactorStatusHandler(context.Background(),req)
if err != nil {
c.Err <-err
go c.Exit()
}
}
/*
func (c *Coordinator) Register() {
ip := c.hwinfo.Ip
if lis, err := net.Listen("tcp", fmt.Sprintf("%v:0",ip)); err != nil {
log.Fatal(err)
} else {
c.hwinfo.Port = lis.Addr().(*net.TCPAddr).Port
grpcServer := grpc.NewServer()
pb.RegisterMonitoringServer(grpcServer,c)
go grpcServer.Serve(lis)
}
logging.Debug(logging.DStart, "Listening for pings on %v:%v\n",ip,c.hwinfo.Port)
}
*/

@ -1,260 +0,0 @@
package reactor
// file describes reactor level coordinator and associated implementation
import (
"fmt"
"sync"
"time"
"math"
"FRMS/internal/pkg/system"
"FRMS/internal/pkg/I2C"
"FRMS/internal/pkg/sensor"
"FRMS/internal/pkg/logging"
"errors"
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
"google.golang.org/grpc/credentials/insecure"
"github.com/influxdata/influxdb-client-go/v2"
pb "FRMS/internal/pkg/grpc"
)
// Coordinator == Reactor Level Coordinator
type Coordinator struct {
Ip string
Port int // listener port
MonitoringClient pb.MonitoringClient
*hw
Devices *DeviceManagers // struct for fine grain locking
Err chan error
mu sync.Mutex
HB time.Duration
PingTimer chan struct{}
*DB
Active active
}
type DB struct {
// struct to hold db connection info
Org string
Bucket string
Token string
URL string
}
type active struct {
bool
int
sync.Mutex
}
type hw struct {
// store reactor info
Model string
Bus int
Id uint32
}
type DeviceManagers struct {
Managers map[int]DeviceManager
sync.Mutex
}
// basic devicemanager struct manipulations
type DeviceManager interface {
Start()
GetType() string
GetStatus() string
GetData() string
}
type I2CDev interface {
GetAddr() int
GetData() string
GetStatus() string
GetType() string
}
func NewDeviceManager(i2c I2CDev) DeviceManager {
return sensor.NewDeviceManager(i2c)
}
type I2CMonitor interface {
Monitor()
GetDevice(int) interface{ GetAddr() int; GetStatus() string; GetData() string; GetType() string}
}
func NewI2CMonitor(b int,ch chan int) I2CMonitor {
return I2C.NewMonitor(b, ch)
}
func NewCoordinator(ip string,port int,ch chan error) *Coordinator {
sen := new(DeviceManagers)
sen.Managers = make(map[int]DeviceManager)
c := &Coordinator{Err:ch,Devices:sen}
c.Ip = ip
c.Port = port
c.hw = &hw{}
c.HB = time.Duration(5 * time.Second)
c.PingTimer = make(chan struct{})
// this is going to be scuffed
url := fmt.Sprintf("http://%s:8086",ip)
fmt.Println(url)
c.DB = &DB{Bucket:"bb",Org:"ForeLight",URL:url,Token:"S1UZssBu6KPfHaQCt34pZFpyc5lzbH9XanYJWCkOI5FqLY7gq205C6FTH-CmugiPH6o2WoKlTkEuPgIfaJjAhw=="}
return c
}
func (c *Coordinator) Start() {
// should discover hwinfo and sensors on its own
// now setting up sensor managers
// setting up hw stuff
c.Activate()
var err error
if c.Id, err = system.GetId("eth0"); err != nil {
c.Err <-err
}
if c.Model, err = system.GetModel(); err != nil {
c.Err <-err
}
if c.Bus, err = system.GetBus(); err != nil {
c.Err <-err
}
go c.Monitor()
go c.Discover()
}
func (c *Coordinator) Monitor() {
// function to automatically create and destroy sm
// scuffedaf
client := influxdb2.NewClient(c.URL,c.Token)
defer client.Close()
dch := make(chan int)
im := NewI2CMonitor(c.Bus,dch)
go im.Monitor()
for c.IsActive() {
select {
case d := <-dch:
i := im.GetDevice(d)
go c.DeviceConnect(i)
case <-c.PingTimer:
go c.Ping(client)
}
}
}
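// HeartBeat nudges Monitor over PingTimer every HB (5 s here) for as long as
// the coordinator is active, so the select loop above pings the server on a
// fixed schedule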
func (c *Coordinator) HeartBeat() {
for c.IsActive() {
c.PingTimer <-struct{}{}
logging.Debug(logging.DClient,"RLC Pinging server")
time.Sleep(c.HB)
}
}
func (c *Coordinator) DeviceConnect(i2c I2CDev) {
c.Devices.Lock()
defer c.Devices.Unlock()
addr := i2c.GetAddr()
if dm, exists := c.Devices.Managers[addr]; !exists {
dm = NewDeviceManager(i2c)
c.Devices.Managers[addr] = dm
go dm.Start()
} else {
go dm.Start()
}
}
func (c *Coordinator) Discover() {
// sets up connection to central coordinator
conn, err := c.Connect(c.Ip, c.Port)
if err != nil {
c.Err <-err
}
defer conn.Close()
client := pb.NewHandshakeClient(conn)
req := &pb.ClientRequest{ClientId:c.Id,ClientType:"reactor"}
resp, err := client.ClientDiscoveryHandler(context.Background(), req)
if err != nil {
c.Err <-err
}
c.Port = int(resp.GetServerPort()) // updating server port
logging.Debug(logging.DClient,"RLC Central server reached, supplied port %v",c.Port)
// connecting to manager now
clientConn, err := c.Connect(c.Ip, c.Port)
if err != nil {
c.Err <-err
}
c.MonitoringClient = pb.NewMonitoringClient(clientConn)
go c.HeartBeat()
}
func (c *Coordinator) Connect(ip string, port int) (*grpc.ClientConn, error) {
// function connects to central server and passes hwinfo
var opts []grpc.DialOption
opts = append(opts,grpc.WithTransportCredentials(insecure.NewCredentials()))
var conn *grpc.ClientConn
var err error
for {
conn, err = grpc.Dial(fmt.Sprintf("%v:%v",ip,port),opts...)
code := status.Code(err)
if code != 0 { // != OK
if code == 5 || code == 14 { // Unavailable or NotFound: service temp down
to := c.Timeout()
if to == 0 {
err = errors.New("Failed to connect to central server")
return &grpc.ClientConn{}, err
}
logging.Debug(logging.DClient,"Server currently unavailable, retrying in %v ms", to)
time.Sleep(time.Duration(to) * time.Millisecond)
} else {
return &grpc.ClientConn{}, err
}
}
break;
}
return conn, nil
}
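// Timeout implements exponential backoff: attempt n waits 5*2^n ms
// (5, 10, 20, ... 1280 ms, roughly 2.5 s in total) and returns 0 once the
// 9 retries are exhausted so callers can give up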
func (c *Coordinator) Timeout() int {
c.Active.Lock()
defer c.Active.Unlock()
if c.Active.int < 9 {
v := int(5 * math.Pow(float64(2), float64(c.Active.int)))
c.Active.int +=1
return v
} else {
// exceeded retries
return 0
}
}
func (c *Coordinator) IsActive() bool {
c.Active.Lock()
defer c.Active.Unlock()
return c.Active.bool
}
func (c *Coordinator) Exit() bool {
c.Active.Lock()
defer c.Active.Unlock()
if c.Active.bool {
c.Active.bool = false
logging.Debug(logging.DClient,"RLC Exiting...")
return true
} else {
logging.Debug(logging.DError, "RLC Already Dead!")
return false
}
}
func (c *Coordinator) Activate() bool {
c.Active.Lock()
defer c.Active.Unlock()
if c.Active.bool {
logging.Debug(logging.DError,"RLC Already Started!")
return false
} else {
logging.Debug(logging.DClient, "RLC Starting")
c.Active.bool = true
return c.Active.bool
}
}

@ -1,114 +0,0 @@
package sensor
import (
_"fmt"
"time"
"sync"
"strings"
_ "FRMS/internal/pkg/I2C"
"log"
)
type Manager struct {
*Dev
I2CDevice
*Active
Hb time.Duration
}
type Active struct {
sync.Mutex
bool
int
}
type Dev struct {
// last known values
Addr int
Type string
Status string // could be more efficient but to hell with it
Data string
}
type I2CDevice interface {
// basic device info
GetAddr() int
GetStatus() string
GetType() string
GetData() string
}
func NewDeviceManager(i2c I2CDevice) *Manager {
m := &Manager{Hb:time.Duration(1*time.Second)}
m.I2CDevice = i2c
m.Active = &Active{}
m.Dev = &Dev{Addr:i2c.GetAddr(),Type:i2c.GetType(),Status:i2c.GetStatus(),Data:i2c.GetData()}
return m
}
func (m *Manager) Start() {
// goal is to start a long running monitoring routine
if !m.Activate() {
log.Fatal("Manager already running!")
} // atomically activated if this runs
// go m.Monitor()
}
func (m *Manager) Exit() {
if !m.Deactivate() {
log.Fatal("Manager already exited!")
}
}
func (m *Manager) GetType() string {
return m.Type
}
func (m *Manager) GetStatus() string {
m.Status = m.I2CDevice.GetStatus()
if m.IsActive() && strings.Contains(m.Status,"KILLED") {
m.Exit()
}
return m.Status
}
func (m *Manager) GetData() string {
m.Data = m.I2CDevice.GetData()
return m.Data
}
func (m *Manager) GetAddr() int {
return m.Addr
}
// atomic activation and deactivation
func (a *Active) Activate() bool {
// returns true if success, false otherwise
a.Lock()
defer a.Unlock()
if a.bool { // already active
return false
} else {
a.bool = true
a.int = 0
return a.bool
}
}
func (a *Active) Deactivate() bool {
// returns true if success, false otherwise
a.Lock()
defer a.Unlock()
if a.bool {
a.bool = false
return true
} else { // already deactivated
return a.bool // false
}
}
func (a *Active) IsActive() bool {
a.Lock()
defer a.Unlock()
return a.bool
}

@ -1,6 +0,0 @@
package sensor
import (
_ "fmt"
)

@ -1,194 +1,198 @@
package server
import (
"sync"
//"fmt"
"net"
pb "FRMS/internal/pkg/grpc"
"FRMS/internal/pkg/influxdb"
_ "FRMS/internal/pkg/influxdb"
"FRMS/internal/pkg/logging"
"context"
"errors"
"FRMS/internal/pkg/logging"
"google.golang.org/grpc"
pb "FRMS/internal/pkg/grpc"
"fmt"
"net"
"sync"
"github.com/spf13/viper"
"google.golang.org/grpc"
)
// this package creates coordinators responsible for keeping track of active clients and invoking managers
type SubCoordinator interface {
Start()
NewManager(*Client,*SystemViewer, chan error) GeneralManager
GetManager(uint32) (GeneralManager, bool)
AddManager(uint32, GeneralManager)
Register()
// this package creates the central coordinator and sub coordinators to route clients
// db client interface
type Database interface {
// getters (all create if they don't exist)
GetReactorClient(int) (string, string, string, string, error) // returns (url, org, token, bucket, err)
}
type GeneralManager interface {
Start()
UpdateClient(*Client)
ReactorStatusHandler(context.Context,*pb.ReactorStatusPing) (*pb.ReactorStatusResponse, error)
GetDevices(context.Context, *pb.GetDevicesRequest) (*pb.GetDevicesResponse, error)
func NewDatabaseAdmin(config *viper.Viper) (Database, error) {
return influxdb.NewDBAdmin(config)
}
type Coordinator struct {
Port int // port that we set up gRPC endpoint on
//*Managers going to embed this in subcoordinator
SubCoordinator
*SystemViewer
type CentralCoordinator struct {
// main coordinator
ClientConnections *ClientPacket
*ReactorCoordinator
Database
Config *viper.Viper
// from config
Ports map[string]int `mapstructure:"ports"`
Err chan error
}
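// a config shape that would satisfy the mapstructure tags above and the
// "server.ports" lookups below (key names from this file, values assumed):
//
// server:
//   ports:
//     lis: 2022
//     reactor: 2023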
type Managers struct {
Directory map[uint32]GeneralManager
sync.RWMutex // potential perf
func NewCentralCoordinator(config *viper.Viper, ch chan error) *CentralCoordinator {
// create a central coordinator to manage requests
db, err := NewDatabaseAdmin(config)
if err != nil {
ch <- err
}
// interface stuff
func NewCoordinator(clientType string, sys *SystemViewer, err chan error) *Coordinator {
d := make(map[uint32]GeneralManager)
m := &Managers{Directory:d}
c := &Coordinator{Err:err}
c.Port = 2023
sub, errs := NewSubCoordinator(clientType, m, err)
if errs != nil {
err <-errs
rc, err := NewReactorCoordinator(config, ch)
if err != nil {
ch <- err
}
c.SubCoordinator = sub
c.SystemViewer = sys
//c.Managers = m
go c.Register()
return c
if err = config.UnmarshalKey("server.ports", rc); err != nil { // get reactor port
ch <- err
}
c := &CentralCoordinator{
Err: ch,
Config: config,
Database: db,
ReactorCoordinator: rc,
}
func (c *Coordinator) Start() {
// on start we need to create channel listener
// on each new connection we want to check its id against our mapping
c.SubCoordinator.Start()
// grab config settings
if err = config.UnmarshalKey("server", c); err != nil {
ch <- err
}
func (c *Coordinator) ClientHandler(cl *Client) int {
// (creates and) notifies manager of client connection
go c.UpdateManager(cl)
return c.Port
return c
}
func (c *Coordinator) UpdateManager(cl *Client) {
// shouldn't happen all that often so should be fine to lock
m, exists := c.GetManager(cl.Id)
if !exists {
m = c.NewManager(cl, c.SystemViewer, c.Err)
m.UpdateClient(cl)
go c.AddManager(cl.Id, m)
go m.Start()
func (c *CentralCoordinator) Start() {
// starts up associated funcs
clientChan := make(chan *ClientPacket)
l := NewListener(clientChan, c.Err)
// grabs lis port
if err := c.Config.UnmarshalKey("server.ports", l); err != nil {
c.Err <- err
}
// starting reactor coordinator
if err := c.ReactorCoordinator.Start(); err != nil {
c.Err <- err
}
go m.UpdateClient(cl)
// starting listener
if err := l.Start(); err != nil {
c.Err <- err
}
func (m *Managers) AddManager(id uint32, man GeneralManager) {
m.Lock()
defer m.Unlock()
m.Directory[id] = man
// lastly start client listener
go c.ClientListener(clientChan)
}
func (m *Managers) GetManager(id uint32) (GeneralManager, bool) {
// just read locks and returns
m.RLock()
defer m.RUnlock()
man, exists := m.Directory[id]
return man, exists
func (c *CentralCoordinator) ClientListener(ch chan *ClientPacket) {
for client := range ch {
// basically loops until channel is closed
client.Response <- c.ClientHandler(client.Client) // respond with cred
}
}
func NewSubCoordinator(clientType string, m *Managers, err chan error) (SubCoordinator, error) {
func (c *CentralCoordinator) ClientHandler(cl *Client) *ClientResponse {
// returns reactor db info
var err error
cr := &ClientResponse{Port: c.Ports[cl.Type]}
if clientType == "reactor" {
c := &reactorCoordinator{}
c.Managers = m
return c, nil
} else if clientType == "tui" {
c := &tuiCoordinator{}
c.Managers = m
return c, nil
if cl.Type == "reactor" {
// get reactor info
go c.ReactorCoordinator.ClientHandler(cl)
// db info
cr.URL, cr.Org, cr.Token, cr.Bucket, err = c.Database.GetReactorClient(cl.Id)
} else {
// throw error
err = errors.New(fmt.Sprintf("Client type %s not recognized!", cl.Type))
}
return &reactorCoordinator{}, errors.New("Unrecognized client type")
// returns based on cl type
if err != nil {
c.Err <- err
}
return cr
}
// creating sub coordinators for associated gRPC handlers
// reactor coordinator
type reactorCoordinator struct {
*Managers
type ReactorCoordinator struct {
Port int `mapstructure:"reactor"`
*ReactorManagers
Err chan error
pb.UnimplementedMonitoringServer
}
func (r *reactorCoordinator) Start() {
logging.Debug(logging.DStart,"RCO 01 Starting!")
type ReactorManagers struct {
Config *viper.Viper
Directory map[int]*ReactorManager
sync.RWMutex
}
func (r *reactorCoordinator) NewManager(cl *Client, sys *SystemViewer, err chan error) GeneralManager {
logging.Debug(logging.DClient, "RCO 01 starting manager for %v client %v",cl.Type,cl.Id)
return NewReactorManager(cl,sys,err)
func NewReactorCoordinator(config *viper.Viper, errCh chan error) (*ReactorCoordinator, error) {
rmap := make(map[int]*ReactorManager)
rm := &ReactorManagers{Directory: rmap, Config: config}
c := &ReactorCoordinator{Err: errCh, ReactorManagers: rm}
return c, nil
}
func (r *reactorCoordinator) Register() {
lis, err := net.Listen("tcp", ":2023")
if err != nil {
// rip
func (c *ReactorCoordinator) Start() error {
logging.Debug(logging.DStart, "RCO 01 Starting!")
// register grpc service
return c.Register()
}
func (c *ReactorCoordinator) ClientHandler(cl *Client) {
// updates clients if necessary
if err := c.UpdateReactorManager(cl, c.Err); err != nil {
c.Err <- err
}
grpcServer := grpc.NewServer()
pb.RegisterMonitoringServer(grpcServer,r)
go grpcServer.Serve(lis)
logging.Debug(logging.DClient, "RCO ready for client requests")
}
func (r *reactorCoordinator) ReactorStatusHandler(ctx context.Context, req *pb.ReactorStatusPing) (*pb.ReactorStatusResponse, error) {
m, exists := r.GetManager(req.GetId())
func (m *ReactorManagers) GetReactorManager(id int) (*ReactorManager, error) {
m.RLock()
defer m.RUnlock()
rm, exists := m.Directory[id]
if !exists {
return &pb.ReactorStatusResponse{}, errors.New("Manager doesn't exists for that client")
return &ReactorManager{}, errors.New(fmt.Sprintf("No manager for reactor %d!", id))
}
return m.ReactorStatusHandler(ctx, req)
return rm, nil
}
//tui coordinator
type tuiCoordinator struct {
*Managers
pb.UnimplementedManagementServer
}
func (m *ReactorManagers) UpdateReactorManager(cl *Client, errCh chan error) error {
// full write lock since we may create and insert a new manager below
m.Lock()
defer m.Unlock()
func (t *tuiCoordinator) Start() {
logging.Debug(logging.DStart,"TCO 01 Starting!")
}
var err error
func (t *tuiCoordinator) NewManager(cl *Client, sys *SystemViewer, err chan error) GeneralManager {
logging.Debug(logging.DClient, "TCO 01 starting manager for %v client %v",cl.Type,cl.Id)
return NewTUIManager(cl,sys,err)
rm, exists := m.Directory[cl.Id]
if !exists {
logging.Debug(logging.DClient, "RCO creating manager for reactor client %v", cl.Id)
// creating
rm = NewReactorManager(cl, m.Config, errCh)
// starting
if err = rm.Start(); err != nil {
return err
}
m.Directory[cl.Id] = rm
}
return rm.UpdateClient(cl)
}
func (t *tuiCoordinator) Register() {
lis, err := net.Listen("tcp", ":2024")
func (r *ReactorCoordinator) Register() error {
lis, err := net.Listen("tcp", fmt.Sprintf(":%v", r.Port))
if err != nil {
// rip
return err
}
grpcServer := grpc.NewServer()
pb.RegisterManagementServer(grpcServer,t)
pb.RegisterMonitoringServer(grpcServer, r)
go grpcServer.Serve(lis)
logging.Debug(logging.DClient, "TCO ready for client requests")
}
func (t *tuiCoordinator) GetDevices(ctx context.Context, req *pb.GetDevicesRequest) (*pb.GetDevicesResponse, error) {
// grpc handler to fwd to manager
m, exists := t.GetManager(req.GetClientId())
if !exists {
// doesnt exist for some reason
return &pb.GetDevicesResponse{}, errors.New("Manager doesn't exists for client")
}
return m.GetDevices(ctx,req)
logging.Debug(logging.DClient, "RCO ready for client requests")
return nil
}
// unimplemented bs for grpc
func (t *tuiCoordinator) DeleteReactor(ctx context.Context, req *pb.DeleteReactorRequest) (*pb.DeleteReactorResponse, error) {
// TODO
return &pb.DeleteReactorResponse{}, nil
func (r *ReactorCoordinator) ReactorStatusHandler(ctx context.Context, req *pb.ReactorStatusPing) (*pb.ReactorStatusResponse, error) {
rm, err := r.GetReactorManager(int(req.GetId()))
// error checking
if err != nil {
return &pb.ReactorStatusResponse{}, err
}
func (t *tuiCoordinator) DeleteReactorDevice(ctx context.Context, req *pb.DeleteReactorDeviceRequest) (*pb.DeleteReactorDeviceResponse, error) {
// TODO
return &pb.DeleteReactorDeviceResponse{}, nil
return rm.ReactorStatusHandler(ctx, req)
}

@ -2,94 +2,83 @@ package server
import (
//"log"
pb "FRMS/internal/pkg/grpc"
"FRMS/internal/pkg/logging"
"context"
"fmt"
"net"
"context"
// "FRMS/internal/pkg/system"
"FRMS/internal/pkg/logging"
"google.golang.org/grpc"
pb "FRMS/internal/pkg/grpc"
)
/*
Originally this package served as a client listener to route requests
I am going to repurpose this to serve as a listener for all gRPC requests
should simplify interfaces
Listens on a supplied port and sends incoming clients over a supplied channel
Waits for a response on that channel to send back to the client with DB credentials
*/
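// rough flow, as read from the types below:
// client ping -> ClientDiscoveryHandler -> ClientPacket sent over ClientConnections
// -> coordinator fills a ClientResponse{Port + DB creds} on the Response chan
// -> handler wraps it in a pb.ClientResponse for the gRPC reply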
type Listener struct { // exporting for easy use in the short term
Port int
Coordinators map[string]*Coordinator
CLis *grpc.Server
Sys *SystemViewer
Port int `mapstructure:"lis"`
ClientConnections chan *ClientPacket
Err chan error
pb.UnimplementedHandshakeServer
}
type ClientPacket struct {
*Client
Response chan *ClientResponse
}
type Client struct {
// general client struct to store reqs from reactors/tui
Ip string
Port int
Id uint32
//Ip string
//Port int
Id int
Model string
Type string
}
func NewListener(ch chan error, port int) *Listener {
c := make(map[string]*Coordinator)
l := &Listener{Err:ch}
l.Coordinators = c
l.Sys = NewSystemViewer()
l.Port = port
type ClientResponse struct {
Port int
URL string
Org string
Token string
Bucket string
}
func NewListener(cch chan *ClientPacket, ech chan error) *Listener {
l := &Listener{Err: ech, ClientConnections: cch}
return l
}
func (l *Listener) Start() {
func (l *Listener) Start() error {
// start grpc server and implement receiver
if err := l.Register(); err != nil {
l.Err <- err
}
go l.Sys.Start()
// listener started and grpc handler registered
logging.Debug(logging.DStart,"Started client listener on port %v\n",l.Port)
//fmt.Printf("==========================\n PORT: %v\n==========================\n",l.Port)
logging.Debug(logging.DStart, "LIS 01 Started client listener")
return l.Register()
}
func (l *Listener) Register() error {
// creates a gRPC service and binds it to our handler
lis, err := net.Listen("tcp", fmt.Sprintf(":%v",l.Port)) // either binding to supplied port or binding to docker default
lis, err := net.Listen("tcp", fmt.Sprintf(":%v", l.Port))
if err != nil {
return err
}
grpcServer := grpc.NewServer()
pb.RegisterHandshakeServer(grpcServer, l)
go grpcServer.Serve(lis)
logging.Debug(logging.DStart, "LIS Registered on port %v", l.Port)
//lis, err = net.Listen("tcp", fmt.Sprintf(":%v",l.Port+1)) // either binding to supplied port or binding to docker default
//if err != nil {
//return err
//}
//grpcServer = grpc.NewServer()
//l.CLis = grpcServer
//go grpcServer.Serve(lis)
//logging.Debug(logging.DStart, "LIS Coordinator server registered on port %v", l.Port + 1)
logging.Debug(logging.DStart, "LIS 01 Registered on port %v", l.Port)
return nil
}
func (l *Listener) ClientDiscoveryHandler(ctx context.Context, ping *pb.ClientRequest) (*pb.ClientResponse, error) {
// incoming reactor ping need to spawn coord
c := &Client{Id:ping.GetClientId(),Type:ping.GetClientType()}
logging.Debug(logging.DClient, "%v %v has connected\n",c.Type,c.Id)
coord, ok := l.Coordinators[c.Type]
if !ok {
logging.Debug(logging.DSpawn,"CCO 01 Created Coordinator")
coord = NewCoordinator(c.Type, l.Sys, l.Err)
l.Coordinators[c.Type] = coord
go coord.Start()
}
port := coord.ClientHandler(c)
// return the port for the incoming requests
return &pb.ClientResponse{ClientId:c.Id,ServerPort:uint32(port)}, nil
// incoming client ping, notify coord and wait for DB credentials to respond
c := &Client{Id: int(ping.GetClientId()), Type: ping.GetClientType()}
logging.Debug(logging.DClient, "LIS %v %v has connected\n", c.Type, c.Id)
// prepare packet to send to coordinator
ch := make(chan *ClientResponse)
p := &ClientPacket{Client: c, Response: ch}
// blocking
l.ClientConnections <- p
resp := <-ch
// prepare object to return to client
db := &pb.Database{URL: resp.URL, ORG: resp.Org, Token: resp.Token, Bucket: resp.Bucket}
return &pb.ClientResponse{ClientId: uint32(c.Id), ServerPort: uint32(resp.Port), Database: db}, nil
}

@ -2,113 +2,28 @@ package server
import (
//"log"
"time"
"math"
"sync"
"errors"
"context"
"FRMS/internal/pkg/logging"
pb "FRMS/internal/pkg/grpc" // unimplemented base methods
_ "context"
)
// this package will implement a boilerplate manager
// will condense into the rm soon enough
// manager connects to client on start and returns the gRPC connection to make gRPC clients
type Manager struct {
*Client // gives access to c.Ip c.Id etc
Hb time.Duration // used for managing hb timer for client
Active active
Sig chan bool
Err chan error
}
type active struct{
sync.Mutex
bool
int
}
func NewManager(err chan error) *Manager {
hb := time.Duration(1 * time.Second) //hb to
m := &Manager{Hb:hb,Err:err}
return m
}
func (m *Manager) Start() {
if !m.Activate() {
// manager already running
m.Err <-errors.New("Manager already running!")
} // if we get here, manager is atomically activated and we can ensure start wont run again
}
func (m *Manager) Exit() {
// exit function to eventually allow saving to configs
if !m.Deactivate() {
m.Err <-errors.New("Manager already disabled!")
}
}
func (m *Manager) UpdateClient(cl *Client) {
logging.Debug(logging.DClient,"MAN Updating client %v",cl.Id)
m.Client = cl
}
// reactor manager atomic operations
func (m *Manager) IsActive() bool {
m.Active.Lock()
defer m.Active.Unlock()
return m.Active.bool
}
func (m *Manager) Activate() bool {
// slightly confusing but returns result of trying to activate
m.Active.Lock()
defer m.Active.Unlock()
alive := m.Active.bool
if alive {
return false
} else {
m.Active.bool = true
m.Active.int = 0
return m.Active.bool
}
}
func (m *Manager) Deactivate() bool {
// result of trying to deactivate
m.Active.Lock()
defer m.Active.Unlock()
alive := m.Active.bool
if alive {
m.Active.bool = false
return true
} else {
return m.Active.bool
}
}
// connection stuff
func (m *Manager) Timeout() int {
// keeps track of and generates timeouts from 5 ms up to ~1.28 s over a span of ~2.5 s
// returns 0 once the retries are exhausted
m.Active.Lock()
defer m.Active.Unlock()
if m.Active.int < 9 {
v := int(5 * math.Pow(float64(2), float64(m.Active.int)))
m.Active.int += 1
return v
} else {
// exceeded retries
return 0
}
}
func (m *Manager) GetDevices(ctx context.Context, req *pb.GetDevicesRequest) (*pb.GetDevicesResponse, error) {
return &pb.GetDevicesResponse{}, errors.New("Get Devices not implemented!")
}
func (m *Manager) ReactorStatusHandler(ctx context.Context, req *pb.ReactorStatusPing) (*pb.ReactorStatusResponse, error) {
return &pb.ReactorStatusResponse{}, errors.New("Reactor Status Handler not implemented!")
}
// type ClientManager struct {
// *Client // gives access to c.Ip c.Id etc
// Hb time.Duration // used for managing hb timer for client
// Sig chan bool
// sync.Mutex
// }
// func NewClientManager(cl *Client) *ClientManager {
// return &ClientManager{Client: cl}
// }
// func (m *ClientManager) UpdateClient(cl *Client) error {
// m.Lock()
// defer m.Unlock()
// logging.Debug(logging.DClient, "MAN Updating client %v", cl.Id)
// m.Client = cl
// return nil
// }

@ -1,149 +1,149 @@
package server
import (
"fmt"
pb "FRMS/internal/pkg/grpc"
"FRMS/internal/pkg/logging"
"FRMS/internal/pkg/manager"
"time"
_ "log"
//"FRMS/internal/pkg/device"
"context"
"sync"
"FRMS/internal/pkg/logging"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
"google.golang.org/grpc/credentials/insecure"
pb "FRMS/internal/pkg/grpc"
"fmt"
_ "log"
"github.com/spf13/viper"
)
// this package will implement a reactor coordinator and associated go routines
// manager stuff
type ReactorManager struct {
*Manager
StatusMon *StatusMonitor
*devstatus
type Manager interface {
Start() error // status checks
Exit() error
Timeout() (time.Duration, error) // TO Generator
}
type devstatus struct {
sync.Mutex
Devs map[uint32]*DeviceInfo
func NewManager(max int) Manager {
// takes the max number of connection attempts
return manager.New(max)
}
func NewReactorManager(c *Client,sys *SystemViewer,err chan error) GeneralManager {
r := &ReactorManager{}
di := make(map[uint32]*DeviceInfo)
r.devstatus = &devstatus{Devs:di}
r.Manager = NewManager(err)
r.StatusMon = NewStatusMonitor("Reactor",c.Id,sys)
return r
type ReactorManager struct {
Manager // base manager interface
// *ClientManager // client manager (OUTDATED)
*Client // access to ID etc
// StatusMon *StatusMonitor putting on pause
// *ReactorDevices
Config *viper.Viper // config to update
Err chan error
}
func (r *ReactorManager) Start() {
r.Manager.Start()
logging.Debug(logging.DStart,"RMA %v starting", r.Id)
go r.StatusMon.Send(&DeviceInfo{Id:r.Id,Type:"Reactor",Status:"[green]ONLINE[white]"},"Reactor")
//conn := r.Connect()
//empty := &grpc.ClientConn{}
//if conn != empty {
// type ReactorDevices struct {
// // device struct
// Devices map[int]DeviceManager
// sync.RWMutex
// }
}
func (r *ReactorManager) Exit() {
r.Manager.Exit()
logging.Debug(logging.DExit, "RMA %v exiting", r.Id)
go r.StatusMon.Send(&DeviceInfo{Id:r.Id,Type:"Reactor",Status:"[red]OFFLINE[white]",Data:fmt.Sprintf("Last Seen %v",time.Now().Format("Mon at 03:04:05pm MST"))},"Reactor")
r.devstatus.Lock()
defer r.devstatus.Unlock()
for _, d := range r.Devs {
newd := d
newd.Status = "[yellow]UNKNOWN[white]"
r.Devs[newd.Id] = newd
go r.StatusMon.Send(newd,"Device")
func NewReactorManager(cl *Client, config *viper.Viper, errCh chan error) *ReactorManager {
// making managers
m := NewManager(6)
r := &ReactorManager{
Manager: m,
Client: cl,
Config: config,
Err: errCh,
}
return r
}
func (r *ReactorManager) Connect() *grpc.ClientConn {
// establish gRPC conection with reactor
var opts []grpc.DialOption
var conn *grpc.ClientConn
opts = append(opts,grpc.WithTransportCredentials(insecure.NewCredentials()))
for {
if !r.IsActive() {
logging.Debug(logging.DClient,"RMA %v No longer active, aborting connection attempt",r.Id)
return &grpc.ClientConn{}
}
var err error
conn, err = grpc.Dial(fmt.Sprintf("%v:%v",r.Ip,r.Port),opts...)
// error handling
code := status.Code(err)
if code != 0 { // != OK
if code == 5 || code == 14 { // unavailable or not found
to := r.Timeout()
if to == 0 {
logging.Debug(logging.DClient,"RMA %v Client not responding",r.Id)
return &grpc.ClientConn{}
func (r *ReactorManager) Start() error {
// allows for extra stuff
logging.Debug(logging.DStart, "RMA %v starting", r.Id)
return r.Manager.Start()
//go r.StatusMon.Send(&DeviceInfo{Id: r.Id, Type: "Reactor", Status: "[green]ONLINE[white]"}, "Reactor")
}
logging.Debug(logging.DClient,"RMA %v Client currently down, retrying in %v ms",r.Id, to)
time.Sleep(time.Duration(to) * time.Millisecond)
} else {
logging.Debug(logging.DError,"RMA %v GRPC ERROR: %v",r.Id, code)
r.Err <- err
}
}
break;
func (r *ReactorManager) Exit() error {
// allows for extra stuff
logging.Debug(logging.DExit, "RMA %v exiting", r.Id)
return r.Manager.Exit()
//go r.StatusMon.Send(&DeviceInfo{Id: r.Id, Type: "Reactor", Status: "[red]OFFLINE[white]", Data: fmt.Sprintf("Last Seen %v", time.Now().Format("Mon at 03:04:05pm MST"))}, "Reactor")
}
return conn
func (r *ReactorManager) UpdateClient(cl *Client) error {
// this is probably unnecessary
fmt.Printf("Reactor Manager %d updating client!\n", r.Id)
r.Client = cl
return nil
}
func (r *ReactorManager) ReactorStatusHandler(ctx context.Context, req *pb.ReactorStatusPing) (*pb.ReactorStatusResponse, error) {
// function client will call to update reactor information
//go r.PingReset()
fmt.Printf("Received ping from %d!\n", req.GetId())
// update devices/sensors
for _, dev := range req.GetDevices() {
d := &DeviceInfo{Id:uint32(dev.GetAddr()),Type:dev.GetType(),Status:dev.GetStatus(),Data:dev.GetData()}
go r.UpdateDevice(d)
fmt.Printf("Device %d is %s ", dev.GetAddr(), dev.GetStatus().String())
}
return &pb.ReactorStatusResponse{Id:r.Id}, nil
fmt.Printf("\n")
// go r.UpdateDevices(req.GetDevices())
return &pb.ReactorStatusResponse{Id: int32(r.Id)}, nil
}
// // device stuff
/*
func (r *ReactorManager) Monitor(conn *grpc.ClientConn) {
defer conn.Close()
client := pb.NewMonitoringClient(conn)
for r.IsActive() {
req := &pb.ReactorStatusRequest{Id:r.Id}
resp, err := client.GetReactorStatus(context.Background(),req)
code := status.Code(err)
if code != 0 { // if != OK
logging.Debug(logging.DClient,"RMA %v Reactor not responding! Code: %v\n", r.Id,code)
r.devstatus.Lock()
for _, d := range r.Devs {
newd := d
newd.Status = "[yellow]UNKNOWN[white]"
r.Devs[newd.Id] = newd
go r.StatusMon.Send(newd,"Device")
}
r.devstatus.Unlock()
r.Exit()
break;
}
for _,v := range resp.GetDevices() {
d := &DeviceInfo{Id:uint32(v.GetAddr()),Type:v.GetType(),Status:v.GetStatus(),Data:v.GetData()}
go r.UpdateDevice(d)
}
time.Sleep(r.Hb) // time between sensor pings
}
}
*/
func (r *ReactorManager) UpdateDevice(d *DeviceInfo) {
r.devstatus.Lock()
defer r.devstatus.Unlock()
if olddev, ok := r.Devs[d.Id]; !ok {
// new device
r.Devs[d.Id] = d
go r.StatusMon.Send(d,"Device")
} else if olddev.Status != d.Status || olddev.Data != d.Data {
// dev status or data has changed
r.Devs[d.Id] = d
go r.StatusMon.Send(d,"Device")
}
}
// type DeviceManager interface {
// LoadConfig() error
// UpdateStatus(string) error
// String() string // printing
// }
// func NewDeviceManager(addr int, config *viper.Viper, prefix string) (DeviceManager, error) {
// // returns a manager struct
// return device.NewDeviceManager(addr, config, prefix)
// }
//func (r *ReactorManager) UpdateDevices(devs []*pb.Device) {
// // pass updates to correct manager
// r.ReactorDevices.RLock() // read lock only
// defer r.ReactorDevices.RUnlock()
// for _, dev := range devs {
// // looping over devs
// if dm, ok := r.ReactorDevices.Devices[int(dev.GetAddr())]; ok {
// // device manager found
// go dm.UpdateStatus(dev.GetStatus().String())
// //fmt.Println(dm)
// } else {
// // not found
// go r.AddDevice(dev, r.Id, r.Config, r.Err)
// }
// }
//}
// func (r *ReactorDevices) AddDevice(dev *pb.Device, id int, config *viper.Viper, errCh chan error) {
// // setting vars
// prefix := fmt.Sprintf("reactors.%d.", id)
// addr := int(dev.GetAddr())
// var dm DeviceManager
// var err error
// // write locking
// r.Lock()
// defer r.Unlock()
// if dm, err = NewDeviceManager(addr, config, prefix); err != nil {
// errCh <- err
// }
// // setting status
// if err = dm.UpdateStatus(dev.GetStatus().String()); err != nil {
// errCh <- err
// }
// // loading config
// if err = dm.LoadConfig(); err != nil {
// errCh <- err
// }
// r.Devices[int(addr)] = dm
// }

@ -1,19 +1,11 @@
package server
import (
"sync"
_ "fmt"
"FRMS/internal/pkg/logging"
// sensor components
)
// allows for multiple readers/writers
type DeviceInfo struct {
Id uint32
Type string
Status string
Data string
Index uint32
TransactionId uint32
}
/*
type StatusMonitor struct {
// allows for embedding into managers
@ -26,24 +18,25 @@ type StatusMonitor struct {
}
type devbuf struct {
ReactorId uint32 // reactor we are looking at, if any
Buf map[string]map[uint32]*DeviceInfo // convenient way to store/separate device data
ReactorId int // reactor we are looking at, if any
Buf map[string]map[int]*DeviceInfo // convenient way to store/separate device data
sync.Mutex
}
func NewBuffer() map[string]map[uint32]*DeviceInfo {
rbuf := make(map[uint32]*DeviceInfo)
dbuf := make(map[uint32]*DeviceInfo)
sbuf := make(map[string]map[uint32]*DeviceInfo)
func NewBuffer() map[string]map[int]*DeviceInfo {
rbuf := make(map[int]*DeviceInfo)
dbuf := make(map[int]*DeviceInfo)
sbuf := make(map[string]map[int]*DeviceInfo)
sbuf["Reactor"] = rbuf
sbuf["Device"] = dbuf
return sbuf
}
func NewStatusMonitor(t string, id uint32, sys *SystemViewer) *StatusMonitor {
func NewStatusMonitor(t string, id int, sys *SystemViewer) *StatusMonitor {
tid := make(chan uint32)
sm := &StatusMonitor{TransactionId: tid}
sm.SystemViewer = sys
logging.Debug(logging.DClient, "SYS Creating new status monitor")
if t == "Reactor" {
// reactor status monitor
sm.ReactorChan = sys.AddReactorSender()
@ -189,6 +182,7 @@ func (s *InfoStream) Start() {
// consistency
go s.Listen()
}
// goal is to hook every new manager into the reactor status chan
func (s *InfoStream) AddSender() chan *DeviceInfo {
return s.Stream
@ -226,7 +220,7 @@ func (l *listeners) Echo(d *DeviceInfo) {
}
}
func (s *InfoStream) AddListener(id uint32, ch chan *DeviceInfo) map[uint32]*DeviceInfo {
func (s *InfoStream) AddListener(id int, ch chan *DeviceInfo) map[uint32]*DeviceInfo {
// if i get a memory leak ill eat my shoe
s.listeners.Lock()
defer s.listeners.Unlock()
@ -242,7 +236,7 @@ func (s *InfoStream) AddListener(id uint32, ch chan *DeviceInfo) map[uint32]*Dev
return s.Layout.Devs
}
func (l *listeners) RemoveListener(id uint32) {
func (l *listeners) RemoveListener(id int) {
l.Lock()
defer l.Unlock()
if lis, ok := l.Listeners[id]; ok {
@ -296,7 +290,7 @@ func (s *SystemViewer) AddDeviceSender(reactorId uint32) chan *DeviceInfo {
return ds.AddSender()
}
func (s *SystemViewer) AddListener(id, rid uint32) (chan *DeviceInfo, map[uint32]*DeviceInfo) {
func (s *SystemViewer) AddListener(id, rid int) (chan *DeviceInfo, map[uint32]*DeviceInfo) {
// returns a listener for that chan
ch := make(chan *DeviceInfo)
if rid != 0 {
@ -306,9 +300,10 @@ func (s *SystemViewer) AddListener(id, rid uint32) (chan *DeviceInfo, map[uint32
}
}
func (s *SystemViewer) RemoveListener(rid, tid uint32) {
func (s *SystemViewer) RemoveListener(rid, tid int) {
// removes chan for specific tid and rid
s.DeviceStream.Lock()
defer s.DeviceStream.Unlock()
go s.DeviceStream.Reactors[rid].RemoveListener(tid)
}
*/

@ -1,121 +0,0 @@
package server
import (
// "fmt"
"time"
"sync"
// "net"
// "log"
"context"
"FRMS/internal/pkg/logging"
// "google.golang.org/grpc"
pb "FRMS/internal/pkg/grpc"
)
// implement tui specific manager to be called for each client conn
type TUIManager struct {
*Manager // embedded manager for access to methods and client
StatusMon *StatusMonitor // use it for all devs coming in
Err chan error
*Timeout
*pb.UnimplementedManagementServer
}
type Timeout struct {
Alert chan bool
LastSeen time.Time
TO time.Duration
sync.Mutex
}
func NewTUIManager(c *Client, sys *SystemViewer, err chan error) GeneralManager {
m := NewManager(err)
t := &TUIManager{Err: err}
alert := make(chan bool)
t.Timeout = &Timeout{Alert:alert,TO:time.Duration(2500*time.Millisecond)} // short time outs are fine because we will just rejoin
t.Manager = m
t.StatusMon = NewStatusMonitor("TUI",c.Id,sys)
t.Manager.UpdateClient(c)
return t
}
func (t *TUIManager) Start() {
//
t.PingReset()
t.Manager.Start()
logging.Debug(logging.DStart,"TMA %v starting", t.Id)
go t.Timeoutd()
//go t.Monitor(conn)
}
func (t *TUIManager) Exit() {
t.Manager.Exit()
logging.Debug(logging.DExit,"TMA %v exiting",t.Id)
}
func (t *Timeout) PingReset() {
t.Lock()
defer t.Unlock()
t.LastSeen = time.Now()
}
func (t *TUIManager) Timeoutd() {
for t.IsActive() {
if sleep, elapsed := t.Elapsed(); elapsed {
// timeout elapsed
logging.Debug(logging.DClient,"TMA %v client not responding", t.Id)
t.Exit()
} else {
time.Sleep(sleep)
}
}
}
func (t *Timeout) Elapsed() (time.Duration, bool) {
t.Lock()
defer t.Unlock()
now := time.Now()
if now.After(t.LastSeen.Add(t.TO)) {
// timeout expired
return 0 * time.Second, true
} else {
sleep := t.LastSeen.Add(t.TO).Sub(now)
return sleep, false
}
}
// tui client requests and logic will be down here
func (t *TUIManager) GetDevices(ctx context.Context, req *pb.GetDevicesRequest) (*pb.GetDevicesResponse, error) {
go t.PingReset()
devices := []*pb.Dev{}
resp := &pb.GetDevicesResponse{ClientId:t.Id,Devices:devices}
if req.GetReactorId() > 0 || req.GetRefresh() {
logging.Debug(logging.DClient,"TMA %v client requested devs from %v",t.Id,req.GetReactorId())
resp.ReactorId = req.GetReactorId()
t.StatusMon.UpdateListener(t.Id, req.GetReactorId())
}
devs := t.StatusMon.GetBuffer() // always empty buffer
for _, v := range devs {
resp.Devices = append(resp.Devices, &pb.Dev{Id:v.Id,Type:v.Type,Status:v.Status,Data:v.Data,Index:v.Index})
}
if len(resp.Devices) > 0 {
logging.Debug(logging.DClient,"TMA %v sending %v devices to client" ,t.Id, len(resp.Devices))
}
return resp, nil
}
func (t *TUIManager) DeleteReactors(ctx context.Context, req *pb.DeleteReactorRequest) (*pb.DeleteReactorResponse, error) {
go t.PingReset()
//
return &pb.DeleteReactorResponse{}, nil
}
func (t *TUIManager) DeleteReactorDevice(ctx context.Context, req *pb.DeleteReactorDeviceRequest) (*pb.DeleteReactorDeviceResponse, error) {
go t.PingReset()
//
return &pb.DeleteReactorDeviceResponse{}, nil
}

@ -1,29 +1,19 @@
// package system uses linux commands to get hardware info from devices
package system
import (
"os/exec"
"bytes"
"errors"
"fmt"
"hash/fnv"
"strings"
"net"
"fmt"
"errors"
"os/exec"
"strings"
)
// this package serves to add in wrappers for system commands to get hwinfo from the board
// this package does not actually use the hwinfo command, but rather gathers hardware info from a variety of commands
// command for model :
// lshw -C system 2>/dev/null | head -n 1
// command for ip && mac address :
// ifconfig eth0 | awk '/inet |ether / {print $2}'
// can combine and dump into file using
// lshw -C system 2>/dev/null | head -n 1 > hwinfo.txt && ifconfig eth0 | awk '/inet |ether / {print $2}' >> hwinfo.txt
// *** will just replace info in file everytime
func GetId(eth string) (uint32, error) {
maccmd := fmt.Sprintf("ifconfig %v | awk '/ether / {print $2}'", eth)
func GetId() (int, error) {
// gets the mac address and hashes into consistent id
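// e.g. "ab:cd:ef:01:23:45" always fnv-hashes to the same 32-bit value,
// so a reactor keeps its id across restarts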
maccmd := "ifconfig eth0 | awk '/ether / {print $2}'" // eth0 assumed now that the interface arg is gone
var stderr bytes.Buffer
var out bytes.Buffer
cmd := exec.Command("bash", "-c", maccmd)
@ -35,11 +25,11 @@ func GetId(eth string) (uint32, error) {
hash := fnv.New32a()
hash.Write(out.Bytes())
id := hash.Sum32()
return id, nil
return int(id), nil
}
func GetIp(eth string) (string,error) {
ipcmd := fmt.Sprintf("ifconfig %v | awk '/inet / {print $2}'",eth)
func GetIp() (string, error) {
ipcmd := `ip route get 1 | sed 's/^.*src \([^ ]*\).*$/\1/;q'` // raw string so the sed escapes survive
var stderr bytes.Buffer
var out bytes.Buffer
cmd := exec.Command("bash", "-c", ipcmd)
@ -54,6 +44,7 @@ func GetIp(eth string) (string,error) {
}
func GetPort() (int, error) {
// obsolete
if addr, err := net.ResolveTCPAddr("tcp", ":0"); err != nil {
return 0, err
} else if lis, err := net.ListenTCP("tcp", addr); err != nil {
@ -65,28 +56,25 @@ func GetPort() (int, error) {
}
func GetBus() (int, error) {
bus := map[string]int{"raspberrypi":1,"beaglebone":2}
devname := "lshw -C system 2>/dev/null | head -n 1"
var stderr, out bytes.Buffer
cmd := exec.Command("bash","-c",devname)
cmd.Stdout = &out
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
return 0, err
}
b := out.String()
b = strings.Trim(b," \n")
if bs, ok := bus[b]; !ok {
return 0, errors.New(fmt.Sprintf("No bus for dev %v", b))
} else {
return bs, nil
// preset busses
busList := map[string]int{"raspberrypi": 1, "beaglebone": 2}
// vars
var bus int
var ok bool
if name, err := GetModel(); err != nil {
return bus, err
} else if bus, ok = busList[name]; !ok {
return 0, errors.New(fmt.Sprintf("No bus for dev %s", name))
}
// returns correct bus
return bus, nil
}
func GetModel() (string, error) {
devname := "lshw -C system 2>/dev/null | head -n 1"
var stderr, out bytes.Buffer
cmd := exec.Command("bash","-c",devname)
cmd := exec.Command("bash", "-c", "hostname")
cmd.Stdout = &out
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {

@ -1,147 +0,0 @@
package tui
import (
"sync"
"fmt"
"log"
"time"
"math"
"context"
"FRMS/internal/pkg/system"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
"google.golang.org/grpc/credentials/insecure"
pb "FRMS/internal/pkg/grpc"
)
// this package will interact with the server to get system status
type TUIClient struct {
Id uint32
Ip string
Port int
ClientConn *grpc.ClientConn
Active
}
type Active struct {
sync.Mutex
bool
int
}
func NewTUIClient(ip string, port int, ifconfig string) *TUIClient {
id, err := system.GetId(ifconfig)
if err != nil {
log.Fatal(err)
}
t := &TUIClient{Id:id,Ip:ip,Port:port}
return t
}
func (t *TUIClient) Start() error {
t.Connect()
return nil
}
func (t *TUIClient) Timeout() int {
t.Active.Lock()
defer t.Active.Unlock()
if t.Active.int < 9 {
v := int( 5 * math.Pow(float64(2), float64(t.Active.int)))
t.Active.int += 1
return v
} else {
return 0
}
}
func (t *TUIClient) Connect() {
// connect to server and register as client
var conn *grpc.ClientConn
var err error
var opts []grpc.DialOption
opts = append(opts,grpc.WithTransportCredentials(insecure.NewCredentials()))
for {
conn, err = grpc.Dial(fmt.Sprintf("%v:%v",t.Ip,t.Port),opts...)
code := status.Code(err)
if code != 0 {
if code == 5 || code == 14 { // unavailable or not found
to := t.Timeout()
if to == 0 {
log.Fatal("Failed to connect to central server")
}
fmt.Printf("Server currently down, reconnecting in %v ms\n",to)
time.Sleep(time.Duration(to) * time.Millisecond)
} else {
log.Fatal("Central server currently unavailable")
}
}
//t.client = pb.NewManagementClient(conn)
break;
}
// handle handshake logic here
client := pb.NewHandshakeClient(conn)
req := &pb.ClientRequest{ClientId:t.Id,ClientType:"tui"}
resp, err := client.ClientDiscoveryHandler(context.Background(),req)
if err != nil {
log.Fatal(err)
}
conn.Close() // closing old connection
// setting up server connection with provided port
t.Port = int(resp.GetServerPort())
for {
conn, err = grpc.Dial(fmt.Sprintf("%v:%v",t.Ip,t.Port),opts...)
code := status.Code(err)
if code != 0 {
if code == 5 || code == 14 { // unavailable or not found
to := t.Timeout()
if to == 0 {
log.Fatal("Failed to connect to central server")
}
fmt.Printf("Server currently down, reconnecting in %v ms\n",to)
time.Sleep(time.Duration(to) * time.Millisecond)
} else {
log.Fatal("Central server currently unavailable")
}
}
t.ClientConn = conn
break;
}
}
func (t *TUIClient) GetDevices(id ...uint32) (map[uint32]*Device, error) {
// returns
req := &pb.GetDevicesRequest{ClientId:t.Id}
if len(id) > 0 {
if id[0] == 0 {
req.Refresh = true
} else {
req.ReactorId = id[0]
}
}
r := make(map[uint32]*Device)
client := pb.NewManagementClient(t.ClientConn)
resp, err := client.GetDevices(context.Background(), req)
if err != nil {
return r, err
}
for _, v := range resp.GetDevices() {
r[v.GetId()] = &Device{Type:v.GetType(),Status:v.GetStatus(),Id:v.GetId(),Data:v.GetData(),Index:v.GetIndex()}
}
return r, err
}
func (t *TUIClient) DeleteReactor(id uint32) error {
req := &pb.DeleteReactorRequest{}
client := pb.NewManagementClient(t.ClientConn)
_, err := client.DeleteReactor(context.Background(), req)
return err
}
func (t *TUIClient) DeleteReactorDevice(id uint32, addr int) error {
req := &pb.DeleteReactorDeviceRequest{}
client := pb.NewManagementClient(t.ClientConn)
_, err := client.DeleteReactorDevice(context.Background(), req)
return err
}

@ -1,232 +0,0 @@
package tui
import (
"fmt"
"log"
"sync"
"strconv"
"strings"
"time"
"FRMS/internal/pkg/logging"
"github.com/rivo/tview"
_ "github.com/gdamore/tcell/v2"
"os"
)
type Device struct {
Id uint32
Type string
Status string
Data string
Index uint32
}
type TUI struct {
*Display
*TUIClient
SelectedReactor <-chan uint32
SelectedDevice <-chan uint32
Err chan error
}
func NewTUI(ip string, port int, ifconfig string, ch chan error) *TUI {
t := &TUI{}
t.Err = ch
client := NewTUIClient(ip, port, ifconfig)
t.TUIClient = client
return t
}
func (t *TUI) Start() {
// setup tview app and wait for user connection in standin modal
if err := t.TUIClient.Start(); err != nil {
t.Err <- err
}
logging.Debug(logging.DStart, "TUI %v starting", t.Id)
go t.Monitor()
t.CreateDisplay()
t.Display.Start()
}
func (t *TUI) CreateDisplay() {
rc := make(chan uint32)
dc := make(chan uint32)
t.Display = NewDisplay(rc,dc)
t.SelectedReactor = rc
t.SelectedDevice = dc
t.Flex.AddItem(t.ReactorList,0,1,true).
AddItem(t.DeviceList,0,2,false)
}
func (t *TUI) Monitor() {
// orchestrates updates and grpc requests
timer := make(chan struct{})
go func(signal chan struct{}){
for {
signal <- struct{}{}
time.Sleep(1 * time.Second)
}
}(timer)
for {
select {
case reactor := <-t.SelectedReactor:
// reactor has been selected in tui, grabbing devs
t.App.QueueUpdateDraw(func() {
t.UpdateDevices(reactor)
})
logging.Debug(logging.DClient, "%v getting reactor devices", t.Id)
case dev := <-t.SelectedDevice:
logging.Debug(logging.DClient, "%v editing device %v", t.Id, dev)
// TODO
case <-timer:
// time to ping for status
logging.Debug(logging.DClient, "%v pinging for updates", t.Id)
t.App.QueueUpdateDraw(func() {
t.UpdateDevices()
})
}
}
}
func (t *TUI) UpdateDevices(r ...uint32) {
// get devices for the reactor and update the tui
// see if there is a page being displayed
// overwrite if called as a func
var devs map[uint32]*Device
var err error
if len(r) > 0 {
// could be a reactor id or 1 for update reactors
if r[0] != 0 {
t.Display.DeviceList.Clear()
} else {
t.ReactorList.Clear()
t.ReactorList.AddItem("Refresh","Press (r) to refresh", 114, nil)
t.ReactorList.AddItem("Quit","Press (q) to quit",113,func() {
t.App.Stop()
os.Exit(0)
})
}
devs, err = t.TUIClient.GetDevices(r[0])
} else {
devs, err = t.TUIClient.GetDevices()
}
if err != nil {
log.Fatal(err)
}
//if id != 0 {
// split based on type to simplify update
reactors := make(map[uint32]*Device)
devices := make(map[uint32]*Device)
for id, dev := range devs {
if dev.Type == "Reactor" {
reactors[id] = dev
} else {
devices[id] = dev
}
}
t.DisplayDevices(devices)
t.DisplayReactors(reactors)
}
// display struct and logic
type Display struct {
App *tview.Application
Flex *tview.Flex
ReactorList *tview.List
DeviceList *tview.List
SelectedReactor chan<- uint32
SelectedDevice chan<- uint32
sync.Mutex
}
func NewDisplay(rc,dc chan uint32) *Display {
d := &Display{}
d.App = tview.NewApplication()
d.Flex = tview.NewFlex()
d.DeviceList = tview.NewList().SetSelectedFocusOnly(true)
d.ReactorList = tview.NewList()
d.ReactorList.AddItem("Refresh","Press (r) to refresh manually", 114, nil)
d.ReactorList.AddItem("Quit","Press (q) to quit",113,func() {
d.App.Stop()
os.Exit(0)
})
d.ReactorList.SetTitle("Reactors").SetBorder(true)
d.ReactorList.SetSelectedFunc(d.SelectReactor)
d.DeviceList.SetTitle("Devices").SetBorder(true)
d.DeviceList.SetSelectedFunc(d.SelectDevice)
d.SelectedReactor = rc
d.SelectedDevice = dc
return d
}
func (d *Display) Start() {
if err := d.App.SetRoot(d.Flex, true).Run(); err != nil {
d.App.Stop()
log.Fatal(err)
}
}
func (d *Display) DisplayReactors(r map[uint32]*Device) {
// this func takes in a list of devices to update and loops over them
// works by padding list for entries not seen yet
for _, reactor := range r {
txt := fmt.Sprintf("%v %v", reactor.Id, reactor.Status)
indx := int(reactor.Index)
for indx + 2 >= d.ReactorList.GetItemCount() {
// this prevents overwriting the quit entry
d.ReactorList.InsertItem(-3,txt,reactor.Data,rune(47+d.ReactorList.GetItemCount()),nil)
}
if indx + 2 < d.ReactorList.GetItemCount() {
d.ReactorList.SetItemText(indx,txt,reactor.Data)
}
}
}
func (d *Display) DisplayDevices(devs map[uint32]*Device) {
// going to just clear every time as we reload new dev lists anyway
// going to clear on every reactor selection to simplify
// can probably just load from SM to save system resources on spam reloading
for _, dev := range devs {
logging.Debug(logging.DClient,"Displaying device %v",dev)
txt := fmt.Sprintf("0x%x %v %v",dev.Id,dev.Status,dev.Type)
indx := int(dev.Index)
for indx >= d.DeviceList.GetItemCount() {
d.DeviceList.AddItem(txt,dev.Data,rune(49+d.DeviceList.GetItemCount()), nil)
}
if indx < d.DeviceList.GetItemCount() {
d.DeviceList.SetItemText(indx,txt,dev.Data)
}
}
}
func (d *Display) SelectReactor(index int, main, data string, r rune) {
// called when reactor in list in selected
if main != "Quit" {
if main == "Refresh" {
// TODO
} else {
maintxt := strings.Split(main," ")
id := maintxt[0]
if id, err := strconv.ParseUint(id, 10, 32); err != nil {
log.Fatal(err)
} else {
d.SelectedReactor <-uint32(id)
}
}
}
}
func (d *Display) SelectDevice(index int, main, data string, r rune) {
// called when device is selected in sub menu
maintxt := strings.Split(main," ")
id := maintxt[0]
id = strings.Trim(id,"0x \n")
logging.Debug(logging.DClient,"Selected dev %v", id)
if id, err := strconv.ParseUint(id, 16, 32); err != nil {
logging.Debug(logging.DError, "Error parsing: %v", err)
os.Exit(1)
} else {
d.SelectedDevice <-uint32(id)
}
}

@ -0,0 +1,89 @@
package websocket
// creates websocket server and upgrades incoming connections
import (
"encoding/json"
"fmt"
"net/http"
ws "github.com/gorilla/websocket"
)
type ReactorTest struct {
Id int `json:"id"`
Name string `json:"name"`
}
type WebSocket struct {
// dummy struct for interface
N string
}
func New() *WebSocket {
return &WebSocket{}
}
func (s *WebSocket) Start() {
fmt.Println("Starting ws server!")
setupRoutes()
if err := http.ListenAndServe(":8080", nil); err != nil {
panic(err)
}
}
// default opts allow all origins
var upgrader = ws.Upgrader{
CheckOrigin: func(r *http.Request) bool { return true },
}
// reader
func reader(conn *ws.Conn) {
for {
// read forever
//messageType, p, err := conn.ReadMessage()
_, p, err := conn.ReadMessage()
if err != nil {
if ws.IsCloseError(err, ws.CloseNormalClosure, ws.CloseGoingAway) {
// normally closed
return
}
panic(err)
}
fmt.Printf("Msg: %s\n", string(p))
}
}
func serverWs(w http.ResponseWriter, r *http.Request) {
fmt.Println(r.Host)
websocket, err := upgrader.Upgrade(w, r, nil)
if err != nil {
panic(err)
}
// try sending reactor
t1 := &ReactorTest{Id: 1111, Name: "test1"}
t2 := &ReactorTest{Id: 1112, Name: "test2"}
t3 := &ReactorTest{Id: 1113, Name: "test3"}
n := []*ReactorTest{t1, t2, t3}
msg, err := json.Marshal(n)
if err != nil {
panic(err)
}
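// msg now holds the JSON array, e.g.:
// [{"id":1111,"name":"test1"},{"id":1112,"name":"test2"},{"id":1113,"name":"test3"}]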
// pass to connection
if err := websocket.WriteMessage(ws.TextMessage, msg); err != nil {
panic(err)
}
// pass to reader
reader(websocket)
}
func setupRoutes() {
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Simple Server")
})
http.HandleFunc("/ws", serverWs)
}
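// minimal client sketch (assumes the server above is running locally):
//
// conn, _, err := ws.DefaultDialer.Dial("ws://localhost:8080/ws", nil)
// if err != nil {
// panic(err)
// }
// _, msg, _ := conn.ReadMessage() // receives the ReactorTest JSON array
// fmt.Printf("Msg: %s\n", string(msg))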

@ -0,0 +1,2 @@
// implements a reactor object with websocket methods
package websocket

@ -0,0 +1,44 @@
#!/bin/bash
display_usage() {
echo "Usage: $0 reactor_type"
}
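# e.g. "$0 pi" builds for linux/arm64 and "$0 bb" for linux/arm/v7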
# checking for help options
if [[ $@ == "--help" || $@ == "-h" ]]
then
display_usage
exit 0
fi
# checking that arguements are not empty
if [[ -z $1 ]]
then
echo "Type of reactor not specified!"
display_usage
exit 1
fi
# checking for valid reactor types
if [[ $1 == "pi" ]]
then
platform="linux/arm64"
elif [[ $1 == "bb" ]]
then
platform="linux/arm/v7"
else
echo "Reactor type $1 not supported!"
echo "Supported reactors include: pi, bb"
display_usage
exit 1
fi
# building reactor image
echo "Building Reactor image for $1 platform=$platform"
# --push is an assumption: the localhost:5000 tag suggests a local registry, and without it the image is discarded by the cleanup below
docker buildx build --rm --push --platform=$platform -f Dockerfile.reactor --tag localhost:5000/reactor .
echo "Cleaning local images"
docker image remove localhost:5000/reactor