merging config
commit
1caa6c78c5
@ -1,8 +1,13 @@
|
||||
*
|
||||
# exluding everything and only allowing directly relevant stuff
|
||||
!cmd/server/main.go
|
||||
!cmd/reactor/main.go
|
||||
!internal
|
||||
!tokens
|
||||
!configs
|
||||
!*.yaml
|
||||
!go.mod
|
||||
!go.sum
|
||||
!server
|
||||
!reactor
|
||||
!.env
|
||||
|
@ -0,0 +1 @@
|
||||
b43ecff1fe53e18c4c9b756b32d38078
|
@ -0,0 +1,22 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
FROM --platform=$BUILDPLATFORM golang:1.18-alpine as builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN go mod download
|
||||
|
||||
ARG TARGETOS TARGETARCH TARGETVARIANT
|
||||
|
||||
RUN if [[ $TARGETVARIANT == "v7" ]]; \
|
||||
then \
|
||||
export GOARM=7; \
|
||||
fi; \
|
||||
CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o /reactor ./cmd/reactor/main.go
|
||||
|
||||
FROM alpine
|
||||
|
||||
COPY --from=builder /reactor .
|
||||
|
||||
ENTRYPOINT [ "./reactor" ]
|
@ -0,0 +1,46 @@
|
||||
version: '3'
|
||||
|
||||
tasks:
|
||||
clean:
|
||||
desc: "clean all of the old binaries"
|
||||
cmds:
|
||||
- rm -v bin/* 2>/dev/null
|
||||
|
||||
all:
|
||||
desc: "cleans and builds all"
|
||||
deps: [clean, bb, server]
|
||||
|
||||
bb:
|
||||
desc: "Builds and sends to the beaglebone"
|
||||
cmds:
|
||||
- task: go-build
|
||||
vars:
|
||||
GOARM: 7
|
||||
GOARCH: "arm"
|
||||
GOOS: "linux"
|
||||
BUILD_DIR: "reactor"
|
||||
- scp bin/reactor_linux_arm debian:~/
|
||||
|
||||
server:
|
||||
desc: "Builds server binary"
|
||||
cmds:
|
||||
- task: go-build
|
||||
vars:
|
||||
BUILD_DIR: "server"
|
||||
GOOS: "{{OS}}"
|
||||
GOARCH: "{{ARCH}}"
|
||||
|
||||
go-build:
|
||||
internal: true
|
||||
cmds:
|
||||
- go build -o bin/{{.BUILD_DIR}}_{{.GOOS}}_{{.GOARCH}} cmd/{{.BUILD_DIR}}/main.go
|
||||
sources:
|
||||
- internal/pkg/**/*.go
|
||||
- cmd/{{.BUILD_DIR}}/main.go
|
||||
generates:
|
||||
- bin/{{.BUILD_DIR}}_{{.GOOS}}_{{.GOARCH}}
|
||||
env:
|
||||
GOARM: "{{.GOARM}}"
|
||||
GOARCH: "{{.GOARCH}}"
|
||||
GOOS: "{{.GOOS}}"
|
||||
|
@ -1,24 +1,149 @@
|
||||
#!/bin/bash
|
||||
echo "Purging old builds"
|
||||
|
||||
# adding commands
|
||||
usage() {
|
||||
# how to use this build script
|
||||
cat <<EOF
|
||||
usage: $0 [-c][-l][-i s] s1 [s2....]
|
||||
s1, s2, etc. the systems to build for (see -l)
|
||||
Options:
|
||||
-c, --clean cleans the bin folder of any existing builds
|
||||
-f, --force same as clean but skips prompt
|
||||
-l, --list list available systems to build for
|
||||
-s, --scp will attempt to scp to aplicable devices
|
||||
-h, --help display this message
|
||||
EOF
|
||||
}
|
||||
|
||||
list_systems() {
|
||||
# list available systems to build for
|
||||
cat <<EOF
|
||||
Name (shorthand) SCP available? (y/n)
|
||||
$0 Name or $0 (shorthand) will build for the device
|
||||
|
||||
RaspberryPi (rpi) y
|
||||
BeagleBone (bb) y
|
||||
Desktop (d) n
|
||||
Server (s) n
|
||||
EOF
|
||||
}
|
||||
|
||||
clean_builds() {
|
||||
# cleans old builds
|
||||
if [[ "$FORCE"=true ]] ; then
|
||||
printf 'Cleaning old builds... \n'
|
||||
rm -v bin/* 2>/dev/null
|
||||
else
|
||||
read -p "Clean old builds?(y/n) " -n 1 -r
|
||||
if [[ $REPLY =~ ^[Yy]$ ]] ; then
|
||||
rm -v bin/* 2>/dev/null
|
||||
fi
|
||||
fi
|
||||
printf 'Clean!\n'
|
||||
}
|
||||
|
||||
create_build() {
|
||||
# create build for $1
|
||||
case $1 in
|
||||
'rpi' )
|
||||
printf 'Building for Raspberry Pi!\n'
|
||||
GARCH="arm64"
|
||||
PLATFORM="reactor"
|
||||
;;
|
||||
'bb')
|
||||
printf 'Building for BeagleBone!\n'
|
||||
GARCH="arm"
|
||||
GARM="GOARM=7"
|
||||
PLATFORM="reactor"
|
||||
;;
|
||||
's')
|
||||
printf 'Building for Server!\n'
|
||||
GARCH="amd64"
|
||||
PLATFORM="server"
|
||||
;;
|
||||
'd')
|
||||
printf 'Building for Desktop!\n'
|
||||
GARCH="amd64"
|
||||
PLATFORM="server"
|
||||
;;
|
||||
* )
|
||||
printf 'ERROR: %s type unrecognized!\n' "$1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
# setting up build
|
||||
OUTFILE=$(printf '%s_linux_%s' "$PLATFORM" "$GARCH")
|
||||
INFILE=$(printf '%s/main.go' "$PLATFORM")
|
||||
# building
|
||||
env GOOS=linux GOARCH="$GARCH" $GARM go build -o bin/"$OUTFILE" cmd/"$INFILE"
|
||||
echo "Finished"
|
||||
if [[ "$SCP"=true ]] ; then
|
||||
printf 'Attempting to transfer to %s\n' "$2"
|
||||
if [[ "$1" == "bb" ]] ; then
|
||||
printf 'Copying to %s\n' "192.168.100.90"
|
||||
scp "$HOME/FRMS/bin/$OUTFILE" debian:~/
|
||||
else
|
||||
printf 'SCP Not available!\n'
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
echo "Removing Logs"
|
||||
rm -v bin/log/* 2>/dev/null
|
||||
# handle long form
|
||||
for arg in "$@"; do
|
||||
shift
|
||||
case "$arg" in
|
||||
'--help') set -- "$@" "-h" ;;
|
||||
'--list') set -- "$@" "-l" ;;
|
||||
'--scp') set -- "$@" "-s" ;;
|
||||
'--clean') set -- "$@" "-c" ;;
|
||||
'--force') set -- "$@" "-f" ;;
|
||||
*) set -- "$@" "$arg" ;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "Building reactor binaries"
|
||||
env GOOS=linux GOARCH=arm GOARM=7 go build -o bin/reactor_linux_arm cmd/reactor/main.go
|
||||
env GOOS=linux GOARCH=arm64 go build -o bin/reactor_linux_arm64 cmd/reactor/main.go
|
||||
# handle args
|
||||
while getopts "lcsfh" opt ; do
|
||||
case "$opt" in
|
||||
'h' )
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
'c' )
|
||||
clean_builds
|
||||
;;
|
||||
'f' )
|
||||
FORCE=true
|
||||
clean_builds
|
||||
;;
|
||||
's' )
|
||||
SCP=true
|
||||
;;
|
||||
'l')
|
||||
list_systems
|
||||
;;
|
||||
'?' )
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "Building tui binaries"
|
||||
env GOOS=linux GOARCH=arm GOARM=7 go build -o bin/tui_linux_arm cmd/tui/main.go
|
||||
env GOOS=linux GOARCH=arm64 go build -o bin/tui_linux_arm64 cmd/tui/main.go
|
||||
env GOOS=linux GOARCH=amd64 go build -o bin/tui_linux_amd64 cmd/tui/main.go
|
||||
shift $(($OPTIND - 1))
|
||||
|
||||
echo "Building server binary"
|
||||
env GOOS=linux GOARCH=amd64 go build -o bin/server_linux_amd64 cmd/server/main.go
|
||||
for dev in "$@"; do
|
||||
case "$dev" in
|
||||
'RaspberryPi') dev='rpi' ;;
|
||||
'BeagleBone') dev='bb' ;;
|
||||
'Server') dev='s' ;;
|
||||
'Desktop') dev='d' ;;
|
||||
esac
|
||||
create_build "$dev"
|
||||
done
|
||||
printf 'Nothing else to do!\n'
|
||||
|
||||
echo "Compressing binaries for distrubution"
|
||||
tar -czf pireactor.tar.gz -C bin reactor_linux_arm64
|
||||
tar -czf bbreactor.tar.gz -C bin reactor_linux_arm
|
||||
tar -czf server.tar.gz -C bin server_linux_amd64
|
||||
tar -czf tui.tar.gz -C bin tui_linux_amd64 tui_linux_arm tui_linux_arm64
|
||||
# echo "Compressing binaries for distrubution"
|
||||
# tar -czf pireactor.tar.gz -C bin reactor_linux_arm64
|
||||
# tar -czf bbreactor.tar.gz -C bin reactor_linux_arm
|
||||
# tar -czf server.tar.gz -C bin server_linux_amd64
|
||||
# tar -czf tui.tar.gz -C bin tui_linux_amd64 tui_linux_arm tui_linux_arm64
|
||||
|
@ -1,2 +0,0 @@
|
||||
#!/bin/bash
|
||||
env GOOS=linux GOARCH=arm GOARM=7 go build -o ../../bin/
|
@ -1,2 +0,0 @@
|
||||
#!/bin/bash
|
||||
go build -race -o ../../bin/server_$GOOS_$GOARCH
|
@ -1,65 +1,68 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
_"net/http"
|
||||
_ "net/http/pprof"
|
||||
//"flag"
|
||||
"log"
|
||||
"os"
|
||||
"fmt"
|
||||
"FRMS/internal/pkg/logging"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"FRMS/internal/pkg/config"
|
||||
"FRMS/internal/pkg/logging"
|
||||
"FRMS/internal/pkg/server"
|
||||
"FRMS/internal/pkg/websocket"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
type listener interface {
|
||||
type coordinator interface {
|
||||
Start()
|
||||
}
|
||||
|
||||
func NewListener(ch chan error, port int) listener {
|
||||
return server.NewListener(ch, port)
|
||||
func NewCoordinator(config *viper.Viper, ch chan error) coordinator {
|
||||
return server.NewCentralCoordinator(config, ch)
|
||||
}
|
||||
|
||||
func NewConfig(fname string) *viper.Viper {
|
||||
return config.LoadConfig(fname)
|
||||
}
|
||||
|
||||
type dbconfig interface {
|
||||
GetUrl() string
|
||||
GetOrg() string
|
||||
GetBucket() string
|
||||
GetToken() string
|
||||
type ws interface {
|
||||
Start()
|
||||
}
|
||||
|
||||
func ReadConfig() dbconfig {
|
||||
return config.ReadServerConfig()
|
||||
func NewWebSocket() ws {
|
||||
return websocket.New()
|
||||
}
|
||||
|
||||
func main() {
|
||||
// lets get this bread
|
||||
// all we need to do is call the reactor coordinator and thats it
|
||||
// removing os flags in favor of env vars
|
||||
// go func() {
|
||||
// fmt.Println(http.ListenAndServe("localhost:6060",nil))
|
||||
// }()
|
||||
ch := make(chan error)
|
||||
// creating listener
|
||||
var lport int
|
||||
//var dbport int
|
||||
if port := os.Getenv("gRPC_PORT"); port == "" {
|
||||
lport = 2022 // default docker port
|
||||
}
|
||||
//if port := os.Getenv("DATABASE_PORT"); port == "" {
|
||||
//dbport = 8086
|
||||
//}
|
||||
//fmt.Printf("DBPORT %d\n", dbport)
|
||||
conf := ReadConfig()
|
||||
fmt.Printf("Found %v %v %v %v\n",conf.GetUrl(),conf.GetBucket(),conf.GetOrg(),conf.GetToken())
|
||||
fmt.Printf("Listening on %v\n", lport)
|
||||
l := NewListener(ch,lport)
|
||||
gracefulShutdown := make(chan os.Signal, 1)
|
||||
signal.Notify(gracefulShutdown, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
// config file
|
||||
conf := NewConfig("server")
|
||||
|
||||
//db := os.Getenv("DATABASE_URL") // database url
|
||||
errCh := make(chan error)
|
||||
|
||||
go l.Start()
|
||||
logging.Debug(logging.DStart, "CCO 01 Server started")
|
||||
err := <-ch // blocking to wait for any errors and keep alive otherwise
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
c := NewCoordinator(conf, errCh)
|
||||
go c.Start()
|
||||
logging.Debug(logging.DStart, "CCO 01 Server %s started", conf.Get("name"))
|
||||
// starting websocket server
|
||||
|
||||
w := NewWebSocket()
|
||||
go w.Start()
|
||||
|
||||
select {
|
||||
case err := <-errCh: // blocking to wait for any errors and keep alive otherwise
|
||||
panic(err)
|
||||
case <-gracefulShutdown:
|
||||
// Shutdown via INT
|
||||
// storing config
|
||||
fmt.Printf("\nStoring config to %s\n", conf.ConfigFileUsed())
|
||||
if err := conf.WriteConfig(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println("Stored config successfully. Exiting...")
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
|
@ -1,2 +0,0 @@
|
||||
#!/bin/bash
|
||||
env GOOS=linux GOARCH=arm GOARM=7 go build -o ../../bin/
|
@ -1,2 +0,0 @@
|
||||
#!/bin/bash
|
||||
go build -o ../../bin/
|
@ -1,52 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"FRMS/internal/pkg/tui"
|
||||
"FRMS/internal/pkg/logging"
|
||||
)
|
||||
|
||||
type TUI interface {
|
||||
Start()
|
||||
}
|
||||
|
||||
func NewTUI(ip string, port int, ifconfig string, ch chan error) TUI {
|
||||
return tui.NewTUI(ip, port, ifconfig, ch)
|
||||
}
|
||||
|
||||
func main() {
|
||||
var port int
|
||||
var err error
|
||||
flag.Usage = func() {
|
||||
w := flag.CommandLine.Output()
|
||||
fmt.Fprintf(w,"Usage: %s port [eth*, wlan*, etc.]\n", os.Args[0])
|
||||
}
|
||||
iptr := flag.String("i","192.168.100.2","ip address of listener")
|
||||
//iptr := flag.String("i","192.1.168.136","ip address of laptop")
|
||||
flag.Parse()
|
||||
if flag.NArg() != 2 {
|
||||
flag.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
args := flag.Args()
|
||||
if port, err = strconv.Atoi(args[0]); port < 1024 || port > 65536 {
|
||||
flag.Usage()
|
||||
log.Fatal("Port must be between [1023,65535]")
|
||||
} else if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
ifconfig := string(args[1])
|
||||
ip := *iptr
|
||||
ch := make(chan error)
|
||||
t := NewTUI(ip,port,ifconfig,ch)
|
||||
go t.Start()
|
||||
logging.Debug(logging.DStart, "Started TUI Client")
|
||||
err = <-ch
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
|
||||
#DB_URL=$(cat "$INFLUX_CONFIGS_PATH" | awk '/url/ {print $3}' | head -n 1)
|
||||
DB_URL="frms-db-1:8086"
|
||||
|
||||
TOKEN=$(influx auth list --user ${DOCKER_INFLUXDB_INIT_USER_ID} --hide-headers | cut -f 3)
|
||||
ORG=$(influx org list | grep ${DOCKER_INFLUXDB_INIT_ORG_ID} | awk '{print $2}')
|
||||
# creating starting server YAML
|
||||
echo -e "server:\n db-url: ${DB_URL}\n db-org: ${ORG}\n db-token: ${TOKEN}" >/configs/server.yaml;
|
||||
|
||||
# creating grafana yaml
|
||||
influx user create -n grafana -o ${ORG}
|
||||
GRAFANA_TOKEN=$(influx auth list --user grafana --hide-headers | cut -f 3)
|
||||
echo -e "apiVersion: 1\n\ndeleteDatasources:\n\ndatasources:\n - name: INFLUXDB\n type: influxdb\n access: proxy\n url: ${DB_URL}\n jsonData:\n httpMode: GET\n httpHeaderName1: 'Authorization'\n secureJsonData:\n httpHeaderValue1: 'Token ${GRAFANA_TOKEN}'" >/grafana/datasources/datasource.yaml
|
@ -0,0 +1,6 @@
|
||||
----
|
||||
# ${gen_statement}
|
||||
server:
|
||||
db-url: "${db_url}"
|
||||
db-org: "${db_org}"
|
||||
db-token: "${db_token}"
|
@ -1,4 +0,0 @@
|
||||
model: raspberrypi
|
||||
bus: 1
|
||||
model: beagleboard
|
||||
bus: 2
|
@ -0,0 +1,5 @@
|
||||
INFLUXDB_USERNAME=admin
|
||||
INFLUXDB_PASSWORD=admin
|
||||
INFLUXDB_ORG=ForeLight
|
||||
INFLUXDB_BUCKET=default
|
||||
|
@ -0,0 +1,11 @@
|
||||
devices:
|
||||
address: 112
|
||||
name: DO Sensor
|
||||
reactor:
|
||||
heartbeat: 5
|
||||
id: 2166136261
|
||||
model: ""
|
||||
name: Dummy Reactor
|
||||
server:
|
||||
ip: 192.168.100.2
|
||||
port: 2022
|
@ -0,0 +1,26 @@
|
||||
db:
|
||||
org: ForeLight
|
||||
url: http://192.168.100.2:8086
|
||||
ports_db: 2022
|
||||
ports_lis: 2022
|
||||
reactors:
|
||||
"10002123":
|
||||
db:
|
||||
bucket: test
|
||||
token: ""
|
||||
name: Beaglebone Black
|
||||
"2062445129":
|
||||
devices:
|
||||
"97":
|
||||
name: DO Sensor
|
||||
"99":
|
||||
name: pH Sensor
|
||||
"102":
|
||||
name: RTD Sensor
|
||||
server:
|
||||
name: Rack Server
|
||||
ports:
|
||||
db: 8086
|
||||
lis: 2022
|
||||
reactor: 2023
|
||||
tui: 2024
|
@ -0,0 +1,71 @@
|
||||
*Time for a coherent plan of attack*
|
||||
|
||||
### Current Issues:
|
||||
- There is a lot of redundancy between the managers/coordinators when it comes to basic checks
|
||||
- the package seperation kind of makes sense, but it needs to be better fleshed out
|
||||
- I need to enforce better seperation of responsibilities. Somewhat unclear when state is being kept centrally in the coordinator for no apparent reason.
|
||||
|
||||
### Solution:
|
||||
- Go through the packages and consolidate
|
||||
- Reduce the state we have to keep centrally, push responsibility to the other packages
|
||||
|
||||
### Plan of attack:
|
||||
- Outline core information flow
|
||||
- Examine what interfaces are nessecary to make this work
|
||||
- Stop looking at the server/reactor as seperate entities
|
||||
|
||||
*I need to put the whole docker thing on the back burner for now. It isn't that important when it comes to immediate goals.*
|
||||
|
||||
#### 12/05 TODO
|
||||
- Cleanup server side config stuff to make it coherent
|
||||
- Reflect changes to reactor side startup
|
||||
- Boil down interface to address core issues
|
||||
- Config outline:
|
||||
1) Startup and load the existing config
|
||||
2) Overwrite any previous settings with the flags
|
||||
3) Intelligently translate config into action
|
||||
4) launch coordinator and start up existing reactor managers
|
||||
- Config Structure:
|
||||
- Wrap viper functions in config struct methods to be used thrtugh interfaces
|
||||
- minimize the reliance on viper so we can sub in othermethods
|
||||
- is it even important to launch reactor managers? Wont they just be started on connection?
|
||||
|
||||
|
||||
#### 12/06 TODO
|
||||
- I think I can completely remove the old config way and just pass the viper object directly. I think its not worth the hassle of trying to keep track of a million interfaces
|
||||
|
||||
#### 12/07 TODO
|
||||
- I concede, I will just remove flags as most people will never use them anyway and instead rely on env vars and config files. To hell with the flags.
|
||||
- I am ripping out all of the TUI and status manager stuff, its convoluted and harder than just pulling info from database.
|
||||
- I can eventaully rework TUI to pull from DB which is fine, there will never be that many clients anyway and a lot of them are only 1 time calls with refreshes which aren't that slow anyway.
|
||||
- alright I gutted the tui and system viewer, reworking sub coord to launch at start. That way there is a listener active
|
||||
- time to boil down to functionality a LOT, right now its clumsy and inefficent, there needs to be a better way to keep everything straight
|
||||
- Moving the DB responsibilites to the reactor itself seems to be the best way to do it in the short term. Reduce network load and overall keep things efficient. May lead to duplicte copies of data? Not the end of the world, logging system can make sure we are maintaining entries.
|
||||
|
||||
**IDEA**
|
||||
Reactors log data themselves, Send periodic status updates over grpc to enable monitoring faster than the sample rate
|
||||
*This could work!*
|
||||
Outline:
|
||||
- Reactors reach out to server on boot to get DB info
|
||||
- compare this against what they have internally to ensure they are up to date and allow for migrations
|
||||
- Maybe not even save the db info because we don't need to??
|
||||
- Reactors also recieve port for their specific manager
|
||||
- Can be dynamically given out to allow for spread out load
|
||||
- Reactors then reach out with sensor and device info periodically (5s?) which can be used for live monitoring
|
||||
- RM responds with any potential updates for the device settings i.e. change pwm duty on web interface, pass on to reactor
|
||||
- Allows for a live view with current reading as well as historical data at differing interval via grafana. (i.e. 5s live view with 10 min sample interval)
|
||||
|
||||
Need to differentiate sensors vs devices that can be changed
|
||||
- Sensors have a variable sample rate and eventually name/address
|
||||
- Devices have more and widley varying parameters, could be pwm with freq/duty/onoff or ph pump with on, time or off etc.
|
||||
|
||||
#### 12/09 TODO
|
||||
- Alright I have a baseline! I want to start to integrate atlas type stuff so that I have some mock data/sensors to work with. I am going to try to flesh out the "atlas" interface/struct to implement some of the more basic commands.
|
||||
|
||||
#### 1/11 TODO
|
||||
Plan of attack for websocket stuff and things
|
||||
|
||||
**Questions**
|
||||
- What to do about the reactor to user comms
|
||||
- Websockets? GRPC? smoke signals?
|
||||
-
|
@ -0,0 +1,4 @@
|
||||
## Weekly Planning
|
||||
|
||||
[Jan 16-20](weekly/Jan-16-20.md)
|
||||
[Jan 23-27](weekly/Jan-23-27.md)
|
@ -0,0 +1,149 @@
|
||||
# Jan 18
|
||||
### Planning
|
||||
**Monitoring Changes**
|
||||
|
||||
I want to refactor the reactor stuff to be less method oriented as far as data collection. For example, the monitoring stuff is all about events that happen pretty infrequently. It makes sense to then use a channel on the device side to just feed relevant status updates back to the reactor. I think that this makes the most sense because this will synchronize updates and leverage the rarity of events to cut down on errant calls.
|
||||
- pros
|
||||
- less repitive method calls needed
|
||||
- less device locking
|
||||
- localize the information to different packages
|
||||
- cons
|
||||
- extra memory for channels and duplicate storage info
|
||||
- could just remove status from dm?
|
||||
|
||||
**New Idea**
|
||||
|
||||
I can leverage wireguard to do server-> reactor connections even beyond the testing phase
|
||||
|
||||
Changes:
|
||||
1) move device coordinator into device package
|
||||
2) expose relevant methods to reactor interface
|
||||
3) clarify individual package responsibilities
|
||||
4) add stuff server side to create/destroy grpc connections as the information is rendered client side
|
||||
- this might be scuffed but oh well
|
||||
|
||||
### Package Separation
|
||||
**Reactor**
|
||||
- coordinator
|
||||
- creates initial link to the server
|
||||
- creates database client
|
||||
- creates and starts a device coordinator
|
||||
|
||||
**Device**
|
||||
- coordinator
|
||||
- searches i2c bus for connected devices
|
||||
- spins up managers to control the connected devices
|
||||
- relays information back up to the reactor coordinator
|
||||
- manager
|
||||
- control over singular device
|
||||
- has the core information that will be needed across any type of device (name, status, address etc)
|
||||
- sub-manager
|
||||
- fine grained struct with methods specific to the device
|
||||
|
||||
**Server**
|
||||
|
||||
Going to ignore for now because I am lazy
|
||||
- central coordinator starts up database connection config etc
|
||||
- reactor coordinator
|
||||
|
||||
### TODO
|
||||
**Monitoring Changes**
|
||||
- [] change methods to channel based
|
||||
- [] internal methods with spins
|
||||
- [] pass structs with interface for methods
|
||||
|
||||
|
||||
# Jan 19
|
||||
|
||||
### Orginizational changes
|
||||
|
||||
What structure makes the most sense for the devices?
|
||||
|
||||
#### Top-Down
|
||||
|
||||
Ex) DeviceManager -> SensorManager -> DOManager -> Manager
|
||||
|
||||
**Pros**
|
||||
- probably a less complex interface layout?
|
||||
|
||||
|
||||
**Cons**
|
||||
- annoying to keep/pass state
|
||||
- i.e. atlas needs the address to pass to the I2C but right now the devicemanager is storing that. Have to pass down via start which doesn't make a ton of sense
|
||||
|
||||
#### Bottom-Up
|
||||
|
||||
Ex) DOManager -> SensorManager -> DeviceManager -> Manager
|
||||
|
||||
**Pros**
|
||||
- top level manager has access to common info
|
||||
- i.e. address, name etc
|
||||
- can easily define common functions and use this to pass info upwards
|
||||
- still don't have to import device manager as interfaces can handle getting/setting stuff
|
||||
|
||||
**Cons**
|
||||
- might get ugly with interfaces
|
||||
- there might have to be a bunch of interfaces in the device package to handle nesting the manager itself
|
||||
- this might not be true though as the device coordinator dictates what interfaces are needed, and already it doesn't really use any of the dm functionality
|
||||
|
||||
**What would it look like?**
|
||||
Device coordinator would call NewDeviceManager,
|
||||
|
||||
### Outline of functionality
|
||||
|
||||
Hopefully by going over what is expected of each manager, it will become clear what the layout should look like
|
||||
|
||||
**Device Coordinator**
|
||||
- responsibilities
|
||||
- starting/stopping device managers as devices connect/disconnect
|
||||
- maintaining a map of the devices and their status
|
||||
- updating the server with this information at set intervals
|
||||
- pass the I2C client to the device managers
|
||||
|
||||
**Device Manager**
|
||||
- responsibilities
|
||||
- struct to store information that is used by any type of device
|
||||
- i.e. Address, Name, Config(prefix and file)? Status?
|
||||
- probably don't need status as this can be determined via IsActive()
|
||||
- config might be helpful to have, could pass up to managers via a Get function
|
||||
- start/stop as requested by the device coordinator
|
||||
- serves
|
||||
- broad functions such as SetName(), GetName(), etc.
|
||||
|
||||
**Sensor/Controller Manager**
|
||||
- responsibilities
|
||||
- provide corresponding broad struct that will be consistent across types of each
|
||||
- i.e. sensors all have sample rate
|
||||
- provide methods all will use such as TakeReading()
|
||||
- serves
|
||||
- more specific functions such as GetSampleRate(), Set...
|
||||
|
||||
**Specific Managers**
|
||||
- responsibilities
|
||||
- provides specific functions that a certain sensor/controller might need
|
||||
- i.e. pwm will need setFreq, DO might need a conversion etc.
|
||||
- broadly will need access to I2C for comms
|
||||
- serves
|
||||
- Hyper Specific functions such as SetFreq() etc.
|
||||
|
||||
### Trying Bottom-Up
|
||||
|
||||
Right now, I am using some hybrid format which doesn't really make any sense. It goes
|
||||
|
||||
DeviceManager -> DOManager -> SensorManager -> Manager
|
||||
|
||||
This just feels *wrong*
|
||||
|
||||
**Changes**
|
||||
- Going to use the specifc -> broad becaus it seems intiuitive
|
||||
- the most common methods/information is at the base and propogates up through the more specific managers
|
||||
- should make it simplier to define
|
||||
- maybe go back to the unified package? Not quite clear what the purpose of seperate is beyond convience
|
||||
- although... the idea of the device manager as a reusable peice makes enough sense to potentially keep it as a seperate package
|
||||
- I'll stick with the seperate for now and keep it unless it becomes unworkable
|
||||
|
||||
### I2C Changes
|
||||
The i2c bus is locked at the device level, so I am going to rewrite the bs to just use a function with no struct and remove the whole passing of structs garbage
|
||||
|
||||
#### For tomorrow
|
||||
What I have now works, but it is still pretty backwards. Need further improvements and need to start thinking about what a websocket might look like in the current model
|
@ -0,0 +1,49 @@
|
||||
# Jan 23
|
||||
|
||||
### Connecting Clients to reactors
|
||||
|
||||
**Client -> Server -> Reactor**
|
||||
|
||||
I can take advantage of the private network created via wireguard to allow the server to connected back to individual reactors and then intiate gRPC calls.
|
||||
|
||||
**Pros**
|
||||
- This *VASTLY* simplifies the implementation as I can now connect back to the reactors themselves
|
||||
- from there, I can implement various functions I will need server side
|
||||
- i.e. GetName() SetName() etc.
|
||||
|
||||
**Cons**
|
||||
- I will eventually need to build the wiregaurd implementation
|
||||
- although because its all local network for now, I can plug and play down the road
|
||||
|
||||
### TODO
|
||||
- refactor packages to provide a cleaner interface via simple commands as opposed to the convoluted passing structure that was present with the old I2C library
|
||||
- start working on the interface between the websocket and the reactor
|
||||
- react side this is the actual content that will be rendered by the client
|
||||
- server side this will be a connection to a reactor with the gRPC calls
|
||||
- moving monitoring functionality to the reactor
|
||||
- refactoring to use streaming functionality to avoid needing to re initiate request
|
||||
- have server connect each reactor manager to the rlc
|
||||
- have the reactor manager ping for server info
|
||||
- handle disconnects via exit
|
||||
- sets up cleaner device handling via multiplexing
|
||||
|
||||
# Jan 24
|
||||
|
||||
### Proto changes
|
||||
|
||||
It's time to refactor the current protobuf stuff to make more sense from the servers perspective. In this sense, I am going to have the reactor provide connection details to the server on connect, and then the server can connect/disconnect at will.
|
||||
|
||||
### Outline
|
||||
- Update the server to connect to the reactor itself for the information
|
||||
- Decide what information is important enough to send to the server consistently, vs what only is needed upon "further inspection"
|
||||
- need reactor information on connect
|
||||
- need basic device information such as address and status
|
||||
- when selected
|
||||
- need specific device breakouts with advanced functions per device
|
||||
- this can be multiplexed over the same gRPC connection and can be fulfilled by the device coordinator
|
||||
- dc will catch all incoming requests and forward to the correct DM based on address
|
||||
|
||||
### TODO
|
||||
- reverse monitoring stuff
|
||||
- make it so reactor manager has a timeout/ recognizes disconnects gracefully
|
||||
- convert monitoring to a stream as opposed to consistent calls
|
@ -1,138 +0,0 @@
|
||||
package I2C
|
||||
|
||||
// file has general wrappers to interact with i2c-tools
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
_ "log"
|
||||
"encoding/hex"
|
||||
"os/exec"
|
||||
"bytes"
|
||||
"strings"
|
||||
"sync"
|
||||
"strconv"
|
||||
"FRMS/internal/pkg/logging"
|
||||
)
|
||||
|
||||
type I2CBus struct {
|
||||
int
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewBus(bus int) *I2CBus {
|
||||
b := &I2CBus{}
|
||||
b.int = bus
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *I2CBus) Scan() map[int]bool {
|
||||
/*
|
||||
Returns all the connected devices
|
||||
*/
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
bus := strconv.Itoa(b.int)
|
||||
cmd := exec.Command("i2cdetect", "-y", "-r", bus)
|
||||
var out bytes.Buffer
|
||||
var errs bytes.Buffer
|
||||
cmd.Stderr = &errs
|
||||
cmd.Stdout = &out
|
||||
if err := cmd.Run(); err != nil {
|
||||
logging.Debug(logging.DError, "I2C error performing scan. %v", errs.String())
|
||||
}
|
||||
|
||||
outString := out.String()
|
||||
// could split by \n too
|
||||
split := strings.SplitAfter(outString,":")
|
||||
// 1st entry is garbage headers and ending is always \n##:
|
||||
split = split[1:]
|
||||
// create empty slice for all the devices
|
||||
//var devices []i2cdev
|
||||
devices := map[int]bool{} //maps device addresses to active bool
|
||||
for i,v := range split {
|
||||
lst := strings.Index(v,"\n")
|
||||
trimmed := v[:lst]
|
||||
trimmed = strings.Trim(trimmed," ")
|
||||
// trimmed now holds just possible sensor addresses
|
||||
count := strings.Split(trimmed," ")
|
||||
for j,d := range count {
|
||||
// the first row has to be offset by 3 but after its just i*16 + j
|
||||
offset := 0
|
||||
if i == 0 {
|
||||
offset = 3
|
||||
}
|
||||
addr := i*16 + j + offset
|
||||
if strings.Contains(d,"--") || strings.Contains(d,"UU") {
|
||||
// address is unconnected or reserved
|
||||
//devices = append(devices, I2Cdev{Addr:addr,Active:false})
|
||||
devices[addr] = false
|
||||
} else {
|
||||
//devices = append(devices, I2Cdev{Addr:addr,Active:true,LastSeen:now})
|
||||
devices[addr] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return devices
|
||||
}
|
||||
|
||||
func (b *I2CBus) GetStatus(addr int) bool {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
|
||||
bus := strconv.Itoa(b.int)
|
||||
a := strconv.Itoa(addr)
|
||||
cmd := exec.Command("i2cdetect","-y","-r",bus,a,a)
|
||||
var out bytes.Buffer
|
||||
var errs bytes.Buffer
|
||||
cmd.Stderr = &errs
|
||||
cmd.Stdout = &out
|
||||
if err := cmd.Run(); err != nil {
|
||||
logging.Debug(logging.DError,"I2C error getting status! %v", errs.String())
|
||||
}
|
||||
|
||||
outString := out.String()
|
||||
split := strings.SplitAfter(outString,":")
|
||||
split = split[1:] // remove garbage header
|
||||
val := int(addr/16) // if addr = 90 90/16 = int(5.6) = 5 will be in 5th row
|
||||
dev := split[val]
|
||||
lst := strings.Index(dev,"\n")
|
||||
dev = dev[:lst]
|
||||
trimmed := strings.Trim(dev," \n")
|
||||
if strings.Contains(trimmed,"--") {
|
||||
return false
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (b *I2CBus) GetData(addr int) string {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
|
||||
bus := strconv.Itoa(b.int)
|
||||
a := strconv.FormatInt(int64(addr),16)
|
||||
cmd := exec.Command("i2ctransfer","-y",bus,fmt.Sprintf("r40@0x%s",a))
|
||||
var out bytes.Buffer
|
||||
var errs bytes.Buffer
|
||||
cmd.Stderr = &errs
|
||||
cmd.Stdout = &out
|
||||
if err := cmd.Run(); err != nil {
|
||||
logging.Debug(logging.DError,"I2C error getting data! %v", errs.String())
|
||||
}
|
||||
|
||||
outString := out.String()
|
||||
split := strings.SplitAfter(outString," ") //getting chars 0x12 0x2f etc
|
||||
var final string
|
||||
for _,v := range split {
|
||||
trimmed := strings.TrimLeft(v, "0x ") // trimming extra bs in front of num
|
||||
trimmed = strings.TrimRight(trimmed," \n") // trimming back
|
||||
if trimmed != "ff" {
|
||||
final += trimmed
|
||||
}
|
||||
}
|
||||
ret, err := hex.DecodeString(final)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return string(ret)
|
||||
}
|
@ -1,79 +0,0 @@
|
||||
package I2C
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type I2CDevice struct {
|
||||
*I2CBus // embeds bus
|
||||
bool // stores whether dev is currently connected
|
||||
int // addr
|
||||
Data *data
|
||||
}
|
||||
|
||||
type data struct {
|
||||
string
|
||||
bool
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (d I2CDevice) String() string {
|
||||
t := map[int]string{97:"DO Sensor",99:"pH Sensor",102:"Temperature Sensor",64:"DHT11 Sensor"}
|
||||
return t[d.int]
|
||||
}
|
||||
|
||||
func NewDevice(addr int,bus *I2CBus) *I2CDevice {
|
||||
d := &I2CDevice{}
|
||||
d.I2CBus = bus
|
||||
d.int = addr
|
||||
d.Data = &data{}
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *I2CDevice) GetAddr() int {
|
||||
return d.int
|
||||
}
|
||||
|
||||
func (d *I2CDevice) GetStatus() string {
|
||||
// TODO
|
||||
s := d.I2CBus.GetStatus(d.int)
|
||||
if s {
|
||||
d.Data.Active()
|
||||
return "[green]ACTIVE[white]"
|
||||
} else {
|
||||
d.Data.Killed()
|
||||
return "[red]KILLED[white]"
|
||||
}
|
||||
}
|
||||
|
||||
func (d *I2CDevice) GetType() string {
|
||||
// TODO
|
||||
return fmt.Sprint(d)
|
||||
}
|
||||
|
||||
func (d *I2CDevice) GetData() string {
|
||||
d.Data.Lock()
|
||||
defer d.Data.Unlock()
|
||||
d.Data.string = d.I2CBus.GetData(d.int)
|
||||
return d.Data.string
|
||||
}
|
||||
|
||||
func (d *data) Active() {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
if !d.bool {
|
||||
d.string = ""
|
||||
d.bool = true
|
||||
}
|
||||
}
|
||||
|
||||
func (d *data) Killed() {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
if d.bool {
|
||||
d.string = time.Now().Format("Mon at 03:04:05pm MST")
|
||||
d.bool = false
|
||||
}
|
||||
}
|
@ -1,97 +0,0 @@
|
||||
package I2C
|
||||
|
||||
import (
|
||||
"time"
|
||||
_ "fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
/*
|
||||
i2c monitor implements a long running monitor responsible for sending active devices to the rlc
|
||||
*/
|
||||
|
||||
type I2CMonitor struct {
|
||||
*I2CBus
|
||||
Devices *devs
|
||||
DevChan chan int
|
||||
}
|
||||
|
||||
type devs struct {
|
||||
sync.Mutex
|
||||
m map[int]*I2CDevice
|
||||
}
|
||||
|
||||
func NewMonitor(bus int,ch chan int) *I2CMonitor {
|
||||
m := &I2CMonitor{}
|
||||
b := NewBus(bus)
|
||||
m.I2CBus = b
|
||||
d := make(map[int]*I2CDevice)
|
||||
m.Devices = &devs{m:d}
|
||||
m.DevChan = ch
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *I2CMonitor) Update() {
|
||||
/*
|
||||
scans bus and adds new active devices
|
||||
*/
|
||||
devs := m.Scan()
|
||||
chng := m.Devices.Parse(m.I2CBus,devs)
|
||||
for _, d := range chng {
|
||||
go m.ConnectDevice(d)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *I2CMonitor) Monitor() {
|
||||
// functon that updates the device list and notifies rlc of any changes to sensor composition
|
||||
s := make(chan struct{})
|
||||
t := 5 * time.Second
|
||||
go func(signal chan struct{},to time.Duration) { // simple signal func to init scan
|
||||
for {
|
||||
signal <-struct{}{}
|
||||
time.Sleep(to)
|
||||
}
|
||||
}(s,t)
|
||||
|
||||
for {
|
||||
<-s
|
||||
m.Update()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *I2CMonitor) ConnectDevice(addr int) {
|
||||
m.DevChan <-addr
|
||||
}
|
||||
|
||||
func (m *I2CMonitor) GetDevice(addr int) interface{ GetAddr() int; GetData() string; GetStatus() string; GetType() string } {
|
||||
m.Devices.Lock()
|
||||
defer m.Devices.Unlock()
|
||||
return m.Devices.m[addr]
|
||||
}
|
||||
|
||||
func (d *devs) Parse(bus *I2CBus,devices map[int]bool) []int {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
newdevs := []int{}
|
||||
for addr, status := range devices {
|
||||
if dev, exists := d.m[addr]; exists {
|
||||
// device seen
|
||||
if status != dev.bool { // if device state changed
|
||||
dev.bool = status
|
||||
if status {
|
||||
newdevs = append(newdevs,dev.GetAddr())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// device not seen yet
|
||||
if status {
|
||||
// active
|
||||
newd := NewDevice(addr,bus)
|
||||
newd.bool = status
|
||||
d.m[addr] = newd
|
||||
newdevs = append(newdevs,newd.GetAddr())
|
||||
}
|
||||
}
|
||||
}
|
||||
return newdevs
|
||||
}
|
@ -0,0 +1,46 @@
|
||||
package config
|
||||
|
||||
/*
|
||||
Load.go contains methods to load values from config, flags and env.
|
||||
*/
|
||||
|
||||
import (
|
||||
"FRMS/internal/pkg/logging"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func LoadConfig(fname string) *viper.Viper {
|
||||
// Demarshalls a given filename into the struct
|
||||
// returns nil if successful
|
||||
config := viper.New()
|
||||
configPath := "$HOME/FRMS/internal/configs"
|
||||
logging.Debug(logging.DStart, "Loading config for %s", fname)
|
||||
config.SetConfigName(fname)
|
||||
config.SetConfigType("yaml")
|
||||
//viper.AddConfigPath("/etc/frms/config")
|
||||
config.AddConfigPath(configPath)
|
||||
// struct and env vars
|
||||
|
||||
// Sets env vars
|
||||
config.AutomaticEnv()
|
||||
|
||||
// reading
|
||||
if err := config.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
// no config file found
|
||||
fmt.Printf("No config file found! creating empty one at %s.\n", configPath)
|
||||
if err = config.WriteConfigAs(configPath); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
logging.Debug(logging.DStart, "CON Loaded configs from %v", config.ConfigFileUsed())
|
||||
|
||||
// returning config object
|
||||
return config
|
||||
}
|
@ -1,70 +0,0 @@
|
||||
package config
|
||||
|
||||
// package serves to store/load config files for server
|
||||
|
||||
import (
|
||||
_ "fmt"
|
||||
"github.com/spf13/viper"
|
||||
"FRMS/internal/pkg/logging"
|
||||
"log"
|
||||
"os/exec"
|
||||
"bytes"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type serverconfig struct {
|
||||
URL string
|
||||
Token string
|
||||
Bucket string
|
||||
Orginization string
|
||||
}
|
||||
|
||||
func ReadServerConfig() *serverconfig {
|
||||
|
||||
viper.SetConfigName("database")
|
||||
viper.SetConfigType("yaml")
|
||||
viper.AddConfigPath("./internal/configs")
|
||||
viper.SetDefault("Orginization","ForeLight")
|
||||
viper.SetDefault("URL","http://localhost:8086")
|
||||
var C serverconfig
|
||||
err := viper.Unmarshal(&C)
|
||||
if err != nil {
|
||||
logging.Debug(logging.DError,"Cannot unmarshal! %v",err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
if C.Token == "" {
|
||||
// token unset
|
||||
logging.Debug(logging.DClient,"CON Grabbing adming token")
|
||||
cmd := exec.Command("cat","tokens/admin_token")
|
||||
var out bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
cmd.Stderr = &stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
logging.Debug(logging.DError,"CON Error grabbing token %v",err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
outstring := out.String()
|
||||
C.Token = strings.Trim(outstring," \n")
|
||||
viper.Set("token",C.Token)
|
||||
viper.WriteConfig()
|
||||
}
|
||||
return &C
|
||||
}
|
||||
|
||||
func (s *serverconfig) GetUrl() string {
|
||||
return s.URL
|
||||
}
|
||||
|
||||
|
||||
func (s *serverconfig) GetOrg() string {
|
||||
return s.Orginization
|
||||
}
|
||||
|
||||
func (s *serverconfig) GetBucket() string {
|
||||
return s.Bucket
|
||||
}
|
||||
|
||||
func (s *serverconfig) GetToken() string {
|
||||
return s.Token
|
||||
}
|
@ -0,0 +1,85 @@
|
||||
package device
|
||||
|
||||
import (
|
||||
"FRMS/internal/pkg/i2c"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// atlas helpers to fulfill sensor manager functions
|
||||
|
||||
type Atlas struct {
|
||||
// delays passed by caller
|
||||
CalDelay int
|
||||
ReadDelay int
|
||||
}
|
||||
|
||||
func (a *Atlas) Calibrate(bus, addr int, cal string) error {
|
||||
// calibrate sensor
|
||||
if a.CalDelay == 0 {
|
||||
return errors.New("Cal delay unset, please check config")
|
||||
}
|
||||
if _, err := i2c.SendCmd(bus, addr, cal); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
time.Sleep(time.Duration(a.CalDelay) * time.Millisecond) // sleep
|
||||
|
||||
_, err := i2c.SendCmd(bus, addr, "") // read for success
|
||||
// return the err if there is any
|
||||
return err
|
||||
}
|
||||
|
||||
var ErrReadFail = errors.New("atlas read failure")
|
||||
|
||||
func (a *Atlas) TakeReading(bus, addr int) (float64, error) {
|
||||
// take reading function
|
||||
if _, err := i2c.SendCmd(bus, addr, "R"); err != nil {
|
||||
// read command
|
||||
return 0, err
|
||||
}
|
||||
if a.ReadDelay == 0 {
|
||||
return 0, errors.New("Read Delay unset, please check config")
|
||||
}
|
||||
sleep := time.Duration(a.ReadDelay) * time.Millisecond
|
||||
time.Sleep(sleep) // sleep between reads
|
||||
data, err := i2c.SendCmd(bus, addr, "")
|
||||
if err != nil {
|
||||
return 0, ErrReadFail
|
||||
}
|
||||
|
||||
// fmt data from 0x... to proper
|
||||
var final string
|
||||
split := strings.Split(data, " ")
|
||||
for i, v := range split {
|
||||
// loop over chars
|
||||
if i == 0 && v != "0x01" {
|
||||
// reading failed
|
||||
return 0, ErrReadFail
|
||||
}
|
||||
// trimming bs
|
||||
trimmed := strings.TrimLeft(v, "0x ")
|
||||
trimmed = strings.TrimRight(trimmed, " \n")
|
||||
if trimmed != "ff" && i != 0 {
|
||||
final += trimmed
|
||||
}
|
||||
}
|
||||
// return as a float
|
||||
var b []byte
|
||||
if b, err = hex.DecodeString(final); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseFloat(string(b), 32)
|
||||
}
|
||||
|
||||
// for config
|
||||
func (a *Atlas) GetCalDelay() int {
|
||||
return a.CalDelay
|
||||
}
|
||||
|
||||
func (a *Atlas) GetReadDelay() int {
|
||||
return a.ReadDelay
|
||||
}
|
@ -0,0 +1,22 @@
|
||||
package device
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// base controller manager
|
||||
|
||||
type ControllerManager struct {
|
||||
*DeviceManager
|
||||
|
||||
sync.Mutex
|
||||
Enabled bool // turn controller on or off
|
||||
}
|
||||
|
||||
func NewControllerManager() *ControllerManager {
|
||||
return &ControllerManager{}
|
||||
}
|
||||
|
||||
func (c *ControllerManager) SetDeviceManager(d *DeviceManager) {
|
||||
c.DeviceManager = d
|
||||
}
|
@ -0,0 +1,41 @@
|
||||
package device
|
||||
|
||||
// do sensor and methods
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type DOManager struct {
|
||||
// do sensor manager
|
||||
*SensorManager
|
||||
*Atlas
|
||||
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func NewDOManager() *DOManager {
|
||||
// atlas delays
|
||||
a := &Atlas{
|
||||
CalDelay: 1300,
|
||||
ReadDelay: 600,
|
||||
}
|
||||
|
||||
sm := NewSensorManager()
|
||||
|
||||
m := &DOManager{
|
||||
Atlas: a,
|
||||
SensorManager: sm,
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *DOManager) Start() error {
|
||||
// start sensor manager
|
||||
return m.SensorManager.Start(m.Atlas.TakeReading)
|
||||
}
|
||||
|
||||
func (m *DOManager) String() string {
|
||||
// TODO
|
||||
return ""
|
||||
}
|
@ -0,0 +1,71 @@
|
||||
package device
|
||||
|
||||
import (
|
||||
"FRMS/internal/pkg/manager"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// base device manager
|
||||
|
||||
type Manager interface {
|
||||
// core manager
|
||||
Start() error
|
||||
Exit() error
|
||||
IsActive() int
|
||||
HeartBeat(chan struct{}, int, int, time.Duration)
|
||||
}
|
||||
|
||||
func NewManager() Manager {
|
||||
// no timeouts needed
|
||||
return manager.New(0)
|
||||
}
|
||||
|
||||
type DeviceManager struct {
|
||||
// for device agnostic fields/methods
|
||||
Address int `mapstructure:"address"`
|
||||
Bus int // i2c bus
|
||||
// mutable
|
||||
infoMu sync.RWMutex
|
||||
Name string `mapstructure:"name"`
|
||||
defaultName string
|
||||
// base manager
|
||||
Manager
|
||||
// config
|
||||
Config *viper.Viper
|
||||
// gRPC server
|
||||
pb.UnimplementedDeviceServer
|
||||
}
|
||||
|
||||
func NewDeviceManager(bus, addr int, config *viper.Viper, defaultName string) *DeviceManager {
|
||||
// new base dm
|
||||
m := NewManager()
|
||||
dm := &DeviceManager{
|
||||
Address: addr,
|
||||
Bus: bus,
|
||||
defaultName: defaultName,
|
||||
Manager: m,
|
||||
Config: config,
|
||||
}
|
||||
return dm
|
||||
}
|
||||
|
||||
func (m *DeviceManager) LoadConfig() error {
|
||||
|
||||
// setting default name
|
||||
mainKey := fmt.Sprintf("devices.%d", m.Address)
|
||||
nameKey := fmt.Sprintf("%s.name", mainKey)
|
||||
|
||||
if !m.Config.IsSet(nameKey) {
|
||||
m.Config.Set(nameKey, m.defaultName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *DeviceManager) Start() error {
|
||||
// start
|
||||
return m.Manager.Start()
|
||||
}
|
@ -0,0 +1,50 @@
|
||||
package device
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// Returns the correct manager for sensor/controller
|
||||
type Device interface {
|
||||
Start() error
|
||||
Exit() error
|
||||
IsActive() int
|
||||
SetDeviceManager(*DeviceManager)
|
||||
}
|
||||
|
||||
func New(bus, addr int, config *viper.Viper) (Device, error) {
|
||||
// returns correct device manager by ID
|
||||
var err error
|
||||
var defaultName string
|
||||
var m Device
|
||||
|
||||
switch addr {
|
||||
case 97:
|
||||
// DO
|
||||
defaultName = "DO Sensor"
|
||||
m = NewDOManager()
|
||||
case 99:
|
||||
// pH
|
||||
defaultName = "pH Sensor"
|
||||
m = NewPHManager()
|
||||
case 102:
|
||||
// RTD
|
||||
defaultName = "RTD Sensor"
|
||||
m = NewRTDManager()
|
||||
case 256:
|
||||
// PWM
|
||||
defaultName = "PWM Controller"
|
||||
m = NewPWMManager()
|
||||
default:
|
||||
err = errors.New(fmt.Sprintf("Error: device id %d unrecognized!", addr))
|
||||
}
|
||||
// setting device manager
|
||||
dm := NewDeviceManager(bus, addr, config, defaultName)
|
||||
m.SetDeviceManager(dm)
|
||||
// setting up gRPC server functionality
|
||||
|
||||
return m, err
|
||||
}
|
@ -0,0 +1,40 @@
|
||||
package device
|
||||
|
||||
// do sensor and methods
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type PHManager struct {
|
||||
// do sensor manager
|
||||
*SensorManager
|
||||
*Atlas
|
||||
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func NewPHManager() *PHManager {
|
||||
// atlas delays
|
||||
a := &Atlas{
|
||||
CalDelay: 900,
|
||||
ReadDelay: 900,
|
||||
}
|
||||
|
||||
sm := NewSensorManager()
|
||||
m := &PHManager{
|
||||
Atlas: a,
|
||||
SensorManager: sm,
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *PHManager) Start() error {
|
||||
// start sensor manager
|
||||
return m.SensorManager.Start(m.Atlas.TakeReading)
|
||||
}
|
||||
|
||||
func (m PHManager) String() string {
|
||||
// TODO
|
||||
return ""
|
||||
}
|
@ -0,0 +1,32 @@
|
||||
package device
|
||||
|
||||
// do sensor and methods
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type PWMManager struct {
|
||||
// do sensor manager
|
||||
*ControllerManager
|
||||
sync.RWMutex
|
||||
Frequency int
|
||||
DutyCycle int
|
||||
}
|
||||
|
||||
func NewPWMManager() *PWMManager {
|
||||
cm := NewControllerManager()
|
||||
return &PWMManager{ControllerManager: cm}
|
||||
}
|
||||
|
||||
// freq changing
|
||||
func (m *PWMManager) GetFrequency() (int, error) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return m.Frequency, nil
|
||||
}
|
||||
|
||||
func (m *PWMManager) String() string {
|
||||
// TODO
|
||||
return ""
|
||||
}
|
@ -0,0 +1,38 @@
|
||||
package device
|
||||
|
||||
// do sensor and methods
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type RTDManager struct {
|
||||
// do sensor manager
|
||||
*Atlas
|
||||
*SensorManager
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func NewRTDManager() *RTDManager {
|
||||
// atlas delays
|
||||
a := &Atlas{
|
||||
CalDelay: 600,
|
||||
ReadDelay: 600,
|
||||
}
|
||||
|
||||
sm := NewSensorManager()
|
||||
m := &RTDManager{
|
||||
Atlas: a,
|
||||
SensorManager: sm,
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *RTDManager) Start() error {
|
||||
return m.SensorManager.Start(m.Atlas.TakeReading)
|
||||
}
|
||||
|
||||
func (m *RTDManager) String() string {
|
||||
// TODO
|
||||
return ""
|
||||
}
|
@ -0,0 +1,93 @@
|
||||
package device
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type SensorManager struct {
|
||||
SampleRate int `mapstructure:"sample_rate"` // in (ms)
|
||||
|
||||
// sampling
|
||||
sampleMu sync.RWMutex
|
||||
LatestSample float32
|
||||
SampleTimestamp int64
|
||||
|
||||
*DeviceManager `mapstructure:",squash"`
|
||||
|
||||
// gRPC server
|
||||
pb.UnimplementedSensorServer
|
||||
}
|
||||
|
||||
func NewSensorManager() *SensorManager {
|
||||
s := &SensorManager{}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *SensorManager) SetDeviceManager(d *DeviceManager) {
|
||||
s.DeviceManager = d
|
||||
}
|
||||
|
||||
type takeReading func(int, int) (float64, error)
|
||||
|
||||
func (s *SensorManager) Start(f takeReading) error {
|
||||
|
||||
// loading config
|
||||
if err := s.LoadConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// starting
|
||||
if err := s.DeviceManager.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// starting monitoring
|
||||
go s.Monitor(f)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SensorManager) LoadConfig() error {
|
||||
// setting keys
|
||||
mainKey := fmt.Sprintf("devices.%d", s.Address)
|
||||
sampleKey := fmt.Sprintf("%s.sample_rate", mainKey)
|
||||
|
||||
if !s.Config.IsSet(sampleKey) {
|
||||
// no sample rate, default to 10s
|
||||
s.Config.Set(sampleKey, 10000)
|
||||
}
|
||||
|
||||
// loading lower
|
||||
s.DeviceManager.LoadConfig()
|
||||
|
||||
s.Config.UnmarshalKey(mainKey, s)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SensorManager) Monitor(f takeReading) {
|
||||
ch := make(chan struct{}) // hb chan
|
||||
go s.HeartBeat(ch, s.SampleRate, 2000, time.Millisecond)
|
||||
|
||||
var reading float64
|
||||
var err error
|
||||
for range ch {
|
||||
if reading, err = f(s.Bus, s.Address); err != nil {
|
||||
if !errors.Is(err, ErrReadFail) {
|
||||
// unknown error, panic
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Reading failed, skipping!\n")
|
||||
}
|
||||
// update sample
|
||||
if !errors.Is(err, ErrReadFail) {
|
||||
fmt.Printf("Got %f\n", reading)
|
||||
s.sampleMu.Lock()
|
||||
s.LatestSample = float32(reading)
|
||||
s.SampleTimestamp = time.Now.Unix()
|
||||
s.sampleMu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,42 @@
|
||||
syntax = "proto3";
|
||||
package grpc;
|
||||
|
||||
option go_package = "internal/pkg/grpc";
|
||||
|
||||
service device {
|
||||
// groups basic device interactions
|
||||
// get/set name based on request
|
||||
rpc Name(NameRequest) returns (NameResponse)
|
||||
}
|
||||
|
||||
message NameRequest {
|
||||
// empty for future expansion
|
||||
string Name = 1;
|
||||
}
|
||||
|
||||
message NameResponse {
|
||||
string Name = 1;
|
||||
}
|
||||
|
||||
service sensor {
|
||||
// sensor specific functions
|
||||
rpc Reading(ReadingRequest) returns (ReadingResponse)
|
||||
rpc SampleRate(SampleRateRequest) returns (SampleRateResponse)
|
||||
}
|
||||
|
||||
message ReadingRequest {
|
||||
// empty
|
||||
}
|
||||
|
||||
message ReadingResponse {
|
||||
string Reading = 1; // formatted reading "9.7 pH"
|
||||
int64 Timestamp = 2; // when the reading was taken
|
||||
}
|
||||
|
||||
message SampleRateRequest {
|
||||
int32 SampleRate = 1; // 0 to return current sample rate, value in seconds
|
||||
}
|
||||
|
||||
message SampleRateResponse {
|
||||
int32 SampleRate = 1; // returns the set sample rate
|
||||
}
|
@ -1,700 +0,0 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.0
|
||||
// protoc v3.6.1
|
||||
// source: internal/pkg/grpc/management.proto
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type GetDevicesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
|
||||
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"` // if unspecified, don't return any devs
|
||||
Refresh bool `protobuf:"varint,3,opt,name=refresh,proto3" json:"refresh,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetDevicesRequest) Reset() {
|
||||
*x = GetDevicesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetDevicesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetDevicesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetDevicesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetDevicesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetDevicesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *GetDevicesRequest) GetClientId() uint32 {
|
||||
if x != nil {
|
||||
return x.ClientId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *GetDevicesRequest) GetReactorId() uint32 {
|
||||
if x != nil {
|
||||
return x.ReactorId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *GetDevicesRequest) GetRefresh() bool {
|
||||
if x != nil {
|
||||
return x.Refresh
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type GetDevicesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
|
||||
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
|
||||
Devices []*Dev `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetDevicesResponse) Reset() {
|
||||
*x = GetDevicesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetDevicesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetDevicesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetDevicesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetDevicesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetDevicesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *GetDevicesResponse) GetClientId() uint32 {
|
||||
if x != nil {
|
||||
return x.ClientId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *GetDevicesResponse) GetReactorId() uint32 {
|
||||
if x != nil {
|
||||
return x.ReactorId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *GetDevicesResponse) GetDevices() []*Dev {
|
||||
if x != nil {
|
||||
return x.Devices
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeleteReactorRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
|
||||
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteReactorRequest) Reset() {
|
||||
*x = DeleteReactorRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteReactorRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteReactorRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteReactorRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteReactorRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteReactorRequest) Descriptor() ([]byte, []int) {
|
||||
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *DeleteReactorRequest) GetClientId() uint32 {
|
||||
if x != nil {
|
||||
return x.ClientId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteReactorRequest) GetReactorId() uint32 {
|
||||
if x != nil {
|
||||
return x.ReactorId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DeleteReactorResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
|
||||
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
|
||||
Success bool `protobuf:"varint,3,opt,name=success,proto3" json:"success,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteReactorResponse) Reset() {
|
||||
*x = DeleteReactorResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteReactorResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteReactorResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteReactorResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteReactorResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteReactorResponse) Descriptor() ([]byte, []int) {
|
||||
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *DeleteReactorResponse) GetClientId() uint32 {
|
||||
if x != nil {
|
||||
return x.ClientId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteReactorResponse) GetReactorId() uint32 {
|
||||
if x != nil {
|
||||
return x.ReactorId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteReactorResponse) GetSuccess() bool {
|
||||
if x != nil {
|
||||
return x.Success
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type DeleteReactorDeviceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
|
||||
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
|
||||
DevAddr int32 `protobuf:"varint,3,opt,name=devAddr,proto3" json:"devAddr,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceRequest) Reset() {
|
||||
*x = DeleteReactorDeviceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteReactorDeviceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteReactorDeviceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteReactorDeviceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteReactorDeviceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceRequest) GetClientId() uint32 {
|
||||
if x != nil {
|
||||
return x.ClientId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceRequest) GetReactorId() uint32 {
|
||||
if x != nil {
|
||||
return x.ReactorId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceRequest) GetDevAddr() int32 {
|
||||
if x != nil {
|
||||
return x.DevAddr
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type DeleteReactorDeviceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ClientId uint32 `protobuf:"varint,1,opt,name=clientId,proto3" json:"clientId,omitempty"`
|
||||
ReactorId uint32 `protobuf:"varint,2,opt,name=reactorId,proto3" json:"reactorId,omitempty"`
|
||||
DevAddr int32 `protobuf:"varint,3,opt,name=devAddr,proto3" json:"devAddr,omitempty"`
|
||||
Success bool `protobuf:"varint,4,opt,name=success,proto3" json:"success,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceResponse) Reset() {
|
||||
*x = DeleteReactorDeviceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteReactorDeviceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteReactorDeviceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteReactorDeviceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteReactorDeviceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceResponse) GetClientId() uint32 {
|
||||
if x != nil {
|
||||
return x.ClientId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceResponse) GetReactorId() uint32 {
|
||||
if x != nil {
|
||||
return x.ReactorId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceResponse) GetDevAddr() int32 {
|
||||
if x != nil {
|
||||
return x.DevAddr
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteReactorDeviceResponse) GetSuccess() bool {
|
||||
if x != nil {
|
||||
return x.Success
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type Dev struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` // either reactor id or dev addr
|
||||
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` // ["reactor","__ sensor",...]
|
||||
Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` // set by RLC/SM
|
||||
Data string `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` // set by RLC/SM
|
||||
Index uint32 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` // set by infostream to keep consistency
|
||||
}
|
||||
|
||||
func (x *Dev) Reset() {
|
||||
*x = Dev{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Dev) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Dev) ProtoMessage() {}
|
||||
|
||||
func (x *Dev) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_pkg_grpc_management_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Dev.ProtoReflect.Descriptor instead.
|
||||
func (*Dev) Descriptor() ([]byte, []int) {
|
||||
return file_internal_pkg_grpc_management_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *Dev) GetId() uint32 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Dev) GetType() string {
|
||||
if x != nil {
|
||||
return x.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Dev) GetStatus() string {
|
||||
if x != nil {
|
||||
return x.Status
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Dev) GetData() string {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Dev) GetIndex() uint32 {
|
||||
if x != nil {
|
||||
return x.Index
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_internal_pkg_grpc_management_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_internal_pkg_grpc_management_proto_rawDesc = []byte{
|
||||
0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67,
|
||||
0x72, 0x70, 0x63, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x67, 0x72, 0x70, 0x63, 0x22, 0x67, 0x0a, 0x11, 0x47, 0x65,
|
||||
0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x72,
|
||||
0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09,
|
||||
0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x66,
|
||||
0x72, 0x65, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x66, 0x72,
|
||||
0x65, 0x73, 0x68, 0x22, 0x73, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72,
|
||||
0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f,
|
||||
0x72, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x03,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x76, 0x52,
|
||||
0x07, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x22, 0x50, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09,
|
||||
0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
|
||||
0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x22, 0x6b, 0x0a, 0x15, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12,
|
||||
0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a,
|
||||
0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
|
||||
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x70, 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74,
|
||||
0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49,
|
||||
0x64, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x64, 0x12,
|
||||
0x18, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x41, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
|
||||
0x52, 0x07, 0x64, 0x65, 0x76, 0x41, 0x64, 0x64, 0x72, 0x22, 0x8b, 0x01, 0x0a, 0x1b, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44, 0x65, 0x76, 0x69, 0x63,
|
||||
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x63, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72,
|
||||
0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x61, 0x63, 0x74, 0x6f,
|
||||
0x72, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x76, 0x41, 0x64, 0x64, 0x72, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x64, 0x65, 0x76, 0x41, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a,
|
||||
0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
|
||||
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x6b, 0x0a, 0x03, 0x44, 0x65, 0x76, 0x12, 0x0e,
|
||||
0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12,
|
||||
0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79,
|
||||
0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
|
||||
0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69,
|
||||
0x6e, 0x64, 0x65, 0x78, 0x32, 0xf3, 0x01, 0x0a, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d,
|
||||
0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
|
||||
0x73, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69,
|
||||
0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70,
|
||||
0x63, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65,
|
||||
0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52,
|
||||
0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a,
|
||||
0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44,
|
||||
0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x44, 0x65, 0x76, 0x69,
|
||||
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x13, 0x5a, 0x11, 0x69, 0x6e,
|
||||
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_internal_pkg_grpc_management_proto_rawDescOnce sync.Once
|
||||
file_internal_pkg_grpc_management_proto_rawDescData = file_internal_pkg_grpc_management_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_internal_pkg_grpc_management_proto_rawDescGZIP() []byte {
|
||||
file_internal_pkg_grpc_management_proto_rawDescOnce.Do(func() {
|
||||
file_internal_pkg_grpc_management_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_pkg_grpc_management_proto_rawDescData)
|
||||
})
|
||||
return file_internal_pkg_grpc_management_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_internal_pkg_grpc_management_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
|
||||
var file_internal_pkg_grpc_management_proto_goTypes = []interface{}{
|
||||
(*GetDevicesRequest)(nil), // 0: grpc.GetDevicesRequest
|
||||
(*GetDevicesResponse)(nil), // 1: grpc.GetDevicesResponse
|
||||
(*DeleteReactorRequest)(nil), // 2: grpc.DeleteReactorRequest
|
||||
(*DeleteReactorResponse)(nil), // 3: grpc.DeleteReactorResponse
|
||||
(*DeleteReactorDeviceRequest)(nil), // 4: grpc.DeleteReactorDeviceRequest
|
||||
(*DeleteReactorDeviceResponse)(nil), // 5: grpc.DeleteReactorDeviceResponse
|
||||
(*Dev)(nil), // 6: grpc.Dev
|
||||
}
|
||||
var file_internal_pkg_grpc_management_proto_depIdxs = []int32{
|
||||
6, // 0: grpc.GetDevicesResponse.devices:type_name -> grpc.Dev
|
||||
0, // 1: grpc.management.GetDevices:input_type -> grpc.GetDevicesRequest
|
||||
2, // 2: grpc.management.DeleteReactor:input_type -> grpc.DeleteReactorRequest
|
||||
4, // 3: grpc.management.DeleteReactorDevice:input_type -> grpc.DeleteReactorDeviceRequest
|
||||
1, // 4: grpc.management.GetDevices:output_type -> grpc.GetDevicesResponse
|
||||
3, // 5: grpc.management.DeleteReactor:output_type -> grpc.DeleteReactorResponse
|
||||
5, // 6: grpc.management.DeleteReactorDevice:output_type -> grpc.DeleteReactorDeviceResponse
|
||||
4, // [4:7] is the sub-list for method output_type
|
||||
1, // [1:4] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_internal_pkg_grpc_management_proto_init() }
|
||||
func file_internal_pkg_grpc_management_proto_init() {
|
||||
if File_internal_pkg_grpc_management_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_internal_pkg_grpc_management_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetDevicesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_internal_pkg_grpc_management_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetDevicesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_internal_pkg_grpc_management_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteReactorRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_internal_pkg_grpc_management_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteReactorResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_internal_pkg_grpc_management_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteReactorDeviceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_internal_pkg_grpc_management_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteReactorDeviceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_internal_pkg_grpc_management_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Dev); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_internal_pkg_grpc_management_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 7,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_internal_pkg_grpc_management_proto_goTypes,
|
||||
DependencyIndexes: file_internal_pkg_grpc_management_proto_depIdxs,
|
||||
MessageInfos: file_internal_pkg_grpc_management_proto_msgTypes,
|
||||
}.Build()
|
||||
File_internal_pkg_grpc_management_proto = out.File
|
||||
file_internal_pkg_grpc_management_proto_rawDesc = nil
|
||||
file_internal_pkg_grpc_management_proto_goTypes = nil
|
||||
file_internal_pkg_grpc_management_proto_depIdxs = nil
|
||||
}
|
@ -1,54 +0,0 @@
|
||||
syntax = "proto3";
|
||||
package grpc;
|
||||
|
||||
option go_package = "internal/pkg/grpc";
|
||||
|
||||
service management {
|
||||
rpc GetDevices(GetDevicesRequest) returns (GetDevicesResponse);
|
||||
rpc DeleteReactor(DeleteReactorRequest) returns (DeleteReactorResponse);
|
||||
rpc DeleteReactorDevice(DeleteReactorDeviceRequest) returns (DeleteReactorDeviceResponse);
|
||||
}
|
||||
|
||||
message GetDevicesRequest {
|
||||
uint32 clientId = 1;
|
||||
uint32 reactorId = 2; // if unspecified, don't return any devs
|
||||
bool refresh = 3;
|
||||
}
|
||||
|
||||
message GetDevicesResponse {
|
||||
uint32 clientId = 1;
|
||||
uint32 reactorId = 2;
|
||||
repeated Dev devices = 3;
|
||||
}
|
||||
|
||||
message DeleteReactorRequest {
|
||||
uint32 clientId = 1;
|
||||
uint32 reactorId = 2;
|
||||
}
|
||||
|
||||
message DeleteReactorResponse {
|
||||
uint32 clientId = 1;
|
||||
uint32 reactorId = 2;
|
||||
bool success = 3;
|
||||
}
|
||||
|
||||
message DeleteReactorDeviceRequest {
|
||||
uint32 clientId = 1;
|
||||
uint32 reactorId = 2;
|
||||
int32 devAddr = 3;
|
||||
}
|
||||
|
||||
message DeleteReactorDeviceResponse {
|
||||
uint32 clientId = 1;
|
||||
uint32 reactorId = 2;
|
||||
int32 devAddr = 3;
|
||||
bool success = 4;
|
||||
}
|
||||
|
||||
message Dev {
|
||||
uint32 id = 1; // either reactor id or dev addr
|
||||
string type = 2; // ["reactor","__ sensor",...]
|
||||
string status = 3; // set by RLC/SM
|
||||
string data = 4; // set by RLC/SM
|
||||
uint32 index = 5; // set by infostream to keep consistency
|
||||
}
|
@ -1,177 +0,0 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc v3.6.1
|
||||
// source: internal/pkg/grpc/management.proto
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// ManagementClient is the client API for Management service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type ManagementClient interface {
|
||||
GetDevices(ctx context.Context, in *GetDevicesRequest, opts ...grpc.CallOption) (*GetDevicesResponse, error)
|
||||
DeleteReactor(ctx context.Context, in *DeleteReactorRequest, opts ...grpc.CallOption) (*DeleteReactorResponse, error)
|
||||
DeleteReactorDevice(ctx context.Context, in *DeleteReactorDeviceRequest, opts ...grpc.CallOption) (*DeleteReactorDeviceResponse, error)
|
||||
}
|
||||
|
||||
type managementClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewManagementClient(cc grpc.ClientConnInterface) ManagementClient {
|
||||
return &managementClient{cc}
|
||||
}
|
||||
|
||||
func (c *managementClient) GetDevices(ctx context.Context, in *GetDevicesRequest, opts ...grpc.CallOption) (*GetDevicesResponse, error) {
|
||||
out := new(GetDevicesResponse)
|
||||
err := c.cc.Invoke(ctx, "/grpc.management/GetDevices", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *managementClient) DeleteReactor(ctx context.Context, in *DeleteReactorRequest, opts ...grpc.CallOption) (*DeleteReactorResponse, error) {
|
||||
out := new(DeleteReactorResponse)
|
||||
err := c.cc.Invoke(ctx, "/grpc.management/DeleteReactor", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *managementClient) DeleteReactorDevice(ctx context.Context, in *DeleteReactorDeviceRequest, opts ...grpc.CallOption) (*DeleteReactorDeviceResponse, error) {
|
||||
out := new(DeleteReactorDeviceResponse)
|
||||
err := c.cc.Invoke(ctx, "/grpc.management/DeleteReactorDevice", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ManagementServer is the server API for Management service.
|
||||
// All implementations must embed UnimplementedManagementServer
|
||||
// for forward compatibility
|
||||
type ManagementServer interface {
|
||||
GetDevices(context.Context, *GetDevicesRequest) (*GetDevicesResponse, error)
|
||||
DeleteReactor(context.Context, *DeleteReactorRequest) (*DeleteReactorResponse, error)
|
||||
DeleteReactorDevice(context.Context, *DeleteReactorDeviceRequest) (*DeleteReactorDeviceResponse, error)
|
||||
mustEmbedUnimplementedManagementServer()
|
||||
}
|
||||
|
||||
// UnimplementedManagementServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedManagementServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedManagementServer) GetDevices(context.Context, *GetDevicesRequest) (*GetDevicesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetDevices not implemented")
|
||||
}
|
||||
func (UnimplementedManagementServer) DeleteReactor(context.Context, *DeleteReactorRequest) (*DeleteReactorResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteReactor not implemented")
|
||||
}
|
||||
func (UnimplementedManagementServer) DeleteReactorDevice(context.Context, *DeleteReactorDeviceRequest) (*DeleteReactorDeviceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteReactorDevice not implemented")
|
||||
}
|
||||
func (UnimplementedManagementServer) mustEmbedUnimplementedManagementServer() {}
|
||||
|
||||
// UnsafeManagementServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to ManagementServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeManagementServer interface {
|
||||
mustEmbedUnimplementedManagementServer()
|
||||
}
|
||||
|
||||
func RegisterManagementServer(s grpc.ServiceRegistrar, srv ManagementServer) {
|
||||
s.RegisterService(&Management_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _Management_GetDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetDevicesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ManagementServer).GetDevices(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.management/GetDevices",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ManagementServer).GetDevices(ctx, req.(*GetDevicesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Management_DeleteReactor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteReactorRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ManagementServer).DeleteReactor(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.management/DeleteReactor",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ManagementServer).DeleteReactor(ctx, req.(*DeleteReactorRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Management_DeleteReactorDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteReactorDeviceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ManagementServer).DeleteReactorDevice(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.management/DeleteReactorDevice",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ManagementServer).DeleteReactorDevice(ctx, req.(*DeleteReactorDeviceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// Management_ServiceDesc is the grpc.ServiceDesc for Management service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var Management_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "grpc.management",
|
||||
HandlerType: (*ManagementServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "GetDevices",
|
||||
Handler: _Management_GetDevices_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteReactor",
|
||||
Handler: _Management_DeleteReactor_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteReactorDevice",
|
||||
Handler: _Management_DeleteReactorDevice_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "internal/pkg/grpc/management.proto",
|
||||
}
|
@ -0,0 +1,88 @@
|
||||
package i2c
|
||||
|
||||
// file has general wrappers to interact with i2c-tools
|
||||
|
||||
import (
|
||||
"FRMS/internal/pkg/logging"
|
||||
"bytes"
|
||||
"fmt"
|
||||
_ "log"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func GetConnected(b int) (map[int]bool, error) {
|
||||
// Returns all the connected devices by address
|
||||
// might just do this in bash and make it easier
|
||||
bus := strconv.Itoa(b)
|
||||
devices := make(map[int]bool) // only keys
|
||||
cmd := exec.Command("i2cdetect", "-y", "-r", bus)
|
||||
var out bytes.Buffer
|
||||
var errs bytes.Buffer
|
||||
cmd.Stderr = &errs
|
||||
cmd.Stdout = &out
|
||||
if err := cmd.Run(); err != nil {
|
||||
logging.Debug(logging.DError, "I2C error performing scan. %v", errs.String())
|
||||
return devices, err
|
||||
}
|
||||
|
||||
outString := out.String()
|
||||
// could split by \n too
|
||||
split := strings.SplitAfter(outString, ":")
|
||||
// 1st entry is garbage headers and ending is always \n##:
|
||||
split = split[1:]
|
||||
// create empty slice for all the devices
|
||||
for i, v := range split {
|
||||
lst := strings.Index(v, "\n")
|
||||
trimmed := v[:lst]
|
||||
trimmed = strings.Trim(trimmed, " ")
|
||||
// trimmed now holds just possible sensor addresses
|
||||
count := strings.Split(trimmed, " ")
|
||||
for j, d := range count {
|
||||
// the first row has to be offset by 3 but after its just i*16 + j
|
||||
offset := 0
|
||||
if i == 0 {
|
||||
offset = 3
|
||||
}
|
||||
addr := i*16 + j + offset
|
||||
if !strings.Contains(d, "--") && !strings.Contains(d, "UU") {
|
||||
// active
|
||||
devices[addr] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return devices, nil
|
||||
}
|
||||
|
||||
func SendCmd(b, addr int, command string) (string, error) {
|
||||
// sends an arbituary commnd over specified bus to int
|
||||
// might make a base script for this too
|
||||
var cmd *exec.Cmd
|
||||
bus := strconv.Itoa(b)
|
||||
operation := "r20" // default read
|
||||
frmt_cmd := "" // empty cmd
|
||||
if command != "" {
|
||||
// command, do write
|
||||
operation = fmt.Sprintf("w%d", len(command)) // write
|
||||
// formatting cmd
|
||||
for _, char := range command {
|
||||
// loop over string
|
||||
frmt_cmd += fmt.Sprintf("0x%x", char)
|
||||
}
|
||||
cmd = exec.Command("i2ctransfer", "-y", bus, fmt.Sprintf("%s@0x%x", operation, addr), frmt_cmd)
|
||||
} else {
|
||||
// reading
|
||||
cmd = exec.Command("i2ctransfer", "-y", bus, fmt.Sprintf("%s@0x%x", operation, addr))
|
||||
}
|
||||
// exec command
|
||||
var out bytes.Buffer
|
||||
var errs bytes.Buffer
|
||||
cmd.Stderr = &errs
|
||||
cmd.Stdout = &out
|
||||
if err := cmd.Run(); err != nil {
|
||||
logging.Debug(logging.DError, "I2C error getting data! %v", err)
|
||||
return "", err
|
||||
}
|
||||
return out.String(), nil
|
||||
}
|
@ -1,6 +1,81 @@
|
||||
package influxdb
|
||||
|
||||
import (
	_ "fmt"

	_ "github.com/influxdata/influxdb-client-go/v2"
	"github.com/spf13/viper"
)
|
||||
|
||||
type DBInfo struct {
|
||||
URL string `mapstructure:"url"`
|
||||
Org string `mapstructure:"org,omitempty`
|
||||
Bucket string `mapstructure:"bucket,omitempty"`
|
||||
Token string `mapstructure:"token,omitempty"`
|
||||
// Client *influxdb2.Client
|
||||
}
|
||||
|
||||
// DBAdmin bundles the shared connection info with the viper config it
// was loaded from; intended for administrative operations (see
// GetReactorClient).
type DBAdmin struct {
	// struct for admin methods
	*DBInfo
	Config *viper.Viper
}
|
||||
|
||||
// DBClient bundles the shared connection info with the viper config it
// was loaded from; intended for reactor-side client operations.
type DBClient struct {
	// struct for client methods
	*DBInfo
	Config *viper.Viper
}
|
||||
|
||||
func NewDBInfo(config *viper.Viper) (*DBInfo, error) {
|
||||
db := &DBInfo{}
|
||||
// grabbing config vals
|
||||
err := config.UnmarshalKey("db", db)
|
||||
return db, err
|
||||
}
|
||||
|
||||
func NewDBClient(config *viper.Viper) (*DBClient, error) {
|
||||
|
||||
client := &DBClient{Config: config}
|
||||
// grabbing config vals
|
||||
var err error
|
||||
client.DBInfo, err = NewDBInfo(config)
|
||||
return client, err
|
||||
}
|
||||
|
||||
func NewDBAdmin(config *viper.Viper) (*DBAdmin, error) {
|
||||
admin := &DBAdmin{Config: config}
|
||||
var err error
|
||||
// creating client
|
||||
admin.DBInfo, err = NewDBInfo(config)
|
||||
return admin, err
|
||||
}
|
||||
|
||||
// base level funcs
|
||||
func (d *DBInfo) Start() error {
|
||||
// connect to DB based w/ info
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DBAdmin) GetReactorClient(id int) (url, bucket, org, token string, err error) {
|
||||
// given an id returns
|
||||
// (url, org, bucket, token, error) for said id
|
||||
/*
|
||||
client := influxdb2.NewClient(d.URL, d.Token)
|
||||
defer client.Close()
|
||||
bucket, err := client.BucketsAPI().FindBucketByName(context.Background(), id)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if d.ReactorExists(id) {
|
||||
// get corresponding reactor token and bucket
|
||||
}
|
||||
*/
|
||||
url = d.URL
|
||||
org = d.Org
|
||||
token = ""
|
||||
bucket = ""
|
||||
//err = errors.New("Unimpl")
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
|
@ -0,0 +1,100 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// basic manager for starting/stopping checks plus built in heartbeat for downtime detection
|
||||
// used across server/reactor
|
||||
|
||||
// Connection tracks reconnection attempts for exponential backoff
// (see Timeout/ResetConnections). The embedded mutex guards Attempts.
type Connection struct {
	Attempts float64 // attempts so far; float so it can feed math.Pow directly
	MaxAttempts int // max allowed before Timeout reports failure
	sync.Mutex
}
|
||||
|
||||
// Manager is a basic start/stop lifecycle tracker with built-in
// connection backoff and heartbeat support, shared across server and
// reactor code.
type Manager struct {
	*Connection // embedded for timeout stuff
	Active int32 // 0 = stopped, 1 = running; accessed atomically
}
|
||||
|
||||
func New(maxCon int) *Manager {
|
||||
|
||||
c := &Connection{MaxAttempts: maxCon}
|
||||
m := &Manager{
|
||||
Connection: c,
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Manager) Start() error {
|
||||
// atomically checks/updates status
|
||||
if atomic.CompareAndSwapInt32(&m.Active, 0, 1) {
|
||||
m.ResetConnections()
|
||||
return nil
|
||||
}
|
||||
// already running
|
||||
return errors.New("Manager already started!")
|
||||
}
|
||||
|
||||
func (m *Manager) Exit() error {
|
||||
if atomic.CompareAndSwapInt32(&m.Active, 1, 0) {
|
||||
return nil
|
||||
}
|
||||
return errors.New("Manager not active!")
|
||||
}
|
||||
|
||||
func (m *Manager) IsActive() int {
|
||||
return int(atomic.LoadInt32(&m.Active))
|
||||
}
|
||||
|
||||
// Heartbeat tracker
|
||||
|
||||
func (m *Manager) HeartBeat(ping chan struct{}, hb, interval int, scale time.Duration) {
|
||||
// pings channel every (HB + randInterval) * time.Duration
|
||||
// can be used anywhere a heartbeat is needed
|
||||
// closes channel on exit
|
||||
|
||||
if interval > 0 {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
for atomic.LoadInt32(&m.Active) > 0 {
|
||||
// atomoic read may cause memory leak, can revisit
|
||||
ping <- struct{}{} // no mem
|
||||
sleep := time.Duration(hb-interval) * scale
|
||||
if interval > 0 {
|
||||
sleep += time.Duration(rand.Intn(2*interval)) * scale
|
||||
}
|
||||
time.Sleep(sleep)
|
||||
}
|
||||
// exited, close chan
|
||||
close(ping)
|
||||
}
|
||||
|
||||
// connection timeout generator
|
||||
|
||||
func (c *Connection) Timeout() (time.Duration, error) {
|
||||
// exponential backoff
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if int(c.Attempts) < c.MaxAttempts {
|
||||
c.Attempts += 1
|
||||
// 50, 100, 200...
|
||||
to := time.Duration(50*math.Pow(2, c.Attempts)) * time.Millisecond
|
||||
return to, nil
|
||||
}
|
||||
return 0, errors.New("Connection Failed")
|
||||
}
|
||||
|
||||
func (c *Connection) ResetConnections() {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.Attempts = 0
|
||||
}
|
@ -0,0 +1,251 @@
|
||||
package reactor
|
||||
|
||||
// file describes reactor level coordinator and associated implementation
|
||||
|
||||
import (
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
"FRMS/internal/pkg/influxdb"
|
||||
"FRMS/internal/pkg/logging"
|
||||
"FRMS/internal/pkg/manager"
|
||||
"FRMS/internal/pkg/system"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// basic manager
|
||||
// I dont think I actually need this interface, package manager has a point
|
||||
type Manager interface {
|
||||
Start() error
|
||||
Exit() error
|
||||
Timeout() (time.Duration, error)
|
||||
HeartBeat(chan struct{}, int, int, time.Duration) // creates a hb
|
||||
}
|
||||
|
||||
func NewManager(max int) Manager {
|
||||
return manager.New(max)
|
||||
}
|
||||
|
||||
// db client
|
||||
type DBClient interface {
|
||||
//
|
||||
Start() error
|
||||
}
|
||||
|
||||
func NewDBClient(config *viper.Viper) (DBClient, error) {
|
||||
return influxdb.NewDBClient(config)
|
||||
}
|
||||
|
||||
// Server holds the central server's address as loaded from config.
type Server struct {
	Ip   string `mapstructure:"ip"`
	Port int    `mapstructure:"port"`
}

// ReactorInfo is the reactor's identity and settings, populated from
// the "reactor" config key (see LoadConfig).
type ReactorInfo struct {
	Name  string `mapstructure:"name,omitempty"`
	ID    int    `mapstructure:"id,omitempty"`
	Model string `mapstructure:"model,omitempty"`
	HB    int    `mapstructure:"heartbeat"` // heartbeat period (seconds)
	Bus   int    `mapstructure:"bus"`       // i2c bus number

	Server // embedded central-server address
}
|
||||
|
||||
type ReactorCoordinator struct {
|
||||
Manager // base manager
|
||||
Config *viper.Viper // config
|
||||
|
||||
ReactorInfo `mapstructure:",squash"`
|
||||
|
||||
Database DBClient
|
||||
|
||||
pb.MonitoringClient // grpc embedding
|
||||
|
||||
*DeviceCoordinator // struct for locking
|
||||
|
||||
Err chan error
|
||||
}
|
||||
|
||||
func NewCoordinator(config *viper.Viper, errCh chan error) *ReactorCoordinator {
|
||||
|
||||
m := NewManager(6) // max 6 attempts
|
||||
dc := NewDeviceCoordinator(config)
|
||||
|
||||
c := &ReactorCoordinator{
|
||||
Manager: m,
|
||||
Config: config,
|
||||
DeviceCoordinator: dc,
|
||||
Err: errCh,
|
||||
}
|
||||
|
||||
// this is going to be scuffed
|
||||
//c.DB = &DB{Bucket: "bb", Org: "ForeLight", URL: url, Token: "S1UZssBu6KPfHaQCt34pZFpyc5lzbH9XanYJWCkOI5FqLY7gq205C6FTH-CmugiPH6o2WoKlTkEuPgIfaJjAhw=="}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *ReactorCoordinator) Start() {
|
||||
// should discover hwinfo and sensors on its own
|
||||
// now setting up sensor managers
|
||||
var err error
|
||||
|
||||
if err = c.Manager.Start(); err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
// load config
|
||||
if err = c.LoadConfig(); err != nil { // loads info
|
||||
c.Err <- err
|
||||
}
|
||||
|
||||
if err = c.DeviceCoordinator.Start(c.ReactorInfo.Bus); err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
|
||||
// loading clients
|
||||
if c.Database, err = NewDBClient(c.Config); err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
|
||||
go c.Discover()
|
||||
go c.Database.Start()
|
||||
}
|
||||
|
||||
func (c *ReactorCoordinator) LoadConfig() error {
|
||||
|
||||
var err error
|
||||
|
||||
// get hb
|
||||
if !c.Config.IsSet("reactor.heartbeat") {
|
||||
// default to 5 seconds
|
||||
c.Config.Set("reactor.heartbeat", 5)
|
||||
}
|
||||
|
||||
// check id
|
||||
if !c.Config.IsSet("reactor.id") {
|
||||
// get from hw
|
||||
var id int
|
||||
if id, err = system.GetId("eth0"); err != nil {
|
||||
return err
|
||||
}
|
||||
c.Config.Set("reactor.id", id)
|
||||
}
|
||||
|
||||
// check Model
|
||||
if !c.Config.IsSet("reactor.model") {
|
||||
// get from hw
|
||||
var model string
|
||||
if model, err = system.GetModel(); err != nil {
|
||||
return err
|
||||
}
|
||||
c.Config.Set("reactor.model", model)
|
||||
}
|
||||
|
||||
// check i2c bus
|
||||
if !c.Config.IsSet("reactor.bus") {
|
||||
// get from hw
|
||||
var bus int
|
||||
if bus, err = system.GetBus(); err != nil {
|
||||
return err
|
||||
}
|
||||
c.Config.Set("reactor.bus", bus)
|
||||
}
|
||||
|
||||
// all good, unmarhsaling
|
||||
c.Config.UnmarshalKey("reactor", c)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ReactorCoordinator) Monitor() {
|
||||
// periodically grabs connected devs and updates list
|
||||
ch := make(chan struct{})
|
||||
go c.HeartBeat(ch, c.HB, 0, time.Second)
|
||||
|
||||
for range ch {
|
||||
// check devs and ping
|
||||
logging.Debug(logging.DClient, "RLC Pinging server")
|
||||
// ping central server with status
|
||||
go c.Ping()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ReactorCoordinator) Discover() {
|
||||
// sets up connection to central coordiantor
|
||||
conn, err := c.Connect(c.Ip, c.Port)
|
||||
if err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
defer conn.Close()
|
||||
client := pb.NewHandshakeClient(conn)
|
||||
req := &pb.ClientRequest{ClientId: uint32(c.ID), ClientType: "reactor"}
|
||||
resp, err := client.ClientDiscoveryHandler(context.Background(), req)
|
||||
if err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
c.Port = int(resp.GetServerPort()) // updating server port
|
||||
logging.Debug(logging.DClient, "RLC Central server reached, supplied port %v", c.Port)
|
||||
// connecting to manager now
|
||||
clientConn, err := c.Connect(c.Ip, c.Port)
|
||||
if err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
c.MonitoringClient = pb.NewMonitoringClient(clientConn)
|
||||
// manager
|
||||
go c.Monitor()
|
||||
|
||||
}
|
||||
|
||||
func (c *ReactorCoordinator) Connect(ip string, port int) (*grpc.ClientConn, error) {
|
||||
// function connects to central server and passes hwinfo
|
||||
var opts []grpc.DialOption
|
||||
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
var conn *grpc.ClientConn
|
||||
var err error
|
||||
for {
|
||||
conn, err = grpc.Dial(fmt.Sprintf("%v:%v", ip, port), opts...)
|
||||
code := status.Code(err)
|
||||
if code != 0 { // != OK
|
||||
if code == (5 | 14) { // service temp down
|
||||
var to time.Duration
|
||||
if to, err = c.Timeout(); err != nil {
|
||||
// from manager
|
||||
return &grpc.ClientConn{}, err
|
||||
}
|
||||
logging.Debug(logging.DClient, "Server currently unavailable, retrying in %v", to)
|
||||
time.Sleep(to)
|
||||
} else {
|
||||
return &grpc.ClientConn{}, err
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func (c *ReactorCoordinator) Ping() {
|
||||
// send device info to central coordinator
|
||||
fmt.Printf("Pinging server\n")
|
||||
|
||||
var devices []*pb.Device
|
||||
var err error
|
||||
|
||||
if devices, err = c.GetDeviceInfo(); err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
|
||||
// create request
|
||||
req := &pb.ReactorStatusPing{
|
||||
Id: int32(c.ID),
|
||||
Devices: devices,
|
||||
}
|
||||
|
||||
// ping server
|
||||
if _, err = c.ReactorStatusHandler(context.Background(), req); err != nil {
|
||||
c.Err <- err
|
||||
}
|
||||
}
|
@ -0,0 +1,134 @@
|
||||
package reactor
|
||||
|
||||
import (
|
||||
"FRMS/internal/pkg/device"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
"FRMS/internal/pkg/i2c"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// Created by rlc to manage devices
|
||||
|
||||
// device manager
|
||||
type DeviceManager interface {
|
||||
Start() error
|
||||
Exit() error
|
||||
IsActive() int
|
||||
}
|
||||
|
||||
func NewDeviceManager(bus, addr int, config *viper.Viper) (DeviceManager, error) {
|
||||
return device.New(bus, addr, config)
|
||||
}
|
||||
|
||||
// device coordinator itself
|
||||
type DeviceCoordinator struct {
|
||||
// base level manager for heartbeat
|
||||
Bus int // i2c bus
|
||||
Manager
|
||||
Config *viper.Viper
|
||||
|
||||
managersMu sync.RWMutex
|
||||
DeviceManagers map[int]DeviceManager
|
||||
}
|
||||
|
||||
func NewDeviceCoordinator(config *viper.Viper) *DeviceCoordinator {
|
||||
dm := make(map[int]DeviceManager)
|
||||
m := NewManager(0)
|
||||
c := &DeviceCoordinator{
|
||||
Manager: m,
|
||||
DeviceManagers: dm,
|
||||
Config: config,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) Start(bus int) error {
|
||||
var err error
|
||||
|
||||
if err = c.Manager.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
// i2c bus
|
||||
c.Bus = bus
|
||||
|
||||
go c.Monitor()
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) Monitor() {
|
||||
// monitor I2C for new devices
|
||||
ch := make(chan struct{})
|
||||
go c.HeartBeat(ch, 10, 0, time.Second)
|
||||
|
||||
for range ch {
|
||||
// on notification (10s)
|
||||
devs, err := i2c.GetConnected(c.Bus)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// update list
|
||||
go c.UpdateManagers(devs)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) UpdateManagers(active map[int]bool) {
|
||||
// updates managers
|
||||
c.managersMu.Lock()
|
||||
defer c.managersMu.Unlock()
|
||||
|
||||
for addr, dm := range c.DeviceManagers {
|
||||
_, ok := active[addr]
|
||||
|
||||
if ok && dm.IsActive() == 0 {
|
||||
// active and dm not
|
||||
if err := dm.Start(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else if !ok && dm.IsActive() == 1 {
|
||||
// not active and dm is
|
||||
if err := dm.Exit(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// remove from map
|
||||
delete(active, addr)
|
||||
}
|
||||
|
||||
for addr, _ := range active {
|
||||
// no manager, create one
|
||||
fmt.Printf("New device %d!\n", addr)
|
||||
|
||||
dm, err := NewDeviceManager(c.Bus, addr, c.Config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := dm.Start(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c.DeviceManagers[addr] = dm
|
||||
}
|
||||
}
|
||||
|
||||
func (c *DeviceCoordinator) GetDeviceInfo() ([]*pb.Device, error) {
|
||||
// gets device info for monitoring
|
||||
c.managersMu.RLock()
|
||||
defer c.managersMu.RUnlock()
|
||||
|
||||
var devices []*pb.Device
|
||||
|
||||
for addr, dm := range c.DeviceManagers {
|
||||
// looping over devices
|
||||
devices = append(devices, &pb.Device{
|
||||
Addr: int32(addr),
|
||||
Status: pb.Status(dm.IsActive()),
|
||||
})
|
||||
}
|
||||
|
||||
return devices, nil
|
||||
}
|
@ -1,107 +0,0 @@
|
||||
package reactor
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"context"
|
||||
"strings"
|
||||
"github.com/influxdata/influxdb-client-go/v2"
|
||||
"strconv"
|
||||
"time"
|
||||
//"log"
|
||||
//"fmt"
|
||||
//"net"
|
||||
//"FRMS/internal/pkg/logging"
|
||||
//"google.golang.org/grpc"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
)
|
||||
|
||||
// implements grpc handler and device data aggregater handler
|
||||
type DeviceStatus struct {
|
||||
Addr int
|
||||
Status string
|
||||
Type string
|
||||
Data string
|
||||
}
|
||||
|
||||
// get reactor/device status
|
||||
func (c *Coordinator) DevStatus(ch chan *DeviceStatus, a int, dm DeviceManager) {
|
||||
d := &DeviceStatus{Addr:a}
|
||||
d.Type = dm.GetType()
|
||||
d.Status = dm.GetStatus()
|
||||
d.Data = dm.GetData()
|
||||
ch <-d
|
||||
}
|
||||
|
||||
func (c *Coordinator) GetStatus(client influxdb2.Client) []*pb.Device {
|
||||
// db stuff
|
||||
api := client.WriteAPIBlocking(c.Org,c.Bucket)
|
||||
var wg sync.WaitGroup
|
||||
devs := []*pb.Device{}
|
||||
statusChan := make(chan *DeviceStatus)
|
||||
c.Devices.Lock()
|
||||
for a,dm := range c.Devices.Managers {
|
||||
wg.Add(1)
|
||||
go c.DevStatus(statusChan,a,dm)
|
||||
}
|
||||
c.Devices.Unlock()
|
||||
allDone := make(chan struct{})
|
||||
go func(){
|
||||
wg.Wait()
|
||||
allDone <-struct{}{}
|
||||
}() // once all the status are sent we send all done on the chan
|
||||
for {
|
||||
select{
|
||||
case s:= <-statusChan:
|
||||
//fmt.Printf("%v is %v\n",s.Type,s.Status)
|
||||
data := strings.Split(s.Data,",") // T:10C,H:102% -> T:10C H:10%
|
||||
for _, m := range data {
|
||||
var meas string
|
||||
splt := strings.Split(m,":") // T 10C or H 10%
|
||||
if splt[0] == "T" {
|
||||
meas = "Temperature"
|
||||
} else if splt[0] == "H" {
|
||||
meas = "Humidity"
|
||||
}
|
||||
val, err := strconv.ParseFloat(strings.Trim(splt[1]," %C\n"), 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p := influxdb2.NewPoint("measurements",map[string]string{"type":meas},map[string]interface{}{"val":val},time.Now())
|
||||
if err := api.WritePoint(context.Background(), p); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
devs = append(devs,&pb.Device{Addr:int32(s.Addr),Type:s.Type,Status:s.Status,Data:s.Data})
|
||||
wg.Done()
|
||||
case <-allDone:
|
||||
return devs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// grpc status update handler
|
||||
func (c *Coordinator) Ping(client influxdb2.Client) {
|
||||
// sends all device status to central coordinator
|
||||
devs := c.GetStatus(client)
|
||||
req := &pb.ReactorStatusPing{Id:c.Id,Devices:devs}
|
||||
_, err := c.MonitoringClient.ReactorStatusHandler(context.Background(),req)
|
||||
if err != nil {
|
||||
c.Err <-err
|
||||
go c.Exit()
|
||||
}
|
||||
}
|
||||
/*
|
||||
func (c *Coordinator) Register() {
|
||||
ip := c.hwinfo.Ip
|
||||
|
||||
if lis, err := net.Listen("tcp", fmt.Sprintf("%v:0",ip)); err != nil {
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
c.hwinfo.Port = lis.Addr().(*net.TCPAddr).Port
|
||||
grpcServer := grpc.NewServer()
|
||||
pb.RegisterMonitoringServer(grpcServer,c)
|
||||
go grpcServer.Serve(lis)
|
||||
}
|
||||
logging.Debug(logging.DStart, "Listening for pings on %v:%v\n",ip,c.hwinfo.Port)
|
||||
}
|
||||
*/
|
@ -1,260 +0,0 @@
|
||||
package reactor
|
||||
|
||||
// file describes reactor level coordinator and associated implementation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
"math"
|
||||
"FRMS/internal/pkg/system"
|
||||
"FRMS/internal/pkg/I2C"
|
||||
"FRMS/internal/pkg/sensor"
|
||||
"FRMS/internal/pkg/logging"
|
||||
"errors"
|
||||
"context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"github.com/influxdata/influxdb-client-go/v2"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
)
|
||||
|
||||
// Coordinator == Reactor Level Coordinator
|
||||
|
||||
type Coordinator struct {
|
||||
Ip string
|
||||
Port int // listener port
|
||||
MonitoringClient pb.MonitoringClient
|
||||
*hw
|
||||
Devices *DeviceManagers // struct for fine grain locking
|
||||
Err chan error
|
||||
mu sync.Mutex
|
||||
HB time.Duration
|
||||
PingTimer chan struct{}
|
||||
*DB
|
||||
Active active
|
||||
}
|
||||
|
||||
type DB struct {
|
||||
// struct to hold db connection info
|
||||
Org string
|
||||
Bucket string
|
||||
Token string
|
||||
URL string
|
||||
}
|
||||
|
||||
type active struct {
|
||||
bool
|
||||
int
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type hw struct {
|
||||
// store reactor info
|
||||
Model string
|
||||
Bus int
|
||||
Id uint32
|
||||
}
|
||||
|
||||
type DeviceManagers struct {
|
||||
Managers map[int]DeviceManager
|
||||
sync.Mutex
|
||||
}
|
||||
// basic devicemanager struct manipulations
|
||||
|
||||
type DeviceManager interface {
|
||||
Start()
|
||||
GetType() string
|
||||
GetStatus() string
|
||||
GetData() string
|
||||
}
|
||||
|
||||
type I2CDev interface {
|
||||
GetAddr() int
|
||||
GetData() string
|
||||
GetStatus() string
|
||||
GetType() string
|
||||
}
|
||||
|
||||
func NewDeviceManager(i2c I2CDev) DeviceManager {
|
||||
return sensor.NewDeviceManager(i2c)
|
||||
}
|
||||
|
||||
type I2CMonitor interface {
|
||||
Monitor()
|
||||
GetDevice(int) interface{ GetAddr() int; GetStatus() string; GetData() string; GetType() string}
|
||||
}
|
||||
|
||||
func NewI2CMonitor(b int,ch chan int) I2CMonitor {
|
||||
return I2C.NewMonitor(b, ch)
|
||||
}
|
||||
|
||||
func NewCoordinator(ip string,port int,ch chan error) *Coordinator {
|
||||
sen := new(DeviceManagers)
|
||||
sen.Managers = make(map[int]DeviceManager)
|
||||
c := &Coordinator{Err:ch,Devices:sen}
|
||||
c.Ip = ip
|
||||
c.Port = port
|
||||
c.hw = &hw{}
|
||||
c.HB = time.Duration(5 * time.Second)
|
||||
c.PingTimer = make(chan struct{})
|
||||
// this is going to be scuffed
|
||||
url := fmt.Sprintf("http://%s:8086",ip)
|
||||
fmt.Println(url)
|
||||
c.DB = &DB{Bucket:"bb",Org:"ForeLight",URL:url,Token:"S1UZssBu6KPfHaQCt34pZFpyc5lzbH9XanYJWCkOI5FqLY7gq205C6FTH-CmugiPH6o2WoKlTkEuPgIfaJjAhw=="}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Coordinator) Start() {
|
||||
// should discover hwinfo and sensors on its own
|
||||
// now setting up sensor managers
|
||||
// setting up hw stuff
|
||||
c.Activate()
|
||||
var err error
|
||||
c.Id, err = system.GetId("eth0")
|
||||
c.Model, err = system.GetModel()
|
||||
c.Bus, err = system.GetBus()
|
||||
if err != nil {
|
||||
c.Err <-err
|
||||
}
|
||||
go c.Monitor()
|
||||
go c.Discover()
|
||||
}
|
||||
|
||||
func (c *Coordinator) Monitor() {
|
||||
// function to automatically create and destroy sm
|
||||
// scuffedaf
|
||||
client := influxdb2.NewClient(c.URL,c.Token)
|
||||
defer client.Close()
|
||||
dch := make(chan int)
|
||||
im := NewI2CMonitor(c.Bus,dch)
|
||||
go im.Monitor()
|
||||
for c.IsActive() {
|
||||
select {
|
||||
case d := <-dch:
|
||||
i := im.GetDevice(d)
|
||||
go c.DeviceConnect(i)
|
||||
case <-c.PingTimer:
|
||||
go c.Ping(client)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Coordinator) HeartBeat() {
|
||||
for c.IsActive() {
|
||||
c.PingTimer <-struct{}{}
|
||||
logging.Debug(logging.DClient,"RLC Pinging server")
|
||||
time.Sleep(c.HB)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Coordinator) DeviceConnect(i2c I2CDev) {
|
||||
c.Devices.Lock()
|
||||
defer c.Devices.Unlock()
|
||||
addr := i2c.GetAddr()
|
||||
if dm, exists := c.Devices.Managers[addr]; !exists{
|
||||
dm := NewDeviceManager(i2c)
|
||||
c.Devices.Managers[addr] = dm
|
||||
go dm.Start()
|
||||
} else {
|
||||
go dm.Start()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Coordinator) Discover() {
|
||||
// sets up connection to central coordiantor
|
||||
conn, err := c.Connect(c.Ip, c.Port)
|
||||
if err != nil {
|
||||
c.Err <-err
|
||||
}
|
||||
defer conn.Close()
|
||||
client := pb.NewHandshakeClient(conn)
|
||||
req := &pb.ClientRequest{ClientId:c.Id,ClientType:"reactor"}
|
||||
resp, err := client.ClientDiscoveryHandler(context.Background(), req)
|
||||
if err != nil {
|
||||
c.Err <-err
|
||||
}
|
||||
c.Port = int(resp.GetServerPort()) // updating server port
|
||||
logging.Debug(logging.DClient,"RLC Central server reached, supplied port %v",c.Port)
|
||||
// connecting to manager now
|
||||
clientConn, err := c.Connect(c.Ip, c.Port)
|
||||
if err != nil {
|
||||
c.Err <-err
|
||||
}
|
||||
c.MonitoringClient = pb.NewMonitoringClient(clientConn)
|
||||
go c.HeartBeat()
|
||||
|
||||
}
|
||||
|
||||
func (c *Coordinator) Connect(ip string, port int) (*grpc.ClientConn, error) {
|
||||
// function connects to central server and passes hwinfo
|
||||
var opts []grpc.DialOption
|
||||
opts = append(opts,grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
var conn *grpc.ClientConn
|
||||
var err error
|
||||
for {
|
||||
conn, err = grpc.Dial(fmt.Sprintf("%v:%v",ip,port),opts...)
|
||||
code := status.Code(err)
|
||||
if code != 0 { // != OK
|
||||
if code == (5 | 14) { // service temp down
|
||||
to := c.Timeout()
|
||||
if to == 0 {
|
||||
err = errors.New("Failed to connect to central server")
|
||||
return &grpc.ClientConn{}, err
|
||||
}
|
||||
logging.Debug(logging.DClient,"Server currently unavailable, retrying in %v ms", to)
|
||||
time.Sleep(time.Duration(to) * time.Millisecond)
|
||||
} else {
|
||||
return &grpc.ClientConn{}, err
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func (c *Coordinator) Timeout() int {
|
||||
c.Active.Lock()
|
||||
defer c.Active.Unlock()
|
||||
if c.Active.int < 9 {
|
||||
v := int(5 * math.Pow(float64(2), float64(c.Active.int)))
|
||||
c.Active.int +=1
|
||||
return v
|
||||
} else {
|
||||
//excedded retries
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Coordinator) IsActive() bool {
|
||||
c.Active.Lock()
|
||||
defer c.Active.Unlock()
|
||||
return c.Active.bool
|
||||
}
|
||||
|
||||
func (c *Coordinator) Exit() bool {
|
||||
c.Active.Lock()
|
||||
defer c.Active.Unlock()
|
||||
if c.Active.bool {
|
||||
c.Active.bool = false
|
||||
logging.Debug(logging.DClient,"RLC Exiting...")
|
||||
return true
|
||||
} else {
|
||||
logging.Debug(logging.DError, "RLC Already Dead!")
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Coordinator) Activate() bool {
|
||||
c.Active.Lock()
|
||||
defer c.Active.Unlock()
|
||||
if c.Active.bool {
|
||||
logging.Debug(logging.DError,"RLC Already Started!")
|
||||
return false
|
||||
} else {
|
||||
logging.Debug(logging.DClient, "RLC Starting")
|
||||
c.Active.bool = true
|
||||
return c.Active.bool
|
||||
}
|
||||
}
|
@ -1,114 +0,0 @@
|
||||
package sensor
|
||||
|
||||
import (
|
||||
_"fmt"
|
||||
"time"
|
||||
"sync"
|
||||
"strings"
|
||||
_ "FRMS/internal/pkg/I2C"
|
||||
"log"
|
||||
)
|
||||
|
||||
type Manager struct {
|
||||
*Dev
|
||||
I2CDevice
|
||||
*Active
|
||||
Hb time.Duration
|
||||
}
|
||||
|
||||
type Active struct {
|
||||
sync.Mutex
|
||||
bool
|
||||
int
|
||||
}
|
||||
|
||||
type Dev struct {
|
||||
// last known values
|
||||
Addr int
|
||||
Type string
|
||||
Status string // could be more efficient but to hell with it
|
||||
Data string
|
||||
}
|
||||
|
||||
type I2CDevice interface {
|
||||
// basic device info
|
||||
GetAddr() int
|
||||
GetStatus() string
|
||||
GetType() string
|
||||
GetData() string
|
||||
}
|
||||
|
||||
func NewDeviceManager(i2c I2CDevice) *Manager {
|
||||
m := &Manager{Hb:time.Duration(1*time.Second)}
|
||||
m.I2CDevice = i2c
|
||||
m.Active = &Active{}
|
||||
m.Dev = &Dev{Addr:i2c.GetAddr(),Type:i2c.GetType(),Status:i2c.GetStatus(),Data:i2c.GetData()}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Manager) Start() {
|
||||
// goal is to start a long running monitoring routine
|
||||
if !m.Activate() {
|
||||
log.Fatal("Manager already running!")
|
||||
} // atomically activated if this runs
|
||||
// go m.Monitor()
|
||||
}
|
||||
|
||||
func (m *Manager) Exit() {
|
||||
if !m.Deactivate() {
|
||||
log.Fatal("Manager already exited!")
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) GetType() string {
|
||||
return m.Type
|
||||
}
|
||||
|
||||
func (m *Manager) GetStatus() string {
|
||||
m.Status = m.I2CDevice.GetStatus()
|
||||
if m.IsActive() && strings.Contains(m.Status,"KILLED") {
|
||||
m.Exit()
|
||||
}
|
||||
return m.Status
|
||||
}
|
||||
|
||||
func (m *Manager) GetData() string {
|
||||
m.Data = m.I2CDevice.GetData()
|
||||
return m.Data
|
||||
}
|
||||
|
||||
func (m *Manager) GetAddr() int {
|
||||
return m.Addr
|
||||
}
|
||||
|
||||
// atomic activation and deactivation
|
||||
func (a *Active) Activate() bool {
|
||||
// returns true if success, false otherwise
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
if a.bool { // already active
|
||||
return false
|
||||
} else {
|
||||
a.bool = true
|
||||
a.int = 0
|
||||
return a.bool
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Active) Deactivate() bool {
|
||||
// returns true if success false otherise
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
if a.bool {
|
||||
a.bool = false
|
||||
return true
|
||||
} else { // already deactivated
|
||||
return a.bool // false
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Active) IsActive() bool {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
return a.bool
|
||||
}
|
@ -1,6 +0,0 @@
|
||||
package sensor
|
||||
|
||||
import (
|
||||
_ "fmt"
|
||||
)
|
||||
|
@ -1,149 +1,149 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
"FRMS/internal/pkg/logging"
|
||||
"FRMS/internal/pkg/manager"
|
||||
"time"
|
||||
_ "log"
|
||||
|
||||
//"FRMS/internal/pkg/device"
|
||||
"context"
|
||||
"sync"
|
||||
"FRMS/internal/pkg/logging"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
"fmt"
|
||||
_ "log"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// this package will implement a reactor coordinator and associated go routines
|
||||
// manager stuff
|
||||
|
||||
type ReactorManager struct {
|
||||
*Manager
|
||||
StatusMon *StatusMonitor
|
||||
*devstatus
|
||||
type Manager interface {
|
||||
Start() error // status checks
|
||||
Exit() error
|
||||
Timeout() (time.Duration, error) // TO Generator
|
||||
}
|
||||
|
||||
type devstatus struct {
|
||||
sync.Mutex
|
||||
Devs map[uint32]*DeviceInfo
|
||||
func NewManager(max int) Manager {
|
||||
// takes a heartbeat and max connection attempts
|
||||
return manager.New(max)
|
||||
}
|
||||
|
||||
func NewReactorManager(c *Client,sys *SystemViewer,err chan error) GeneralManager {
|
||||
r := &ReactorManager{}
|
||||
di := make(map[uint32]*DeviceInfo)
|
||||
r.devstatus = &devstatus{Devs:di}
|
||||
r.Manager = NewManager(err)
|
||||
r.StatusMon = NewStatusMonitor("Reactor",c.Id,sys)
|
||||
return r
|
||||
type ReactorManager struct {
|
||||
Manager // base manager interface
|
||||
// *ClientManager // client manager (OUTDATED)
|
||||
*Client // access to ID etc
|
||||
// StatusMon *StatusMonitor putting on pause
|
||||
// *ReactorDevices
|
||||
Config *viper.Viper // config to update
|
||||
Err chan error
|
||||
}
|
||||
|
||||
func (r *ReactorManager) Start() {
|
||||
r.Manager.Start()
|
||||
logging.Debug(logging.DStart,"RMA %v starting", r.Id)
|
||||
go r.StatusMon.Send(&DeviceInfo{Id:r.Id,Type:"Reactor",Status:"[green]ONLINE[white]"},"Reactor")
|
||||
//conn := r.Connect()
|
||||
//empty := &grpc.ClientConn{}
|
||||
//if conn != empty {
|
||||
// type ReactorDevices struct {
|
||||
// // device struct
|
||||
// Devices map[int]DeviceManager
|
||||
// sync.RWMutex
|
||||
// }
|
||||
}
|
||||
|
||||
func (r *ReactorManager) Exit() {
|
||||
r.Manager.Exit()
|
||||
logging.Debug(logging.DExit, "RMA %v exiting", r.Id)
|
||||
go r.StatusMon.Send(&DeviceInfo{Id:r.Id,Type:"Reactor",Status:"[red]OFFLINE[white]",Data:fmt.Sprintf("Last Seen %v",time.Now().Format("Mon at 03:04:05pm MST"))},"Reactor")
|
||||
r.devstatus.Lock()
|
||||
defer r.devstatus.Unlock()
|
||||
for _, d := range r.Devs {
|
||||
newd := d
|
||||
newd.Status = "[yellow]UNKOWN[white]"
|
||||
r.Devs[newd.Id] = newd
|
||||
go r.StatusMon.Send(newd,"Device")
|
||||
func NewReactorManager(cl *Client, config *viper.Viper, errCh chan error) *ReactorManager {
|
||||
// making managers
|
||||
m := NewManager(6)
|
||||
r := &ReactorManager{
|
||||
Manager: m,
|
||||
Client: cl,
|
||||
Config: config,
|
||||
Err: errCh,
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *ReactorManager) Connect() *grpc.ClientConn {
|
||||
// establish gRPC conection with reactor
|
||||
var opts []grpc.DialOption
|
||||
var conn *grpc.ClientConn
|
||||
opts = append(opts,grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
|
||||
for {
|
||||
if !r.IsActive() {
|
||||
logging.Debug(logging.DClient,"RMA %v No longer active, aborting connection attempt",r.Id)
|
||||
return &grpc.ClientConn{}
|
||||
}
|
||||
var err error
|
||||
conn, err = grpc.Dial(fmt.Sprintf("%v:%v",r.Ip,r.Port),opts...)
|
||||
// error handling
|
||||
code := status.Code(err)
|
||||
if code != 0 { // != OK
|
||||
if code == (5 | 14) { // unavailable or not found
|
||||
to := r.Timeout()
|
||||
if to == 0 {
|
||||
logging.Debug(logging.DClient,"RMA %v Client not responding",r.Id)
|
||||
return &grpc.ClientConn{}
|
||||
func (r *ReactorManager) Start() error {
|
||||
// allows for extra stuff
|
||||
logging.Debug(logging.DStart, "RMA %v starting", r.Id)
|
||||
return r.Manager.Start()
|
||||
//go r.StatusMon.Send(&DeviceInfo{Id: r.Id, Type: "Reactor", Status: "[green]ONLINE[white]"}, "Reactor")
|
||||
}
|
||||
logging.Debug(logging.DClient,"RMA %v Client currently down, retrying in %v ms",r.Id, to)
|
||||
time.Sleep(time.Duration(to) * time.Millisecond)
|
||||
|
||||
} else {
|
||||
logging.Debug(logging.DError,"RMA %v GRPC ERROR: %v",r.Id, code)
|
||||
r.Err <- err
|
||||
}
|
||||
}
|
||||
break;
|
||||
func (r *ReactorManager) Exit() error {
|
||||
// allows for extra stuff
|
||||
logging.Debug(logging.DExit, "RMA %v exiting", r.Id)
|
||||
return r.Manager.Exit()
|
||||
//go r.StatusMon.Send(&DeviceInfo{Id: r.Id, Type: "Reactor", Status: "[red]OFFLINE[white]", Data: fmt.Sprintf("Last Seen %v", time.Now().Format("Mon at 03:04:05pm MST"))}, "Reactor")
|
||||
}
|
||||
return conn
|
||||
|
||||
func (r *ReactorManager) UpdateClient(cl *Client) error {
|
||||
// this is probably unnessecary
|
||||
fmt.Printf("Reactor Manager %d updating client!\n", r.Id)
|
||||
r.Client = cl
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ReactorManager) ReactorStatusHandler(ctx context.Context, req *pb.ReactorStatusPing) (*pb.ReactorStatusResponse, error) {
|
||||
// function client will call to update reactor information
|
||||
//go r.PingReset()
|
||||
fmt.Printf("Recieved ping from %d!\n", req.GetId())
|
||||
// update devices/sensors
|
||||
for _, dev := range req.GetDevices() {
|
||||
d := &DeviceInfo{Id:uint32(dev.GetAddr()),Type:dev.GetType(),Status:dev.GetStatus(),Data:dev.GetData()}
|
||||
go r.UpdateDevice(d)
|
||||
fmt.Printf("Device %d is %s ", dev.GetAddr(), dev.GetStatus().String())
|
||||
}
|
||||
return &pb.ReactorStatusResponse{Id:r.Id}, nil
|
||||
fmt.Printf("\n")
|
||||
// go r.UpdateDevices(req.GetDevices())
|
||||
|
||||
return &pb.ReactorStatusResponse{Id: int32(r.Id)}, nil
|
||||
}
|
||||
|
||||
// // device stuff
|
||||
|
||||
/*
|
||||
func (r *ReactorManager) Monitor(conn *grpc.ClientConn) {
|
||||
defer conn.Close()
|
||||
client := pb.NewMonitoringClient(conn)
|
||||
for r.IsActive() {
|
||||
req := &pb.ReactorStatusRequest{Id:r.Id}
|
||||
resp, err := client.GetReactorStatus(context.Background(),req)
|
||||
code := status.Code(err)
|
||||
if code != 0 { // if != OK
|
||||
logging.Debug(logging.DClient,"RMA %v Reactor not responding! Code: %v\n", r.Id,code)
|
||||
r.devstatus.Lock()
|
||||
for _, d := range r.Devs {
|
||||
newd := d
|
||||
newd.Status = "[yellow]UNKNOWN[white]"
|
||||
r.Devs[newd.Id] = newd
|
||||
go r.StatusMon.Send(newd,"Device")
|
||||
}
|
||||
r.devstatus.Unlock()
|
||||
r.Exit()
|
||||
break;
|
||||
}
|
||||
for _,v := range resp.GetDevices() {
|
||||
d := &DeviceInfo{Id:uint32(v.GetAddr()),Type:v.GetType(),Status:v.GetStatus(),Data:v.GetData()}
|
||||
go r.UpdateDevice(d)
|
||||
}
|
||||
time.Sleep(r.Hb) // time between sensor pings
|
||||
}
|
||||
}
|
||||
*/
|
||||
func (r *ReactorManager) UpdateDevice(d *DeviceInfo) {
|
||||
r.devstatus.Lock()
|
||||
defer r.devstatus.Unlock()
|
||||
if olddev, ok := r.Devs[d.Id]; !ok {
|
||||
// new device
|
||||
r.Devs[d.Id] = d
|
||||
go r.StatusMon.Send(d,"Device")
|
||||
} else if olddev.Status != d.Status || olddev.Data != d.Data {
|
||||
// dev status or data has changed
|
||||
r.Devs[d.Id] = d
|
||||
go r.StatusMon.Send(d,"Device")
|
||||
}
|
||||
}
|
||||
// type DeviceManager interface {
|
||||
// LoadConfig() error
|
||||
// UpdateStatus(string) error
|
||||
// String() string // printing
|
||||
// }
|
||||
|
||||
// func NewDeviceManager(addr int, config *viper.Viper, prefix string) (DeviceManager, error) {
|
||||
// // returns a manager struct
|
||||
// return device.NewDeviceManager(addr, config, prefix)
|
||||
// }
|
||||
|
||||
//func (r *ReactorManager) UpdateDevices(devs []*pb.Device) {
|
||||
// // pass updates to correct manager
|
||||
// r.ReactorDevices.RLock() // read lock only
|
||||
// defer r.ReactorDevices.RUnlock()
|
||||
|
||||
// for _, dev := range devs {
|
||||
// // looping over devs
|
||||
// if dm, ok := r.ReactorDevices.Devices[int(dev.GetAddr())]; ok {
|
||||
// // device manager found
|
||||
// go dm.UpdateStatus(dev.GetStatus().String())
|
||||
// //fmt.Println(dm)
|
||||
// } else {
|
||||
// // not found
|
||||
// go r.AddDevice(dev, r.Id, r.Config, r.Err)
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
|
||||
// func (r *ReactorDevices) AddDevice(dev *pb.Device, id int, config *viper.Viper, errCh chan error) {
|
||||
|
||||
// // setting vars
|
||||
// prefix := fmt.Sprintf("reactors.%d.", id)
|
||||
// addr := int(dev.GetAddr())
|
||||
// var dm DeviceManager
|
||||
// var err error
|
||||
// // write locking
|
||||
// r.Lock()
|
||||
// defer r.Unlock()
|
||||
|
||||
// if dm, err = NewDeviceManager(addr, config, prefix); err != nil {
|
||||
// errCh <- err
|
||||
// }
|
||||
|
||||
// // setting status
|
||||
// if err = dm.UpdateStatus(dev.GetStatus().String()); err != nil {
|
||||
// errCh <- err
|
||||
// }
|
||||
|
||||
// // loading config
|
||||
// if err = dm.LoadConfig(); err != nil {
|
||||
// errCh <- err
|
||||
// }
|
||||
// r.Devices[int(addr)] = dm
|
||||
// }
|
||||
|
@ -1,121 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
// "fmt"
|
||||
"time"
|
||||
"sync"
|
||||
// "net"
|
||||
// "log"
|
||||
"context"
|
||||
"FRMS/internal/pkg/logging"
|
||||
// "google.golang.org/grpc"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
)
|
||||
|
||||
// implement tui specific manager to be called for each client conn
|
||||
|
||||
// TUIManager handles a single connected TUI client on the server side,
// combining the generic manager with status delivery and a liveness timeout.
type TUIManager struct {
	*Manager                         // embedded manager for access to methods and client
	StatusMon *StatusMonitor         // use it for all devs coming in
	Err       chan error             // fatal errors are reported upstream here
	*Timeout                         // tracks client liveness via ping timestamps
	*pb.UnimplementedManagementServer
}
|
||||
|
||||
// Timeout tracks when a client was last seen and how long it may stay
// silent before being considered gone. The embedded mutex guards LastSeen.
type Timeout struct {
	Alert    chan bool     // NOTE(review): never used in the visible code — confirm
	LastSeen time.Time     // timestamp of the most recent ping
	TO       time.Duration // allowed silence window
	sync.Mutex
}
|
||||
|
||||
func NewTUIManager(c *Client, sys *SystemViewer, err chan error) GeneralManager {
|
||||
m := NewManager(err)
|
||||
t := &TUIManager{Err: err}
|
||||
alert := make(chan bool)
|
||||
t.Timeout = &Timeout{Alert:alert,TO:time.Duration(2500*time.Millisecond)} // short time outs are fine because we will just rejoin
|
||||
t.Manager = m
|
||||
t.StatusMon = NewStatusMonitor("TUI",c.Id,sys)
|
||||
t.Manager.UpdateClient(c)
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *TUIManager) Start() {
|
||||
//
|
||||
t.PingReset()
|
||||
t.Manager.Start()
|
||||
logging.Debug(logging.DStart,"TMA %v starting", t.Id)
|
||||
go t.Timeoutd()
|
||||
//go t.Monitor(conn)
|
||||
}
|
||||
|
||||
// Exit shuts down the embedded manager, then logs the departure.
func (t *TUIManager) Exit() {
	t.Manager.Exit()
	logging.Debug(logging.DExit, "TMA %v exiting", t.Id)
}
|
||||
|
||||
func (t *Timeout) PingReset() {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
t.LastSeen = time.Now()
|
||||
}
|
||||
|
||||
func (t *TUIManager) Timeoutd() {
|
||||
for t.IsActive() {
|
||||
if sleep, elapsed := t.Elapsed(); elapsed {
|
||||
// timeout elapsed
|
||||
logging.Debug(logging.DClient,"TMA %V client not responding", t.Id)
|
||||
t.Exit()
|
||||
} else {
|
||||
time.Sleep(sleep)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Timeout) Elapsed() (time.Duration, bool) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
now := time.Now()
|
||||
if now.After(t.LastSeen.Add(t.TO)) {
|
||||
// timeout expired
|
||||
return 0 * time.Second, true
|
||||
} else {
|
||||
sleep := t.LastSeen.Add(t.TO).Sub(now)
|
||||
return sleep, false
|
||||
}
|
||||
}
|
||||
|
||||
// tui client requests and logic will be down here
|
||||
|
||||
func (t *TUIManager) GetDevices(ctx context.Context, req *pb.GetDevicesRequest) (*pb.GetDevicesResponse, error) {
|
||||
go t.PingReset()
|
||||
devices := []*pb.Dev{}
|
||||
resp := &pb.GetDevicesResponse{ClientId:t.Id,Devices:devices}
|
||||
if req.GetReactorId() > 0 || req.GetRefresh() {
|
||||
logging.Debug(logging.DClient,"TMA %v client requested devs from %v",t.Id,req.GetReactorId())
|
||||
resp.ReactorId = req.GetReactorId()
|
||||
t.StatusMon.UpdateListener(t.Id, req.GetReactorId())
|
||||
}
|
||||
devs := t.StatusMon.GetBuffer() // always empty buffer
|
||||
|
||||
for _, v := range devs {
|
||||
resp.Devices = append(resp.Devices, &pb.Dev{Id:v.Id,Type:v.Type,Status:v.Status,Data:v.Data,Index:v.Index})
|
||||
}
|
||||
if len(resp.Devices) > 0 {
|
||||
logging.Debug(logging.DClient,"TMA %v sending %v devices to client" ,t.Id, len(resp.Devices))
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (t *TUIManager) DeleteReactors(ctx context.Context, req *pb.DeleteReactorRequest) (*pb.DeleteReactorResponse, error) {
|
||||
go t.PingReset()
|
||||
//
|
||||
return &pb.DeleteReactorResponse{}, nil
|
||||
}
|
||||
|
||||
func (t *TUIManager) DeleteReactorDevice(ctx context.Context, req *pb.DeleteReactorDeviceRequest) (*pb.DeleteReactorDeviceResponse, error) {
|
||||
go t.PingReset()
|
||||
//
|
||||
return &pb.DeleteReactorDeviceResponse{}, nil
|
||||
}
|
||||
|
@ -1,147 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
"math"
|
||||
"context"
|
||||
"FRMS/internal/pkg/system"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
pb "FRMS/internal/pkg/grpc"
|
||||
)
|
||||
|
||||
// this package will interact with the server to get system status
|
||||
|
||||
// TUIClient is the terminal client's connection to the central server.
type TUIClient struct {
	Id         uint32           // client id derived from the NIC (see NewTUIClient)
	Ip         string           // server address
	Port       int              // replaced by the server-assigned port after the handshake
	ClientConn *grpc.ClientConn // live connection used for Management RPCs
	Active                      // liveness flag + retry counter
}
|
||||
|
||||
// Active bundles a liveness flag with a retry counter, both guarded by
// the embedded mutex. NOTE(review): the anonymous bool/int fields are
// only reachable as Active.bool / Active.int — consider naming them.
type Active struct {
	sync.Mutex
	bool // active flag
	int  // retry/backoff attempt counter
}
|
||||
|
||||
func NewTUIClient(ip string, port int, ifconfig string) *TUIClient {
|
||||
id, err := system.GetId(ifconfig)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
t := &TUIClient{Id:id,Ip:ip,Port:port}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *TUIClient) Start() error {
|
||||
t.Connect()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TUIClient) Timeout() int {
|
||||
t.Active.Lock()
|
||||
defer t.Active.Unlock()
|
||||
if t.Active.int < 9 {
|
||||
v := int( 5 * math.Pow(float64(2), float64(t.Active.int)))
|
||||
t.Active.int += 1
|
||||
return v
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TUIClient) Connect() {
|
||||
// connect to server and register as client
|
||||
var conn *grpc.ClientConn
|
||||
var err error
|
||||
var opts []grpc.DialOption
|
||||
opts = append(opts,grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
for {
|
||||
conn, err = grpc.Dial(fmt.Sprintf("%v:%v",t.Ip,t.Port),opts...)
|
||||
code := status.Code(err)
|
||||
if code != 0 {
|
||||
if code == (5 | 14) {
|
||||
to := t.Timeout()
|
||||
if to == 0 {
|
||||
log.Fatal("Failed to connect to central server")
|
||||
}
|
||||
fmt.Printf("Server currently down, reconnecting in %v ms\n",to)
|
||||
time.Sleep(time.Duration(to) * time.Millisecond)
|
||||
} else {
|
||||
log.Fatal("Central server currently unavailable")
|
||||
}
|
||||
}
|
||||
//t.client = pb.NewManagementClient(conn)
|
||||
break;
|
||||
}
|
||||
// handle handshake logic here
|
||||
client := pb.NewHandshakeClient(conn)
|
||||
req := &pb.ClientRequest{ClientId:t.Id,ClientType:"tui"}
|
||||
resp, err := client.ClientDiscoveryHandler(context.Background(),req)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
conn.Close() // closing old connection
|
||||
// setting up server connection with provided port
|
||||
t.Port = int(resp.GetServerPort())
|
||||
for {
|
||||
conn, err = grpc.Dial(fmt.Sprintf("%v:%v",t.Ip,t.Port),opts...)
|
||||
code := status.Code(err)
|
||||
if code != 0 {
|
||||
if code == (5 | 14) {
|
||||
to := t.Timeout()
|
||||
if to == 0 {
|
||||
log.Fatal("Failed to connect to central server")
|
||||
}
|
||||
fmt.Printf("Server currently down, reconnecting in %v ms\n",to)
|
||||
time.Sleep(time.Duration(to) * time.Millisecond)
|
||||
} else {
|
||||
log.Fatal("Central server currently unavailable")
|
||||
}
|
||||
}
|
||||
t.ClientConn = conn
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TUIClient) GetDevices(id ...uint32) (map[uint32]*Device, error) {
|
||||
// returns
|
||||
req := &pb.GetDevicesRequest{ClientId:t.Id}
|
||||
if len(id) > 0 {
|
||||
if id[0] == 0 {
|
||||
req.Refresh = true
|
||||
} else {
|
||||
req.ReactorId = id[0]
|
||||
}
|
||||
}
|
||||
r := make(map[uint32]*Device)
|
||||
client := pb.NewManagementClient(t.ClientConn)
|
||||
resp, err := client.GetDevices(context.Background(), req)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
for _, v := range resp.GetDevices() {
|
||||
r[v.GetId()] = &Device{Type:v.GetType(),Status:v.GetStatus(),Id:v.GetId(),Data:v.GetData(),Index:v.GetIndex()}
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
// DeleteReactor asks the server to delete a reactor.
// NOTE(review): the id parameter is never placed in the request — the
// server receives an empty DeleteReactorRequest. Confirm the pb message
// fields and populate them, or this call can only ever be a no-op.
func (t *TUIClient) DeleteReactor(id uint32) error {
	req := &pb.DeleteReactorRequest{}
	client := pb.NewManagementClient(t.ClientConn)
	_, err := client.DeleteReactor(context.Background(), req)
	return err
}
|
||||
|
||||
// DeleteReactorDevice asks the server to delete a device from a reactor.
// NOTE(review): neither id nor addr is placed in the request — the
// server receives an empty DeleteReactorDeviceRequest. Confirm the pb
// message fields and populate them.
func (t *TUIClient) DeleteReactorDevice(id uint32, addr int) error {
	req := &pb.DeleteReactorDeviceRequest{}
	client := pb.NewManagementClient(t.ClientConn)
	_, err := client.DeleteReactorDevice(context.Background(), req)
	return err
}
|
@ -1,232 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"FRMS/internal/pkg/logging"
|
||||
"github.com/rivo/tview"
|
||||
_ "github.com/gdamore/tcell/v2"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Device is the TUI-side snapshot of one list row — either a reactor or
// one of its devices, distinguished by Type.
type Device struct {
	Id     uint32 // reactor id, or device id rendered in hex (see SelectDevice)
	Type   string // "Reactor" or a device type name
	Status string
	Data   string
	Index  uint32 // display row index assigned by the server
}
|
||||
|
||||
// TUI ties the tview display to the server client and relays user
// selections from the display into the monitor loop.
type TUI struct {
	*Display
	*TUIClient
	SelectedReactor <-chan uint32 // reactor picked in the UI (fed by Display)
	SelectedDevice  <-chan uint32 // device picked in the UI (fed by Display)
	Err             chan error    // fatal startup errors go upstream here
}
|
||||
|
||||
func NewTUI(ip string, port int, ifconfig string, ch chan error) *TUI {
|
||||
t := &TUI{}
|
||||
t.Err = ch
|
||||
client := NewTUIClient(ip, port, ifconfig)
|
||||
t.TUIClient = client
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *TUI) Start() {
|
||||
// setup tview app and wait for user connection in standin modal
|
||||
if err := t.TUIClient.Start(); err != nil {
|
||||
t.Err <- err
|
||||
}
|
||||
logging.Debug(logging.DStart, "TUI %v starting", t.Id)
|
||||
go t.Monitor()
|
||||
t.CreateDisplay()
|
||||
t.Display.Start()
|
||||
}
|
||||
|
||||
func (t *TUI) CreateDisplay() {
|
||||
rc := make(chan uint32)
|
||||
dc := make(chan uint32)
|
||||
t.Display = NewDisplay(rc,dc)
|
||||
t.SelectedReactor = rc
|
||||
t.SelectedDevice = dc
|
||||
t.Flex.AddItem(t.ReactorList,0,1,true).
|
||||
AddItem(t.DeviceList,0,2,false)
|
||||
}
|
||||
|
||||
// Monitor orchestrates updates and grpc requests: it reacts to reactor /
// device selections coming from the display and polls the server for
// status roughly once a second. Runs forever; call in its own goroutine.
func (t *TUI) Monitor() {
	// self-clocking ticker: fires immediately, then about every second
	// (the unbuffered send blocks until this loop is ready to receive)
	timer := make(chan struct{})
	go func(signal chan struct{}) {
		for {
			signal <- struct{}{}
			time.Sleep(1 * time.Second)
		}
	}(timer)

	for {
		select {
		case reactor := <-t.SelectedReactor:
			// reactor has been selected in tui, grabbing devs
			t.App.QueueUpdateDraw(func() {
				t.UpdateDevices(reactor)
			})
			logging.Debug(logging.DClient, "%v getting reactor devices", t.Id)
		case dev := <-t.SelectedDevice:
			logging.Debug(logging.DClient, "%v editing device %v", t.Id, dev)
			// TODO
		case <-timer:
			// time to ping for status
			logging.Debug(logging.DClient, "%v pinging for updates", t.Id)
			t.App.QueueUpdateDraw(func() {
				t.UpdateDevices()
			})
		}
	}
}
|
||||
|
||||
// UpdateDevices fetches device state from the server and redraws the
// lists. Called with no argument it is a routine poll; with a reactor id
// it reloads that reactor's device list; with 0 it forces a full refresh
// and rebuilds the static Refresh/Quit entries.
func (t *TUI) UpdateDevices(r ...uint32) {
	// get devices for the reactor and update the tui
	// see if there is a page being displayed
	// overwrite if called as a func
	var devs map[uint32]*Device
	var err error
	if len(r) > 0 {
		// could be a reactor id or 1 for update reactors
		if r[0] != 0 {
			// targeting one reactor: clear its stale device rows first
			t.Display.DeviceList.Clear()
		} else {
			// full refresh: rebuild the reactor list and its static entries
			t.ReactorList.Clear()
			t.ReactorList.AddItem("Refresh", "Press (r) to refresh", 114, nil)
			t.ReactorList.AddItem("Quit", "Press (q) to quit", 113, func() {
				t.App.Stop()
				os.Exit(0)
			})
		}
		devs, err = t.TUIClient.GetDevices(r[0])
	} else {
		devs, err = t.TUIClient.GetDevices()
	}
	if err != nil {
		// NOTE(review): a transient RPC failure kills the whole TUI here;
		// consider surfacing via t.Err instead — confirm intent.
		log.Fatal(err)
	}
	//if id != 0 {
	// split based on type to simplify update
	reactors := make(map[uint32]*Device)
	devices := make(map[uint32]*Device)
	for id, dev := range devs {
		if dev.Type == "Reactor" {
			reactors[id] = dev
		} else {
			devices[id] = dev
		}
	}
	t.DisplayDevices(devices)
	t.DisplayReactors(reactors)
}
|
||||
|
||||
// display struct and logic
|
||||
// Display owns the tview widgets and the send-side of the channels over
// which user selections are reported to the monitor loop.
type Display struct {
	App             *tview.Application
	Flex            *tview.Flex
	ReactorList     *tview.List   // left pane: reactors + Refresh/Quit entries
	DeviceList      *tview.List   // right pane: devices of the selected reactor
	SelectedReactor chan<- uint32 // selections flow out to TUI.Monitor
	SelectedDevice  chan<- uint32
	sync.Mutex
}
|
||||
|
||||
func NewDisplay(rc,dc chan uint32) *Display {
|
||||
d := &Display{}
|
||||
d.App = tview.NewApplication()
|
||||
d.Flex = tview.NewFlex()
|
||||
d.DeviceList = tview.NewList().SetSelectedFocusOnly(true)
|
||||
d.ReactorList = tview.NewList()
|
||||
d.ReactorList.AddItem("Refresh","Press (r) to refresh manually", 114, nil)
|
||||
d.ReactorList.AddItem("Quit","Press (q) to quit",113,func() {
|
||||
d.App.Stop()
|
||||
os.Exit(0)
|
||||
})
|
||||
d.ReactorList.SetTitle("Reactors").SetBorder(true)
|
||||
d.ReactorList.SetSelectedFunc(d.SelectReactor)
|
||||
d.DeviceList.SetTitle("Devices").SetBorder(true)
|
||||
d.DeviceList.SetSelectedFunc(d.SelectDevice)
|
||||
d.SelectedReactor = rc
|
||||
d.SelectedDevice = dc
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *Display) Start() {
|
||||
if err := d.App.SetRoot(d.Flex, true).Run(); err != nil {
|
||||
d.App.Stop()
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// DisplayReactors merges reactor rows into the reactor list by index.
// The list is padded with new rows until the target index exists; the
// "+2" offset keeps the trailing Refresh/Quit entries at the bottom.
func (d *Display) DisplayReactors(r map[uint32]*Device) {
	// this func takes in a list of devices to update and loops over them
	// works by padding list for entries not seen yet
	for _, reactor := range r {
		txt := fmt.Sprintf("%v %v", reactor.Id, reactor.Status)
		indx := int(reactor.Index)
		for indx + 2 >= d.ReactorList.GetItemCount() {
			// this prevent overwriting quit entry
			// NOTE(review): shortcut rune starts at 47 ('/') + count, so the
			// first inserted row gets an odd hotkey — confirm intended range
			d.ReactorList.InsertItem(-3,txt,reactor.Data,rune(47+d.ReactorList.GetItemCount()),nil)
		}
		if indx + 2 < d.ReactorList.GetItemCount() {
			d.ReactorList.SetItemText(indx,txt,reactor.Data)
		}
	}
}
|
||||
|
||||
// DisplayDevices merges device rows into the device list by index,
// padding the list with new rows until each device's index exists.
func (d *Display) DisplayDevices(devs map[uint32]*Device) {
	// going to just clear every time as we reload new dev lists anyway
	// going to clear on every reactor selection to simplify
	// can probably just load from SM to save system resources on spam reloading
	for _, dev := range devs {
		logging.Debug(logging.DClient,"Displaying device %v",dev)
		// row text: hex id, status, type
		txt := fmt.Sprintf("0x%x %v %v",dev.Id,dev.Status,dev.Type)
		indx := int(dev.Index)
		for indx >= d.DeviceList.GetItemCount() {
			// pad until the target row exists (AddItem grows the count,
			// so this loop terminates); hotkeys start at rune 49 ('1')
			d.DeviceList.AddItem(txt,dev.Data,rune(49+d.DeviceList.GetItemCount()), nil)
		}
		if indx < d.DeviceList.GetItemCount() {
			d.DeviceList.SetItemText(indx,txt,dev.Data)
		}
	}
}
|
||||
|
||||
func (d *Display) SelectReactor(index int, main, data string, r rune) {
|
||||
// called when reactor in list in selected
|
||||
if main != "Quit" {
|
||||
if main == "Refresh" {
|
||||
// TODO
|
||||
} else {
|
||||
maintxt := strings.Split(main," ")
|
||||
id := maintxt[0]
|
||||
if id, err := strconv.ParseUint(id, 10, 32); err != nil {
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
d.SelectedReactor <-uint32(id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Display) SelectDevice(index int, main, data string, r rune) {
|
||||
// called when device is selected in sub menu
|
||||
maintxt := strings.Split(main," ")
|
||||
id := maintxt[0]
|
||||
id = strings.Trim(id,"0x \n")
|
||||
logging.Debug(logging.DClient,"Selected dev %v", id)
|
||||
if id, err := strconv.ParseUint(id, 16, 32); err != nil {
|
||||
logging.Debug(logging.DError, "Error parsing: %v", err)
|
||||
os.Exit(1)
|
||||
} else {
|
||||
d.SelectedDevice <-uint32(id)
|
||||
}
|
||||
}
|
@ -0,0 +1,89 @@
|
||||
package websocket
|
||||
|
||||
// creates websocket server and upgrades incoming connections
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
ws "github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
// ReactorTest is a throwaway JSON payload used to smoke-test the
// websocket connection (see serverWs).
type ReactorTest struct {
	Id   int    `json:"id"`
	Name string `json:"name"`
}
|
||||
|
||||
// WebSocket is a placeholder server type satisfying the Start interface.
type WebSocket struct {
	// dummy struct for interface
	N string
}
|
||||
|
||||
func New() *WebSocket {
|
||||
return &WebSocket{}
|
||||
}
|
||||
|
||||
func (s *WebSocket) Start() {
|
||||
fmt.Println("Starting ws server!")
|
||||
setupRoutes()
|
||||
http.ListenAndServe(":8080", nil)
|
||||
}
|
||||
|
||||
// default opts allow all origins
|
||||
// default opts allow all origins
// NOTE(review): accepting every Origin disables cross-site WebSocket
// hijacking protection — fine for development, tighten for production.
var upgrader = ws.Upgrader{
	CheckOrigin: func(r *http.Request) bool { return true },
}
|
||||
|
||||
// reader
|
||||
func reader(conn *ws.Conn) {
|
||||
|
||||
for {
|
||||
// read forever
|
||||
//messageType, p, err := conn.ReadMessage()
|
||||
_, p, err := conn.ReadMessage()
|
||||
|
||||
if err != nil {
|
||||
if ws.IsCloseError(err, ws.CloseNormalClosure, ws.CloseGoingAway) {
|
||||
// normally closed
|
||||
return
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Msg: %s\n", string(p))
|
||||
}
|
||||
}
|
||||
|
||||
func serverWs(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Println(r.Host)
|
||||
|
||||
websocket, err := upgrader.Upgrade(w, r, nil)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// try sending reactor
|
||||
t1 := &ReactorTest{Id: 1111, Name: "test1"}
|
||||
t2 := &ReactorTest{Id: 1112, Name: "test2"}
|
||||
t3 := &ReactorTest{Id: 1113, Name: "test3"}
|
||||
n := []*ReactorTest{t1, t2, t3}
|
||||
msg, err := json.Marshal(n)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// pass to connection
|
||||
if err := websocket.WriteMessage(ws.TextMessage, msg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// pass to reader
|
||||
reader(websocket)
|
||||
}
|
||||
|
||||
func setupRoutes() {
|
||||
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintf(w, "Simple Server")
|
||||
})
|
||||
|
||||
http.HandleFunc("/ws", serverWs)
|
||||
}
|
@ -0,0 +1,2 @@
|
||||
// implemenets a reactor object with websocket methods
|
||||
package websocket
|
@ -0,0 +1,44 @@
|
||||
#!/bin/bash

# Builds the multi-arch reactor Docker image for a given board type
# (pi -> linux/arm64, bb -> linux/arm/v7), then removes the local tag.

display_usage() {
	echo "Usage: $0 reactor_type"
}

# checking for help options
# BUG FIX: was `[[ $@ == "--help" ]]`, which relies on word-joining of
# all arguments and only matches when --help is the sole argument;
# test the first positional parameter instead.
if [[ $1 == "--help" || $1 == "-h" ]]
then
	display_usage
	exit 0
fi

# checking that arguments are not empty
if [[ -z $1 ]]
then
	echo "Type of reactor not specified!"
	display_usage
	exit 1
fi

# map reactor type to a docker buildx platform
if [[ $1 == "pi" ]]
then
	platform="linux/arm64"
elif [[ $1 == "bb" ]]
then
	platform="linux/arm/v7"
else
	echo "Reactor type $1 not supported!"
	echo "Supported reactors include: pi, bb"
	display_usage
	exit 1
fi

# building reactor image

echo "Building Reactor image for $1 platform=$platform"

docker buildx build --rm --platform="$platform" -f Dockerfile.reactor --tag localhost:5000/reactor .

echo "Cleaning local images"

docker image remove localhost:5000/reactor
|
Loading…
Reference in New Issue