diff --git a/.env b/.env deleted file mode 100644 index ab2e438..0000000 --- a/.env +++ /dev/null @@ -1,4 +0,0 @@ -INFLUXDB_USERNAME=admin -INFLUXDB_PASSWORD=forelight -INFLUXDB_ORG=ForeLight -INFLUXDB_BUCKET=test diff --git a/build.sh b/build.sh index 3dbe3fe..fa991da 100755 --- a/build.sh +++ b/build.sh @@ -46,11 +46,13 @@ create_build() { # create build for $1 case $1 in 'rpi' ) + echo "NOT IMPL">&2 && exit 1 printf 'Building for Raspberry Pi!\n' GARCH="arm64" PLATFORM="reactor" ;; 'bb') + echo "NOT IMPL">&2 && exit 1 printf 'Building for BeagleBone!\n' GARCH="arm" GARM="GOARM=7" @@ -74,11 +76,14 @@ create_build() { esac # setting up build OUTFILE=$(printf '%s_linux_%s' "$PLATFORM" "$GARCH") - INFILE=$(printf '%s/main.go' "$PLATFORM") # building - env GOOS=linux GOARCH="$GARCH" $GARM go build -o bin/"$OUTFILE" cmd/"$INFILE" + ( cd server; env GOOS=linux GOARCH="$GARCH" $GARM go build -o "$OUTFILE") + mv server/"$OUTFILE" bin/"$OUTFILE" + echo "Finished" - if [[ "$SCP"=true ]] ; then + + # scp + if [[ -n "$SCP" ]] ; then printf 'Attempting to transfer to %s\n' "$2" if [[ "$1" == "bb" ]] ; then printf 'Copying to %s\n' "192.168.100.90" @@ -142,7 +147,6 @@ for dev in "$@"; do done printf 'Nothing else to do!\n' -# echo "Compressing binaries for distrubution" # tar -czf pireactor.tar.gz -C bin reactor_linux_arm64 # tar -czf bbreactor.tar.gz -C bin reactor_linux_arm # tar -czf server.tar.gz -C bin server_linux_amd64 diff --git a/cmd/reactor/main.go b/cmd/reactor/main.go deleted file mode 100644 index 1db35e6..0000000 --- a/cmd/reactor/main.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "FRMS/internal/pkg/config" - "FRMS/internal/pkg/logging" - "FRMS/internal/pkg/reactor" - "fmt" - "os" - "syscall" - - "os/signal" - - "github.com/spf13/viper" -) - -type reactorCoordinator interface { - Start() -} - -func NewReactorCoordinator(config *viper.Viper, ch chan error) reactorCoordinator { - // allows interface checking as opposed to calling directly - return reactor.NewCoordinator(config, ch) -} - -func NewConfig(fname string) *viper.Viper { - return config.LoadConfig(fname) -} - -func main() { - // shutdown - gracefulShutdown := make(chan os.Signal, 1) - signal.Notify(gracefulShutdown, syscall.SIGINT, syscall.SIGTERM) - - // load any stored configs - conf := NewConfig("reactor") - - ch := make(chan error) - rlc := NewReactorCoordinator(conf, ch) // passing conf and err - go rlc.Start() - logging.Debug(logging.DStart, "Reactor Started") - - // check for errors - select { - case err := <-ch: - if err != nil { - conf.WriteConfig() // save changes - panic(err) - } - case <-gracefulShutdown: - // sigint - fmt.Printf("\nStoring config to %s\n", conf.ConfigFileUsed()) - if err := conf.WriteConfig(); err != nil { - panic(err) - } - os.Exit(0) - } -} diff --git a/Dockerfile.reactor b/docker/Dockerfile.reactor similarity index 100% rename from Dockerfile.reactor rename to docker/Dockerfile.reactor diff --git a/Dockerfile.server b/docker/Dockerfile.server similarity index 100% rename from Dockerfile.server rename to docker/Dockerfile.server diff --git a/docker-compose.yml b/docker/docker-compose.yml similarity index 100% rename from docker-compose.yml rename to docker/docker-compose.yml diff --git a/reactor_build.sh b/docker/reactor_build.sh similarity index 100% rename from reactor_build.sh rename to docker/reactor_build.sh diff --git a/internal/api/reactor.go b/internal/api/reactor.go deleted file mode 100644 index c1cb9eb..0000000 --- a/internal/api/reactor.go +++ /dev/null @@ -1,12 +0,0 @@ 
-package api - -import "errors" - -type ReactorCoordinator interface { - Start () -} - -type reactorCoordinator struct { - - -func StartReactor diff --git a/internal/configs/database.yaml b/internal/configs/database.yaml deleted file mode 100644 index afe6cc3..0000000 --- a/internal/configs/database.yaml +++ /dev/null @@ -1,3 +0,0 @@ -orginization: ForeLight -token: Zrtg0Q9u65HbFaK4KPWbl9y1xofJwsRHVwuWcIq3xvSOstVbjshDoRNjPiwsz31vIoP-GwDuGL8gzouEHqMuYg== -url: http://localhost:8086 diff --git a/internal/configs/db.env b/internal/configs/db.env deleted file mode 100644 index dbc19f5..0000000 --- a/internal/configs/db.env +++ /dev/null @@ -1,5 +0,0 @@ -INFLUXDB_USERNAME=admin -INFLUXDB_PASSWORD=admin -INFLUXDB_ORG=ForeLight -INFLUXDB_BUCKET=default - diff --git a/internal/configs/reactor.yaml b/internal/configs/reactor.yaml deleted file mode 100644 index 8682f29..0000000 --- a/internal/configs/reactor.yaml +++ /dev/null @@ -1,11 +0,0 @@ -devices: - address: 112 - name: DO Sensor -reactor: - heartbeat: 5 - id: 2166136261 - model: "" - name: Dummy Reactor - server: - ip: 192.168.100.2 - port: 2022 diff --git a/internal/configs/server.yaml b/internal/configs/server.yaml deleted file mode 100644 index 43928db..0000000 --- a/internal/configs/server.yaml +++ /dev/null @@ -1,26 +0,0 @@ -db: - org: ForeLight - url: http://192.168.100.2:8086 -ports_db: 2022 -ports_lis: 2022 -reactors: - "10002123": - db: - bucket: test - token: "" - name: Beaglebone Black - "2062445129": - devices: - "97": - name: DO Sensor - "99": - name: pH Sensor - "102": - name: RTD Sensor -server: - name: Rack Server - ports: - db: 8086 - lis: 2022 - reactor: 2023 - tui: 2024 diff --git a/internal/notes/archive/notes.md b/internal/notes/archive/notes.md deleted file mode 100644 index 69938dd..0000000 --- a/internal/notes/archive/notes.md +++ /dev/null @@ -1,71 +0,0 @@ -*Time for a coherent plan of attack* - -### Current Issues: -- There is a lot of redundancy between the managers/coordinators when it comes to basic checks -- the package seperation kind of makes sense, but it needs to be better fleshed out -- I need to enforce better seperation of responsibilities. Somewhat unclear when state is being kept centrally in the coordinator for no apparent reason. - -### Solution: -- Go through the packages and consolidate -- Reduce the state we have to keep centrally, push responsibility to the other packages - -### Plan of attack: -- Outline core information flow -- Examine what interfaces are nessecary to make this work -- Stop looking at the server/reactor as seperate entities - -*I need to put the whole docker thing on the back burner for now. It isn't that important when it comes to immediate goals.* - -#### 12/05 TODO -- Cleanup server side config stuff to make it coherent -- Reflect changes to reactor side startup -- Boil down interface to address core issues -- Config outline: - 1) Startup and load the existing config - 2) Overwrite any previous settings with the flags - 3) Intelligently translate config into action - 4) launch coordinator and start up existing reactor managers -- Config Structure: - - Wrap viper functions in config struct methods to be used thrtugh interfaces - - minimize the reliance on viper so we can sub in othermethods -- is it even important to launch reactor managers? Wont they just be started on connection? - - -#### 12/06 TODO -- I think I can completely remove the old config way and just pass the viper object directly. 
I think its not worth the hassle of trying to keep track of a million interfaces - -#### 12/07 TODO -- I concede, I will just remove flags as most people will never use them anyway and instead rely on env vars and config files. To hell with the flags. -- I am ripping out all of the TUI and status manager stuff, its convoluted and harder than just pulling info from database. - - I can eventaully rework TUI to pull from DB which is fine, there will never be that many clients anyway and a lot of them are only 1 time calls with refreshes which aren't that slow anyway. -- alright I gutted the tui and system viewer, reworking sub coord to launch at start. That way there is a listener active -- time to boil down to functionality a LOT, right now its clumsy and inefficent, there needs to be a better way to keep everything straight -- Moving the DB responsibilites to the reactor itself seems to be the best way to do it in the short term. Reduce network load and overall keep things efficient. May lead to duplicte copies of data? Not the end of the world, logging system can make sure we are maintaining entries. - -**IDEA** -Reactors log data themselves, Send periodic status updates over grpc to enable monitoring faster than the sample rate -*This could work!* -Outline: -- Reactors reach out to server on boot to get DB info - - compare this against what they have internally to ensure they are up to date and allow for migrations - - Maybe not even save the db info because we don't need to?? -- Reactors also recieve port for their specific manager - - Can be dynamically given out to allow for spread out load -- Reactors then reach out with sensor and device info periodically (5s?) which can be used for live monitoring -- RM responds with any potential updates for the device settings i.e. change pwm duty on web interface, pass on to reactor -- Allows for a live view with current reading as well as historical data at differing interval via grafana. (i.e. 5s live view with 10 min sample interval) - -Need to differentiate sensors vs devices that can be changed -- Sensors have a variable sample rate and eventually name/address -- Devices have more and widley varying parameters, could be pwm with freq/duty/onoff or ph pump with on, time or off etc. - -#### 12/09 TODO -- Alright I have a baseline! I want to start to integrate atlas type stuff so that I have some mock data/sensors to work with. I am going to try to flesh out the "atlas" interface/struct to implement some of the more basic commands. - -#### 1/11 TODO -Plan of attack for websocket stuff and things - -**Questions** -- What to do about the reactor to user comms - - Websockets? GRPC? smoke signals? 
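Whatever the transport ends up being, the 12/07 idea above boils down to a push loop on the reactor. A minimal Go sketch, assuming hypothetical poll and send hooks standing in for the device scan and the eventual gRPC (or websocket) call:

```go
package main

import (
	"log"
	"time"
)

// DeviceStatus is an illustrative stand-in for what a reactor reports
// per device: address, name, and current state.
type DeviceStatus struct {
	Addr   int
	Name   string
	Status string
}

// reportLoop pushes a snapshot to the server every interval, faster
// than the sample rate, until stop is closed.
func reportLoop(interval time.Duration, poll func() []DeviceStatus, send func([]DeviceStatus) error, stop <-chan struct{}) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if err := send(poll()); err != nil {
				log.Printf("status update failed: %v", err)
			}
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go reportLoop(5*time.Second,
		func() []DeviceStatus { return []DeviceStatus{{Addr: 97, Name: "DO Sensor", Status: "ONLINE"}} },
		func(s []DeviceStatus) error { log.Printf("sending %d statuses", len(s)); return nil },
		stop)
	time.Sleep(12 * time.Second)
	close(stop)
}
```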
-- diff --git a/internal/notes/archive/old_notes b/internal/notes/archive/old_notes deleted file mode 100644 index 99d46ce..0000000 --- a/internal/notes/archive/old_notes +++ /dev/null @@ -1,1001 +0,0 @@ -time to plan - -terms - -RLC - reactor level coordinator (Beagleboard) -RH - Reactor Handler (goroutine) -SH - sensor handler (gourtine) -Reactor Side: - -needs - - way to discover active sensors - - spin up goroutine for each sensor responsible for keeping status and logs - - way to read back and truncate logs for safe data delivery to servr - - routing requests from reactor level coordinator to relevant sensor - - internal memory sharing and channels for reactor level coordination - -thoughts - - maybe the RLC can be responsible for packaging data for coordinator response - adv: - - clears up the network - - simplifies pinging - - keeps the data aributrary - cons: - - unknown data size - - how to coordinate data structure - -Server Side: - -needs - - way to look into a config file for active reactors - - should then spin up a goroutine for each reactor - - responsible for recovery and consistent communication - - individual database entries - - API? - - use gRPC for comms between server and BB - - each reactor handler needs mechanism for pinging, recovery, and database correctness - - - -message PingRequest { - // do we even need anything in a ping request? -} - -message PingResponse { - repeated Sensor sensor = 1; -} - -message Sensor { - string type = 1; - bool status = 2; - byte data = 3; -} - -sensors := [string]q - -6/23 TODO: - -X- BBB mem fix - - 32 gig for the main but where to put the OS? - - obv in EMMC but how to init sd card? (probably dev tree :( ) -Y- Server side impl - Y - Need a struct for the RC - X - Should we store and load configs based on IDs? (efficiency of this vs performance increases i.e. bandwidth vs storage) - Y/X - Who should kill the RC and how do we know its dead? (Garbage collection to the rescue hopefully) - -X- UPDATE PRES - - Add bottle necks for each part in that section - - I2C: 128 addrs and ~90 bytes/s per device at 128 devs optimally - - BB: Hardware is upgradeable even customizable ;) - - Server: Its overkill as is, can benchmark with a rudementary go overload once its completed -- Sensor configs - - how to store sensor info efficiently and searchably lol - - who needs to know what the sensor is? (Just the SM? Even the SM?) -X- TUI - - pls this would be so sick -TODO: 6-24 - -Y - Pres stuff from yesterday + python gRPC abstraction -Y - RPI flash -- Add resiliance to coordinator process (aka error handley blech) - - -TODO 6/27 -- Time to tackle sensor managers officially - - to hell with port generation - - going to use channels but not like a jackass - - going to try generating channels interface side but via implicit types to avoid the interface stff - - should set up a structure where I can use arbiturary types on the backend and fulfill methods to get/infer information on the frontend -- rewrite I2C interface to employ same method, should allow for this - 1) generate type - 2) send it to worker - 3) receive back (original? copy?) - 4) use interface methods to get required values -- should simplify all internal communication and potentially suggests api for implementation - -TODO 6/28 -- It works... kind of - - I learned a lot about - "the smaller the interface, the more useful it is" --Y time to tackle the server side error handleing aka reconnect - - custom backoff? Max TO? Sleep Period? 
- 5ms -> 10ms -> 25ms -> 50ms -> 100ms -> 250ms -> 500ms -> 1s -> 1s --Y Actual logic? - 1) Create listener - 2) create reactor managers for each reactor - a) whose job is it to kill on disconnect? Should we kill? - b) the RM will have the actual ping mechanism with coordinator aggregating in eventual TUI - 3) reactivated reactors should connect to the same RM to resume connections with less downtime. Memory use be damned (ALLOCATED?? ID VS IP) - 4) need a way to purge manually disconnected reactors - a) also should check ids which are deterministic and map actual hardware - 5) continue forever (for select??) --Y RM Logic? - 1) on spawn ping reactor to get initial sensor status - 2) enter ping loop to get status and keep heartbeat alive - 3) no action on sensor going down (unless method but duh) - 4) on reactor going down - 1) save to config? - 2) "sleep mode" - i.e. stop pinging and wasting resources doing stuff - 3) wait for coordinator to reactivated - 5) reactivation: call start and resume pinging -- RM Struct? - - needs to know - - ip:port of reactor - - id of reactor - - mapping of sensors to status - - last seen for reactor (and sensor?) - - needs to be able to - - ping reactor for status - - get status - - store and update status reliabily - - stop pinging on down detection - - detection outages - - respond to coordinator requests? (rpc or method most likely?) - - relies on - - SM for sensor status - - implements - - start/restart mechanism for coordinator - - ping response for coordinator -- Coordinator Struct? - - needs to know - - mapping of ids of connected reactors to RM - - its own ip:port to serve listener on - - internal mapping of system? (any efficiency benifiets here vs mem usage?) - - needs to be able to - - setup new RMs on incoming connections - - call existing RMs on incoming connections - - ping for reactor status from RMs - - store status for TUI? - - relies on - - RM for reactor status - - implements - - application logic - -That went surprisingly well... Same method for tui - -process outline: -TUI - 1) display TUI outline and connect to coordinator - 2) let coordinator know where to send reactor/sensor changes - 3) enter loop of checking for changes and drawing - 4) on quit should gracefully exit - -Coordinator - 1) on TUI connection start routine - 2) collect? sensor/reactor info and send init to TUI - 3) upon changes? 
send to TUI - 4) exit when TUI connection closes - - -- TUI struct - - needs to know - - all the tui stuff (duh) - - reactor/sensor status - - needs to be able to - - create (several instances of) a TUI - - receive and display up to date system info - - delete reactors/sensors - - be efficient - - i know this is broad but bear with me - - relies on - - coordinator struct for system info - - coordinator struct to fulfil delete request - - implements - - tui - - user interface or management -- Coordinator Struct - - needs to know - - all the sensor and reactor states - - needs to be able to - - know what the TUI knows/doesnt know - - efficiently notify tui of change in system - - notify sensors or reactors of removal - - relies on - - rm/sm to implement reactor/sensor removal - - rm for reactor/sensor status - - implements - - sender of system status for TUI - -TODO 6/29 -- refactoring - - creating general listener and then a coordinator for reactor/tui that uses listener to field incoming requests - - change update loops for status to only send new data or empty messages for pings -- tui - - creating coordinator/manager for TUI clients - - create update loop for tui clients - - grpc to tui client with updates and hb - - drawfunc loops over change buffer - - on disconnect should display static terminal with offline warning - -- Listener Struct - - needs to know - - IP - - Port - - needs to be able to - - respond to incoming gRPC - - create corrisponding manager // taken care of in the actual handler - - relies on - - grpc for the endpoint stuff - - manager impl - - implements - - listener for main func - - manager generator essentially - -coordinator should be seperate *** -- new coordinator struct - - listener sends new connections to coordinator who appends to internal registery and reacts - - needs to know - - incoming: - - ip - - port - - client type? - - needs to be able to - - wait for incoming connections - - create a new manager for the client - - update internal directory - - remove entries ?? (tui -> reactor) - - implements - - manager creation - - connection handling - - client hb - - relies on - - manager impl - - listener call? - - -alright complete redesign -server acts as singular listener -routes all requests to a central coordiantor -this calls cooresponding client coordinator which then initiates managers etc - -now redesinging sensor info - -new fmt -1) have a seperate long running coordinator routine responsible for a "changed" list of sensors -2) on reactor status request: - a) send the changed list - b) if the send was acknowledged purge the change list - * both must be atomic - -new rlc struct - -- needs to know - - sensors connected - - changes in sensors - - how to establish connection to central server -- needs to be able to - - track changes in its system layout - - atomically relay these to the cc -- depends on - - I2C sensor info -- implements - - reactor sensor change tracking - -new general coord - -What does it need to do? -- Needs to respond to incoming clients from listener - - what does this mean? - - needs to link a client to a manager - - this is the whole high level idea - - can everything else be abstracted away? Why not? - - how to link? Channel? Shared Memory? - - channel is probably the best choice here - - structure: - managers - [uint32] : chan<- bool - 10292133 : chan<- bool - - how to use? 
- - when a client connects we search to see if a channel exits - - if it does we just send "true" down the channel to signal that the client connected - - if we dont we create a channel and a manager and start the manager - - we then send true down the newly created channel - -Do we ever close the channel? - - shouldn't that would mean the program is done accepting connections (could be a useful behavior in the future) - - do we ever send false? - - no, not sure what that would signal. - - coordinator is specifically only designed to create/notify managers of a client connection - -formally, New Coordinator: -- needs to know - - client type -- needs to be able to - - either notify or create and notify a manager on client connection - - handle concurrency -- relies on - - listener to send incoming clients - - manager to implement actual manager -- implements - - manager activation and memory - -TODO 6/30 -creating efficient system mapping and data logging/structure info - -idea # 1.5 -use json maybe? - -how? -- use json to build in the structure of our system via heirarchy -ex) -[ - { - "reactor": uint32, - "status": bool, - "connected devices": [ - "device" : { - "addr": "0x10" - "type": "ph sensor", - "status": uint32, - "data": [{"ph7.3"}, // json marshelling of the specific struct - }, - "device" : { - "addr": "0x11" - "type": "temp sensor" - status: uint32 - "data": "t24.5C" - } - ] - } -] - -use go structs to map components and embed them -can send - - -need to just spitball here - -what am I trying to do at the end of the day? -I am taking sensor measurements -and potentially tweaking control paramters -lets treat each one sperately at firs - -sensor measurements - -each particular sensor manager will only be responsible for getting data from its sensor -what is the scope of responsibilities? -the sensor manager should log this data locally using a method? json? - -how do we aggregate this info? - -what if we structure our reactor as a mirror of our coordiantor - -rlc job would be to -- establish connection with central server -- wait for connections from devices -- create reactor managers for these devices - -this could be really nice - -rm (general) job: -- establish connection with device via I2C (client via wifi) -- shut down when device connection drops -- start when device connects again - -adding data responsiblities - -tuim: - needs to know of a struct of system - [reactorid][deviceaddress][device] - thus needs to know: - - reactor id - - connected device addresses - - device info: can just be a string! - - made up of status and relevant data - what do we rely on - - accurate device info string - - can have someone else make/maintain struct and proxy updates -tuic: - -needs to maintain an atomic struct of system - as above - - still only needs to know - - reactor id - - connected device address maped to device info [string] - relies on - - accurate status updates - - accurate device info - -RC ** could have this guy be responsible for change parsing - - respond to updated status from RM and send to TUI - - basically a focus point - -RM - - needs to call corret handlers for data coming in from rlc - - can we just use grpc handlers that get embedded in the manager at start? 
- - handlers can also notify when data doesnt match previous entry - - this would prompt the data to be sent to the rc where it can be forwardd - -RLC - - needs to have internal reactor state - - - - -this gives us a complete "skeleton" of service where we can connect/reconnect clients with appropriate managers -there isnt any functionality yet to actually log data - -how do we leverage our current connections and add functionality to managers and coordinators? - -methods and channels - -each manager is responsible for pinging the associate device {reactor, device, tui} -either sending device info in tui case -or recieving it in reactor/device case - -this is why wrapping the gen structures is nessecary. Two different operations - -device manager: -could recieve 0-100000000 values -could be any type -could be any number per entry -common struct? -"timestamp":"data" -data could be json struct - - makes it easy to parse at some point - - force sensor driver to write a go struct for the data - - Parse___Data(*json.Unmarshalled) - - -complete i2c monitor redesign - -i2c interface needs to do - data stuff: - - locking structure to serialize commands/reads - - removal function to manually parse dead devices - - reconnecting should reinit device manager and stuff - init stuff: - - keep track of devices seen and connected - - notify rlc of devices that connect/reconnect -build init stuff into a struct that can be embedded? -I2CCoordinator - - created when rlc is created - - tie rlc to i2ccoord via channels - - new devices channel for devices that go offline->online - - send the i2cdevice struct to embed in rm - - can call interface funcs on the embedded interface - - - -Eureka part 2? -we are writing all of the software libraries which means we should (with some basic cleansing) be able to just send direct database queries -this means some things which could be pros or cons - -- each sensor realistically will have its own table for each reactor. -- we can group entries by reactor and look across time stamps (tidy?) -- we can log sql entries as text based backups -- we can use basic string struct with time stamps -- each sensor library will use a common struct and probably just use string fmting -- there are some efficiency benfiets if we used custom gRPC calls for each db entry - - but we can just leverage a biolerplate call with some extra overhead? -- we still need a way of representing state of components - - reactor is easy and could be kept server side - - sensor needs to be transmitted across rlc - - should default to down if the reactor goes offline (unknown?) - -direct query approach -pros - - easy to implement - - easy to use interfaces for common libs (compiling efficiency) - - easy to add sensors (use common libs and just make custom string in wrapper) - - can develop logging and db parts as manager funcs -cons - - need unique daemon to parse data on server for state struct - - trusting each sensor to maintain itself - - still need a way of translating state - -state problem - -it just should be an enumeration -its efficeint (could be as little as 4 bits but probably 1 byte) as opposed to a string ( len(s) * byte ex "ONLINE" = 6) - - is this all worth ~1-10? bytes of savings per dev? - - 100 reactors @ ~45 sensors = 46*100 = ~4.5 kb of state or ~ 36kb if we use strings - - so maybe? 
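For concreteness, a minimal sketch of the enumeration idea (illustrative names, not an actual package in the repo): one byte on the wire and in memory, readable strings only at display time.

```go
package status

// DeviceStatus fits in a single byte instead of a 6-8 byte string like
// "ONLINE", which is where the ~4.5 kb vs ~36 kb estimate above comes from.
type DeviceStatus uint8

const (
	Unknown DeviceStatus = iota
	Offline
	Online
)

// String keeps logs and TUI output readable without paying the string
// cost in storage or on the network.
func (s DeviceStatus) String() string {
	switch s {
	case Offline:
		return "OFFLINE"
	case Online:
		return "ONLINE"
	default:
		return "UNKNOWN"
	}
}
```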
-more important than memory are network calls -need to update on tui: - - state changes (hopefully less frequent) - - current value (~5 seconds - ~30 minutes) -store both client and server side - - only store actively view data client side to prevent leaks - - store full struct but only serve as request response to prevent extra copies - -system struct - - mapping of reactor ids to "Reactor" structs - - reactor is mapping of addr to sensor structs - - sensor struct is basic info - - device type (enum vs string) - - device status (enum vs string) - - device most recent value (int? vs string?) - - if offline last seen time - -notes on struct - - should ideally have locks at reactor and sensor level - - use func to return sensor list via lock on reactor - - use func to update sensor list via lock on reactor - - use returned list to parse and request value from each sensor - - use goroutines and channels for efficient operation - - build response via returned structs - - respond to client - -note on tui manager - - ideally should keep simplified current client struct to spawn copies of the update daemons for each sensor - - each daemon should be EXTREMELY light weight and just feed new data values to the manager - - tuimanager will be responsible for efficently buffering for tui client requests - - tui pings should be frequent and response should be any data to update - - client side we should be able to essentialy overwrite any entries on our response - - simplifies interface - -data aggregation outline -Starting from sensor -1) specific sensor manager initiates a read of the embedded i2c dev -2) on success read gets logged with the time to the internal txt log (json) -RLC loop: - 3) rlc has long running method with sub routines reading each log and adding pending entries to the buffer - - buffer is bounded and routines block when it fills (use to limit rpc reply length) - 4) on ping buffer is parsed into rpc reply - - send buffered log ids to cleanup routine but dont delete from log yet - 5) next req has transaction ids of previous data that have been acked - 6) send ids to cleanup process - 7) respond with new buffer repeat -RM side: -received data from rlc -1) send reply to data parsing goroutine -parser loop: - 1) start a completion listener - 2) read each device in reply - 3) start goroutine of db daemon for each dev with completion chan - 4) once reply is empty can end -db daemon loop: - 1) loop over device data entries - 2) initiate db connection - 3) parse through each data entry and send to db - 4) if it was succesfull send the transaction id to the completion channel -monitoring rpc loop: - 1) listen for completed transaction entries - 2) append entries to ack - 3) send to rm on ping timer - -Data is now in database for all intents and purposes - - - -process ensures that the data is collected -now the data is on the server -6) server sends grpc reply results to a parsing gorotuine -7) the parser loops over reply and spawns db daemons to enter info - - -should we even aggregate data? 
why would we not just write a db client as part of the rlc and let the sensor managers themselves log - -need to focus: - -2 major things going on - -rlc can do data stuff on the reactor itself and just use the db client - - relies on exposed db endpoint but can just handle auth stuff - - can log locally - -rlc also responds to status requests - - queries sensors for status - - adds to and sends reply - - recieves these pings <= 5 seconds apart - - should have down detection to kill db actions - - optionally include a "data" string of the most recent reading - -going to focus on status -want system to - init reactors - poll for status - respond with sensor info - view and manage on tui - -how? - -all structs only in status context - -rlc struct -- knows - - connected devs and managers -- is able to - - poll managers for state info -- relies on - - managers for accurate and fast data -- implements data aggregation for rm - -dm struct -- knows - - underlying i2c dev interface - - basic device info -- is able to - - respond to rlc status requests -- relies on - - rlc to ask for status -- implements - - status response - - -alright holy shit -i have rewritten the same five functions so many times - -time to take inventory - -right now the system has -a central coordinator -that can spawn managers -that can manage clients -and a reactor coordinator -that can spawn device managers -that can manage devices - -I have a rudimentary ping system that queries the reactors for their status - -where to go next - -I want to build in control before I worry about actual data -this means tui and removal of managers - -how to start? - -need to create a central struct that serves as a representation of the system - -map[reactorid] -> [dev addr] device info -reactor is online by nature of responding to a ping -device info is what we query for - -tui manager will request data from the server struct - -server struct should bridge between reactor coordiantor and terminal coordinator -needs to be concurrent -needs to synchronize incoming sensor data - -instead of a big stupid struct - - just have rm maintain struct for each reactor - - connect tui requests to rm - -pros - - prevent redundancies in data - - limit operations after the ping - - serve copies? - -what will this look like - -TODO 7/5 -time to flesh out the tui and move into actual sensor/db libraries - -tuitime: -tui client (user side) -*will most likely be divided -needs to know: -- ip address of host -- eventually - - username/pass auth -needs to be able to -- request system info via server -- establish connection to central server -implements -- basic system management functionality -relies on - -- server for up to date reactor and sensor data - - -TUI TIME -coordinator/listner/reactor seem to be stable - - will have to add exiting for tui manager - -need to create the actual TUI at this point -seperate into two logical aspects - - The client that gets the system info from the server - - the tui which actually displays this info into the tui - -how to share the data between the client and tui? 
-- structs - - pros - - very efficient - - cons - - risky -- chan - - pros - - synchronize stuff - - cons - - hard to set up and finnicky -- methods - - pros - - syncronized - - easy to implement - - cons - - need to store/embed struct - -systemviewer.go -TODO 7/7 -time to make the synchronous system view -have a few ideas for the approach - a) make RM responsible for maintaining their own "branch" and store pointers to their branch - b) store the branch centrally and have the RM post changes - -I like the idea of a central system viewer and you post updates to worker clients - a) posting updates vs calling methods - blocking vs non blocking - b) - -lets layout expectations -RM should keep a current view of itself and whether it is online - - this should persist despite reactor outage - - in case of outage every sensor is UKNOWN - - optional last seen time for sensors/reactor - - exit should save to memory? persist for a given id? - - have a removal mechanism - - use case is to purge dead mechanism aka no black list - - each rm keeps most recent sensor view or reactor view in mem and can accept incoming system viewer connections -system viewer clients - - spawn 1 per tui client - - can do 2 things to any rm - a) call it for a complete copy which needs to be fast (gRPC?) - b) latch onto update chan to build its own copy for when the client requests the new devices - - follow a buffer -> client -> delete life cycle -system viewer "server" - a) spawn new system veiwer clients and direct them to the proper reactors - -aside: can we use this to replace coordinator system? just make a system directory - -what are we already storing? - in coordinator we have a mapping of ids to managers for reactor and - what if we remap system viewer and coordiantor to system coordinator which does both - seems redudent to keep multiple system copies - - any speed concerns? Client connections spawning new managers? 
- - we must lock map - - channels is probably the answer here, just desync the whole process from itself - - listener gets incoming clients - - clients get parsed into a "Client" object and sent to system coodiantor - - types have slight diffences but essentially: - 1) see if there is a stored manager or if we need to create one - 1.5) create manager if it doesnt exits - 2) start the manager with the client details - 3) create 2 chans (<-reactor tui<-) for reactor & device info - now the divergence - Reactor Manager: - 1) Connect to reactor and send initial ping - - if reactor ever doesnt respond (maybe backoff for unavailable) just kill manager and send offline to reactor status chan - 2) As device info comes in start maintaining a system struct - this must persist exits and starts - 3) For the sensor info coming in, send a copy on the chan to the void for all youre concerned - 4) Respond to requests for entire system copies as clients initially connect - - probably just a method - 5) only need to send reactor status on changes aka starts and exits - TUI Manager: - 1) Buffer the currently known reactor status via chan hook - 2) Wait (Timeout) for connection for tui client - - probably sub 5 seconds before we exit - - need to keep track via a last seen - 3) respond with the buffered reactor info - 4) on request for specific info - - request system viewer for the reactor info which will return the reactors sensor chan - - spawn goroutine to hook onto this chan and maintain a "local" copy of the new data for the client - - can probably make this more efficient but f it - - biggest buffer gets is # devs * size dev struct (bytes) - - drop anything but most recent - 5) as client requests for info we either reply with the buffer from the hook or create a new buffer - 6) translates into pages client side which are more efficent - 7) could even look at batching these eventually - 8) should probably kill the listeners (atleas the chan) when the tui client - a) disconnects - b) goes idle - - System Coordinator must then - 1) have a method to handle client connections that is concurrent safe - 2) start client managers on connection - 3) keep a directory of all the channels for clients for device and reactor info - 4) link tui client managers to rm properly -no need for a name change coordinator will have - system viewing functions in systemview.go - -alright check in time - -now have system viewer -which embeds a straight up info stream -and a map from reactor ids -> Info streams - -InfoStreams are structs with methods for adding listeners and senders -both return monitor objects which you can either - Send(device info) -or GetBuffer() and ClearBuffer([]da) - -this is all the foundation -just need to wrap into a thing the coordinator can useor maybe even replace coordinator - -systemviewer has 4 methods -every tui manager will embed a reactor listener -every reactor manager will embed a reactor sender -when a tui client selects a reactor we will embed the device listener -every reactor will be its own device sender - -the only thing that happens after init is tui may add device listeners - - - -should unify so that its only 1 ping request or a special request when we go to a page for the first time - -ex devinfo - -{ -Id uint32 // either id or addr -Type string //['reactor','device'] set by rlc -Status string //set by sm -Data string //optional -Index //added by monitor for consistent location in tui -Transaction ID //added by server and discarded in reply - -I may just scrap this shit in favor of a 
synced view -overhead is probably minimal anyway -redo listener bullshit to just route to the cs -tui clients will just get a fresh copy of the reactor info and device infofor every request - - -ADDING STANDARDIZED LOGGING -adding a log package for all packages -logs to a file named after start time -going to be of format - -TIME PROC CODE ID MSG -so -00013 STRT COR 912939123 Central coordinator started -00033 STRT -CODES -CCO - Central Coordinator -RCO - Reactor Coordinator -TCO - TUI Coordinator -RMA - Reactor Manager -TMA - TUI Manager -RLC - Reactor Level Coordinator -DMA - Device Manager -TUI - TUI Client - -every debug message will be of format -topic, devcode: id - -alright time to get this damn tui updating working - -general implementation details - -- libary card method - - only allow clients to checkout 1 device stream at a time (besides reactor stream obviously) - - close stream when call to open new one - - should we even store the reactor devices locally? - - or we could request when its selected and then let stream take care of itself -- simplifies a few things - - same setup for reactors/devices - - just call/create device listeners dynamically and reactor listeners at the start - - only check out reactor stream and 1 device stream at a time - - request for devices gets you the current state and adds your listener to the echo chain so that you recieve any updates - - need to ensure sends can complete even if the manager is dead - - close the channel? - - - -docker time -Need to refactor code so its eaisier to run in both envs -Refactoring server code now - - bind all gRPC services to the same IP:port to make it efficent - - funnel all rpcs through the one external port - - could even use nginx to route from default :80 -is there ever a situation where I would need to run this not on docker? - - can i just hardcode for docker and then rely on nginx for routing etc? - - - -ALRIGHT TIME TO LOCK TF IN -#TODO 8/1 -Time to set up proper config loading and storing - -Doing it all through interfaces and tagged structs - -On start up -Server needs to load up its own config - - take action on that config -wait for client connections - - load client config and reply with associated data -on client disconnect - - store any updates and return to idle state -restructing away from "listener" and coordiantor and stuff -going to just have central server -with an embedded listener -and database and shit -so -SERVER will call NewServer which will take care of subsequents - -# TODO 8/5 -Config storing time -going to probably have to add admin database client(aka server client which makes 0 sense) -can abstract all operations through interface and plugable package - -I just reliazed I coupled my mechanism with influxs token thing because it wokrs well but I am going to have to completely rebuild that if its properietary or we transition to a new DB - - hopefully null point due to the nature of OSS and time series - -CONFIG (and by extension DB) - -config.UpdateReactor(id, key, value) -config.UpdateSelf(key, value) - -should just be a basic way to update a given entry for an reactor -seperating server and reactor methods should lead to less finicky behaviour -should also probably wrap these in a seperate struct - - methods? - - who can call? - - who stores ptr? - - do we even need a ptr? 
can configs be stored and loaded statically or is that a bitch on the FS - -does it make more sensor to load different configs for each entity or justhave one monolithic config (probably load for each one and then let it update itself) - -going to have the init script set up the - -Welcome back - -#TODO 8/31 - -Goals: - - Add a config parser to load/store device manager struct - - start figuring out what a generic config package looks like - - figure out how to load different sensor functions dynamically - -Basic reactor workflow overview -1) On boot, scan I2C bus to find active devices -2) For every device shown as active, spawn a sensor manager from the assocaited config -3) on disconnect, shut the dm down and save current settings to config - -implementation time -#TODO 9/4 -Might be dying nbd - - i think its just freshman flu but could be clot who knows - -on to code -Need to have a functional BETA by 9/15 at the latest - pref 9/8 with a week to test - -What do we NEED out of FRMS v0.1.0 (pre-alpha - as an aside v1.#.#-apha then v.1.#.#-beta for versions) - -Needs: - - Connect and disconnect at will - - set sample and log rate - - set name - - live view data - - export expiriement data to CSV - -Notes: - - all sensors will be atlas - - can leverage for a unified library - - can use grafana for the UI - - can bash script to eport data for a given time range into resspective sheet aka sheet of DO measurements etc. - - can setuo the format pretty easily and probably just print F the query worst case I mean its 3 data points at probabnly 1 sample per minute at worst - -Architecture planning phase - -What would each need require software wise - -Need: Connect and disconnect at will - - directory of which device manager to load - - a way to store and load settings - - a way to monitor the i2c lines for new devices - - -Config interface - At a core -Load() - - load keys, config and env - - prompt for any updates - - store said updates - - store any future requests - -functions both server and reactor will use: -- load config -- load keys - - dif keys -- load env - - dif env - -order of ops - load config - load keys and env to overwrite config - store updates - have config with methods to get/set values - - - - - - diff --git a/internal/notes/archive/structure b/internal/notes/archive/structure deleted file mode 100644 index 7cc3e54..0000000 --- a/internal/notes/archive/structure +++ /dev/null @@ -1,103 +0,0 @@ -this will be a living doc - -starting with for connection management: - -listener: -- knows - - ip:port to listen to - - clients connect with{ip, port, clientType, model, id} -- is able to - - create coordinators for each clientType - - send new clients to coordiantor handlers via chan -- depends on - - clients sending known data (gRPC) - - NewCoordinator func -- implements - * shouldnt really have any callable methods - -coordinator: (General) -- knows - - what client ids have already connected - - which managers correlate to which clients -- is able to - - create managers for new clients - - start managers for clients -- depends on - - listener for client structure - - manager for NewManager() function -- implements - - client connection handling - - general manager call - -manager (general): -- knows - - client info - - timeout - - if it is active -- is able to - - establish a connection with a client - - stop when the connection drops -- depends on - - coordinator for start calls -- implements - - client connection creation - - client info storage - -manager (reactor): -* embedds 
the gm -- knows - devices attached to the reactor - underlying client manager -- is able to - maintain device struct - no pings only control logic (i.e. remove device, restart etc) -- depends on - gm for client conn - coordinator for starts -- implements - reactor structure tracking - -manager (tui): -* embeds the gm -- knows - structure of the system (i.e. reactors:devices) - underlying client manager -- is able to - keep track of system changes - updates client from buffer or concurrent grpc? -- depends on - RM to get system info - coordinator for starts -- implements - system tracking - -reactor level coordinator: (right away this feels so fat compared) -- knows - current connected devices - server to init to - its own hwinfo to establish itself as a client -- is able to: - reach out to server on boot - transmit client details - keep reactor devices current -- depends on - I2C package to notify of connected devices - hardware info to get its client info - server to handle connection - sm for new manager -- implements - reactor status handler for updates to other coords/managers - -device itself: -- knows - probe status (maybe) - data in buffer -- is able to - clear buffer on request - respond to commands -- implements - data collection - control execution -- depends on - nothing, it's a driver - maybe the control logic?? diff --git a/internal/notes/index.md b/internal/notes/index.md deleted file mode 100644 index 24f3e20..0000000 --- a/internal/notes/index.md +++ /dev/null @@ -1,4 +0,0 @@ -## Weekly Planning - -[Jan 16-20](weekly/Jan-16-20.md) -[Jan 23-27](weekly/Jan-23-27.md) diff --git a/internal/notes/weekly/Jan-16-20.md b/internal/notes/weekly/Jan-16-20.md deleted file mode 100644 index ceb8d75..0000000 --- a/internal/notes/weekly/Jan-16-20.md +++ /dev/null @@ -1,149 +0,0 @@ -# Jan 18 -### Planning -**Monitoring Changes** - -I want to refactor the reactor stuff to be less method-oriented as far as data collection. For example, the monitoring stuff is all about events that happen pretty infrequently. It makes sense to then use a channel on the device side to just feed relevant status updates back to the reactor. I think that this makes the most sense because this will synchronize updates and leverage the rarity of events to cut down on errant calls (see the sketch below). -- pros - - less repetitive method calls needed - - less device locking - - localize the information to different packages -- cons - - extra memory for channels and duplicate storage info - - could just remove status from dm?
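A minimal sketch of that channel-based monitoring, assuming a hypothetical poll hook standing in for the actual device check:

```go
package monitor

import "time"

// StatusUpdate is what a device manager feeds back to the reactor.
type StatusUpdate struct {
	Addr   int
	Online bool
}

// watch polls the device and sends on updates only on a state change,
// so the reactor side sees rare events rather than repeated method calls.
func watch(addr int, poll func() bool, updates chan<- StatusUpdate, stop <-chan struct{}) {
	t := time.NewTicker(time.Second)
	defer t.Stop()
	last := poll()
	updates <- StatusUpdate{Addr: addr, Online: last}
	for {
		select {
		case <-t.C:
			if cur := poll(); cur != last {
				last = cur
				updates <- StatusUpdate{Addr: addr, Online: cur}
			}
		case <-stop:
			return
		}
	}
}
```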
- -**New Idea** - -I can leverage wireguard to do server-> reactor connections even beyond the testing phase - -Changes: -1) move device coordinator into device package -2) expose relevant methods to reactor interface -3) clarify individual package responsibilities -4) add stuff server side to create/destroy grpc connections as the information is rendered client side - - this might be scuffed but oh well - -### Package Separation -**Reactor** -- coordinator - - creates initial link to the server - - creates database client - - creates and starts a device coordinator - -**Device** -- coordinator - - searches i2c bus for connected devices - - spins up managers to control the connected devices - - relays information back up to the reactor coordinator -- manager - - control over singular device - - has the core information that will be needed across any type of device (name, status, address etc) -- sub-manager - - fine grained struct with methods specific to the device - -**Server** - -Going to ignore for now because I am lazy -- central coordinator starts up database connection config etc -- reactor coordinator - -### TODO -**Monitoring Changes** -- [] change methods to channel based - - [] internal methods with spins - - [] pass structs with interface for methods - - -# Jan 19 - -### Orginizational changes - -What structure makes the most sense for the devices? - -#### Top-Down - -Ex) DeviceManager -> SensorManager -> DOManager -> Manager - -**Pros** -- probably a less complex interface layout? - - -**Cons** -- annoying to keep/pass state - - i.e. atlas needs the address to pass to the I2C but right now the devicemanager is storing that. Have to pass down via start which doesn't make a ton of sense - -#### Bottom-Up - -Ex) DOManager -> SensorManager -> DeviceManager -> Manager - -**Pros** -- top level manager has access to common info - - i.e. address, name etc -- can easily define common functions and use this to pass info upwards -- still don't have to import device manager as interfaces can handle getting/setting stuff - -**Cons** -- might get ugly with interfaces - - there might have to be a bunch of interfaces in the device package to handle nesting the manager itself - - this might not be true though as the device coordinator dictates what interfaces are needed, and already it doesn't really use any of the dm functionality - -**What would it look like?** -Device coordinator would call NewDeviceManager, - -### Outline of functionality - -Hopefully by going over what is expected of each manager, it will become clear what the layout should look like - -**Device Coordinator** -- responsibilities - - starting/stopping device managers as devices connect/disconnect - - maintaining a map of the devices and their status - - updating the server with this information at set intervals - - pass the I2C client to the device managers - -**Device Manager** -- responsibilities - - struct to store information that is used by any type of device - - i.e. Address, Name, Config(prefix and file)? Status? - - probably don't need status as this can be determined via IsActive() - - config might be helpful to have, could pass up to managers via a Get function - - start/stop as requested by the device coordinator -- serves - - broad functions such as SetName(), GetName(), etc. - -**Sensor/Controller Manager** -- responsibilities - - provide corresponding broad struct that will be consistent across types of each - - i.e. 
sensors all have sample rate - - provide methods all will use such as TakeReading() -- serves - - more specific functions such as GetSampleRate(), Set... - -**Specific Managers** -- responsibilities - - provides specific functions that a certain sensor/controller might need - - i.e. pwm will need setFreq, DO might need a conversion etc. - - broadly will need access to I2C for comms -- serves - - Hyper Specific functions such as SetFreq() etc. - -### Trying Bottom-Up - -Right now, I am using some hybrid format which doesn't really make any sense. It goes - -DeviceManager -> DOManager -> SensorManager -> Manager - -This just feels *wrong* - -**Changes** -- Going to use the specific -> broad order because it seems intuitive - - the most common methods/information is at the base and propagates up through the more specific managers - - should make it simpler to define -- maybe go back to the unified package? Not quite clear what the purpose of separate is beyond convenience - - although... the idea of the device manager as a reusable piece makes enough sense to potentially keep it as a separate package - - I'll stick with the separate for now and keep it unless it becomes unworkable - -### I2C Changes -The i2c bus is locked at the device level, so I am going to rewrite the bs to just use a function with no struct and remove the whole passing of structs garbage - -#### For tomorrow -What I have now works, but it is still pretty backwards. Need further improvements and need to start thinking about what a websocket might look like in the current model diff --git a/internal/notes/weekly/Jan-23-27.md b/internal/notes/weekly/Jan-23-27.md deleted file mode 100644 index 76afd80..0000000 --- a/internal/notes/weekly/Jan-23-27.md +++ /dev/null @@ -1,49 +0,0 @@ -# Jan 23 - -### Connecting Clients to reactors - -**Client -> Server -> Reactor** - -I can take advantage of the private network created via wireguard to allow the server to connect back to individual reactors and then initiate gRPC calls. - -**Pros** -- This *VASTLY* simplifies the implementation as I can now connect back to the reactors themselves - - from there, I can implement various functions I will need server side - - i.e. GetName() SetName() etc. - -**Cons** -- I will eventually need to build the wireguard implementation - - although because it's all local network for now, I can plug and play down the road - -### TODO -- refactor packages to provide a cleaner interface via simple commands as opposed to the convoluted passing structure that was present with the old I2C library -- start working on the interface between the websocket and the reactor - - react side this is the actual content that will be rendered by the client - - server side this will be a connection to a reactor with the gRPC calls -- moving monitoring functionality to the reactor - - refactoring to use streaming functionality to avoid needing to re-initiate requests - - have server connect each reactor manager to the rlc - - have the reactor manager ping for server info - - handle disconnects via exit - - sets up cleaner device handling via multiplexing - -# Jan 24 - -### Proto changes - -It's time to refactor the current protobuf stuff to make more sense from the server's perspective. In this sense, I am going to have the reactor provide connection details to the server on connect, and then the server can connect/disconnect at will.
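A rough server-side sketch of that connect-back flow, assuming a hypothetical Registration type and directory; only grpc.Dial and the insecure credentials helper are real grpc-go API here, and plain-text credentials are only reasonable because everything rides inside the wireguard network:

```go
package server

import (
	"fmt"
	"sync"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// Registration is a hypothetical stand-in for what a reactor sends on
// connect: its id and the ip:port it serves gRPC on.
type Registration struct {
	ID   uint32
	Addr string
}

// Directory tracks where reactors can be reached and any open
// connections back to them.
type Directory struct {
	mu    sync.Mutex
	addrs map[uint32]string
	conns map[uint32]*grpc.ClientConn
}

func NewDirectory() *Directory {
	return &Directory{
		addrs: make(map[uint32]string),
		conns: make(map[uint32]*grpc.ClientConn),
	}
}

// Register records a reactor's connection details on connect.
func (d *Directory) Register(r Registration) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.addrs[r.ID] = r.Addr
}

// Connect dials back to a registered reactor on demand, e.g. when its
// page is rendered client side, reusing an existing connection if open.
func (d *Directory) Connect(id uint32) (*grpc.ClientConn, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if c, ok := d.conns[id]; ok {
		return c, nil
	}
	addr, ok := d.addrs[id]
	if !ok {
		return nil, fmt.Errorf("reactor %d has not registered", id)
	}
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	d.conns[id] = conn
	return conn, nil
}
```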
- -### Outline -- Update the server to connect to the reactor itself for the information -- Decide what information is important enough to send to the server consistently, vs what only is needed upon "further inspection" - - need reactor information on connect - - need basic device information such as address and status - - when selected - - need specific device breakouts with advanced functions per device - - this can be multiplexed over the same gRPC connection and can be fulfilled by the device coordinator - - dc will catch all incoming requests and forward to the correct DM based on address - -### TODO -- reverse monitoring stuff - - make it so reactor manager has a timeout/ recognizes disconnects gracefully - - convert monitoring to a stream as opposed to consistent calls diff --git a/internal/pkg/config/load.go b/internal/pkg/config/load.go deleted file mode 100644 index 22568ac..0000000 --- a/internal/pkg/config/load.go +++ /dev/null @@ -1,46 +0,0 @@ -package config - -/* -Load.go contains methods to load values from config, flags and env. -*/ - -import ( - "FRMS/internal/pkg/logging" - "fmt" - - "github.com/spf13/viper" -) - -func LoadConfig(fname string) *viper.Viper { - // Demarshalls a given filename into the struct - // returns nil if successful - config := viper.New() - configPath := "$HOME/FRMS/internal/configs" - logging.Debug(logging.DStart, "Loading config for %s", fname) - config.SetConfigName(fname) - config.SetConfigType("yaml") - //viper.AddConfigPath("/etc/frms/config") - config.AddConfigPath(configPath) - // struct and env vars - - // Sets env vars - config.AutomaticEnv() - - // reading - if err := config.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // no config file found - fmt.Printf("No config file found! 
creating empty one at %s.\n", configPath) - if err = config.WriteConfigAs(configPath); err != nil { - panic(err) - } - } else { - panic(err) - } - } - - logging.Debug(logging.DStart, "CON Loaded configs from %v", config.ConfigFileUsed()) - - // returning config object - return config -} diff --git a/internal/pkg/logging/logging.go b/internal/pkg/logging/logging.go deleted file mode 100644 index c4c0bbc..0000000 --- a/internal/pkg/logging/logging.go +++ /dev/null @@ -1,84 +0,0 @@ -package logging - -import ( - "log" - "fmt" - "os" - "errors" - "time" - "strconv" -) - -func getLogType() string { - if t, ok := os.LookupEnv("LOGTYPE"); ok { - return t - } - return "DEFAULT" -} - -func getVerbosity() int { - v := os.Getenv("VERBOSE") - level := 0 - if v != "" { - var err error - level, err = strconv.Atoi(v) - if err != nil { - log.Fatalf("Invalid Verbosity %v", v) - } - } - return level -} - -type logTopic string -const ( - // define 4 character topic abbreviations for coloring - DError logTopic = "ERRO" - DClient logTopic = "CLNT" - DStart logTopic = "STRT" - DExit logTopic = "EXIT" - DPing logTopic = "PING" - DScan logTopic = "SCAN" - DSpawn logTopic = "SPWN" -) -// the list can grow - -var debugStart time.Time -var debugVerbosity int - -func init() { - - debugVerbosity = getVerbosity() - debugStart = time.Now() - if debugVerbosity > 0 { - path := "log/" - if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) { - err := os.Mkdir(path, os.ModePerm) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - } - logtype := getLogType() // start with "REACTOR" etc - timestamp := time.Now().Format("Mon-15:04:05") - filename := fmt.Sprintf("%s-%s.log", logtype, timestamp) - f, err := os.OpenFile(path+filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) - if err != nil { - log.Fatal(err) - } - log.SetOutput(f) - } - - log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime)) // turns off date and time so we can set manually -} - -// example call Debug(dClient, "R%d connecting to client %d", r.Id, c.Id) -func Debug(topic logTopic, format string, a ...interface{}) { - if debugVerbosity >= 1 { - time := time.Since(debugStart).Microseconds() - time /= 100 - prefix := fmt.Sprintf("%06d %v ", time, string(topic)) - format = prefix + format - log.Printf(format, a...) - } -} - diff --git a/internal/pkg/websocket/reactor.go b/internal/pkg/websocket/reactor.go deleted file mode 100644 index d055e26..0000000 --- a/internal/pkg/websocket/reactor.go +++ /dev/null @@ -1,2 +0,0 @@ -// implemenets a reactor object with websocket methods -package websocket diff --git a/reactor/Cargo.lock b/reactor/Cargo.lock new file mode 100644 index 0000000..1ab52f1 --- /dev/null +++ b/reactor/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3
+
+[[package]]
+name = "reactor"
+version = "0.1.0"
diff --git a/reactor/Cargo.toml b/reactor/Cargo.toml
new file mode 100644
index 0000000..71fe067
--- /dev/null
+++ b/reactor/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "reactor"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
diff --git a/internal/pkg/reactor/coordinator.go b/reactor/needs_port/core/coordinator.go
similarity index 100%
rename from internal/pkg/reactor/coordinator.go
rename to reactor/needs_port/core/coordinator.go
diff --git a/internal/pkg/reactor/device.go b/reactor/needs_port/core/device.go
similarity index 100%
rename from internal/pkg/reactor/device.go
rename to reactor/needs_port/core/device.go
diff --git a/internal/pkg/device/atlas.go b/reactor/needs_port/device/atlas.go
similarity index 100%
rename from internal/pkg/device/atlas.go
rename to reactor/needs_port/device/atlas.go
diff --git a/internal/pkg/device/controller.go b/reactor/needs_port/device/controller.go
similarity index 100%
rename from internal/pkg/device/controller.go
rename to reactor/needs_port/device/controller.go
diff --git a/internal/pkg/device/do.go b/reactor/needs_port/device/do.go
similarity index 100%
rename from internal/pkg/device/do.go
rename to reactor/needs_port/device/do.go
diff --git a/internal/pkg/device/manager.go b/reactor/needs_port/device/manager.go
similarity index 100%
rename from internal/pkg/device/manager.go
rename to reactor/needs_port/device/manager.go
diff --git a/internal/pkg/device/mappings.go b/reactor/needs_port/device/mappings.go
similarity index 100%
rename from internal/pkg/device/mappings.go
rename to reactor/needs_port/device/mappings.go
diff --git a/internal/pkg/device/ph.go b/reactor/needs_port/device/ph.go
similarity index 100%
rename from internal/pkg/device/ph.go
rename to reactor/needs_port/device/ph.go
diff --git a/internal/pkg/device/pwm.go b/reactor/needs_port/device/pwm.go
similarity index 100%
rename from internal/pkg/device/pwm.go
rename to reactor/needs_port/device/pwm.go
diff --git a/internal/pkg/device/rtd.go b/reactor/needs_port/device/rtd.go
similarity index 100%
rename from internal/pkg/device/rtd.go
rename to reactor/needs_port/device/rtd.go
diff --git a/internal/pkg/device/sensor.go b/reactor/needs_port/device/sensor.go
similarity index 100%
rename from internal/pkg/device/sensor.go
rename to reactor/needs_port/device/sensor.go
diff --git a/internal/pkg/i2c/bus.go b/reactor/needs_port/i2c/bus.go
similarity index 100%
rename from internal/pkg/i2c/bus.go
rename to reactor/needs_port/i2c/bus.go
diff --git a/internal/pkg/system/hwinfo.go b/reactor/needs_port/system/hwinfo.go
similarity index 97%
rename from internal/pkg/system/hwinfo.go
rename to reactor/needs_port/system/hwinfo.go
index dca66ed..08715b7 100644
--- a/internal/pkg/system/hwinfo.go
+++ b/reactor/needs_port/system/hwinfo.go
@@ -1,4 +1,4 @@
-// package system uses linux commands to get hardware info from devices
+// package system uses linux commands to get hardware info for identification
 package system
 
 import (
diff --git a/reactor/src/device.rs b/reactor/src/device.rs
new file mode 100644
index 0000000..a0aedb8
--- /dev/null
+++ b/reactor/src/device.rs
@@ -0,0 +1,22 @@
+use std::fmt;
+
+pub struct Device {
+    address: i32,
+}
+
+impl Device {
+    fn new(address: i32) -> Device {
+        Device { address }
+    }
+}
+
+impl fmt::Display for Device {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "{}", self.address)
+    }
+}
+
+/// outward factory function
+pub fn create_device(address: i32) -> Device {
+    Device::new(address)
+}
diff --git a/reactor/src/i2c.rs b/reactor/src/i2c.rs
new file mode 100644
index 0000000..d0a214e
--- /dev/null
+++ b/reactor/src/i2c.rs
@@ -0,0 +1,9 @@
+/// get_connected returns the addresses of connected I2C devices (stubbed: empty for now).
+pub fn get_connected() -> Vec<i32> {
+    let addresses: Vec<i32> = Vec::new();
+    addresses
+}
+
+// TODO: not implemented yet
+fn send_i2c_command() {
+}
diff --git a/reactor/src/main.rs b/reactor/src/main.rs
new file mode 100644
index 0000000..c02f6fa
--- /dev/null
+++ b/reactor/src/main.rs
@@ -0,0 +1,12 @@
+mod i2c;
+mod device;
+
+fn main() {
+    let addresses = i2c::get_connected();
+    let mut devices: Vec<device::Device> = Vec::new();
+    for address in addresses.iter() {
+        let device: device::Device = device::create_device(*address);
+        println!("Device: {}", device);
+        devices.push(device);
+    }
+}
diff --git a/go.mod b/server/go.mod
similarity index 75%
rename from go.mod
rename to server/go.mod
index 1643e45..8782978 100644
--- a/go.mod
+++ b/server/go.mod
@@ -3,9 +3,8 @@ module FRMS
 go 1.18
 
 require (
-	github.com/gdamore/tcell/v2 v2.4.1-0.20210905002822-f057f0a857a1
+	github.com/gorilla/websocket v1.5.0
 	github.com/influxdata/influxdb-client-go/v2 v2.9.1
-	github.com/rivo/tview v0.0.0-20220610163003-691f46d6f500
 	github.com/spf13/viper v1.12.0
 	google.golang.org/grpc v1.47.0
 	google.golang.org/protobuf v1.28.0
@@ -14,19 +13,14 @@
 require (
 	github.com/deepmap/oapi-codegen v1.8.2 // indirect
 	github.com/fsnotify/fsnotify v1.5.4 // indirect
-	github.com/gdamore/encoding v1.0.0 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
-	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
 	github.com/magiconair/properties v1.8.6 // indirect
-	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/spf13/afero v1.8.2 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -34,7 +28,6 @@
 	github.com/subosito/gotenv v1.3.0 // indirect
 	golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect
 	golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b // indirect
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
 	gopkg.in/ini.v1 v1.66.4 // indirect
diff --git a/go.sum b/server/go.sum
similarity index 96%
rename from go.sum
rename to server/go.sum
index 3c6907e..63ed049 100644
--- a/go.sum
+++ b/server/go.sum
@@ -69,10 +69,6 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
-github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
-github.com/gdamore/tcell/v2 
v2.4.1-0.20210905002822-f057f0a857a1 h1:QqwPZCwh/k1uYqq6uXSb9TRDhTkfQbO80v8zhnIe5zM= -github.com/gdamore/tcell/v2 v2.4.1-0.20210905002822-f057f0a857a1/go.mod h1:Az6Jt+M5idSED2YPGtwnfJV0kXohgdCBPmHGSYc1r04= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= @@ -167,8 +163,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= -github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -180,8 +174,6 @@ github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -195,10 +187,6 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rivo/tview v0.0.0-20220610163003-691f46d6f500 h1:KvoRB2TMfMqK2NF2mIvZprDT/Ofvsa4RphWLoCmUDag= -github.com/rivo/tview v0.0.0-20220610163003-691f46d6f500/go.mod h1:WIfMkQNY+oq/mWwtsjOYHIZBuwthioY2srOmljJkTnk= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= @@ -370,7 +358,6 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -380,10 +367,6 @@ golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/
 golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -391,7 +374,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/internal/api/server.go b/server/internal/api/server.go
similarity index 62%
rename from internal/api/server.go
rename to server/internal/api/server.go
index 0ba38b2..2248c4c 100644
--- a/internal/api/server.go
+++ b/server/internal/api/server.go
@@ -13,6 +13,14 @@ type reactorCoordinator struct {
 	id string
 }
 
+func (r *reactorCoordinator) Start() error {
+	return errors.New("todo")
+}
+
+func (r *reactorCoordinator) Ping() error {
+	return errors.New("todo")
+}
+
 func StartReactor(id string) ReactorCoordinator {
 	return &reactorCoordinator{id:id}
 }
diff --git a/server/internal/pkg/config/load.go b/server/internal/pkg/config/load.go
new file mode 100644
index 0000000..1145a7b
--- /dev/null
+++ b/server/internal/pkg/config/load.go
@@ -0,0 +1,62 @@
+// Package config provides an interface to load and store config files
+// using the XDG standard ($HOME/.config/FRMS) as the base directory
+//
+// WARNING: only built for Linux
+package config
+
+import (
+	"FRMS/internal/pkg/logging"
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/spf13/viper"
+)
+
+// LoadConfig takes a filename as an argument and returns a *viper.Viper object.
+// Loads and stores config files into the base directory according to XDG standard ($HOME/.config/FRMS/). +// Will create directory and config if they don't exist. +func LoadConfig(filename string) (*viper.Viper, error) { + + config := viper.New() + + // default config dir + path := fmt.Sprintf("%s/.config/FRMS", os.Getenv("HOME")) + filetype := "yaml" + + // checking for existence + if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) { + if err := os.Mkdir(path, os.ModePerm); err != nil { + return config, err + } + } + + logging.Debug(logging.DStart, "Loading config (%s)", filename) + + // setting config file info + config.SetConfigName(filename) + config.SetConfigType(filetype) + config.AddConfigPath(path) + + config.AutomaticEnv() + + // reading in config + if err := config.ReadInConfig(); err != nil { + + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + + fullpath := fmt.Sprintf("%s/%s.%s", path, filename, filetype) + logging.Debug(logging.DStart, "CON config does not exist!") + if err = config.WriteConfigAs(fullpath); err != nil { + return config, err + } + logging.Debug(logging.DStart, "CON created at %s", fullpath) + } else { + return config, err + } + } + + logging.Debug(logging.DStart, "CON Loaded configs from %v", config.ConfigFileUsed()) + + return config, nil +} diff --git a/internal/pkg/grpc/device.proto b/server/internal/pkg/grpc/device.proto similarity index 100% rename from internal/pkg/grpc/device.proto rename to server/internal/pkg/grpc/device.proto diff --git a/internal/pkg/grpc/monitoring.pb.go b/server/internal/pkg/grpc/monitoring.pb.go similarity index 100% rename from internal/pkg/grpc/monitoring.pb.go rename to server/internal/pkg/grpc/monitoring.pb.go diff --git a/internal/pkg/grpc/monitoring.proto b/server/internal/pkg/grpc/monitoring.proto similarity index 100% rename from internal/pkg/grpc/monitoring.proto rename to server/internal/pkg/grpc/monitoring.proto diff --git a/internal/pkg/grpc/monitoring_grpc.pb.go b/server/internal/pkg/grpc/monitoring_grpc.pb.go similarity index 100% rename from internal/pkg/grpc/monitoring_grpc.pb.go rename to server/internal/pkg/grpc/monitoring_grpc.pb.go diff --git a/internal/pkg/grpc/server.pb.go b/server/internal/pkg/grpc/server.pb.go similarity index 100% rename from internal/pkg/grpc/server.pb.go rename to server/internal/pkg/grpc/server.pb.go diff --git a/internal/pkg/grpc/server.proto b/server/internal/pkg/grpc/server.proto similarity index 100% rename from internal/pkg/grpc/server.proto rename to server/internal/pkg/grpc/server.proto diff --git a/internal/pkg/grpc/server_grpc.pb.go b/server/internal/pkg/grpc/server_grpc.pb.go similarity index 100% rename from internal/pkg/grpc/server_grpc.pb.go rename to server/internal/pkg/grpc/server_grpc.pb.go diff --git a/internal/pkg/influxdb/client.go b/server/internal/pkg/influxdb/client.go similarity index 100% rename from internal/pkg/influxdb/client.go rename to server/internal/pkg/influxdb/client.go diff --git a/internal/pkg/logging/dslogs b/server/internal/pkg/logging/dslogs similarity index 99% rename from internal/pkg/logging/dslogs rename to server/internal/pkg/logging/dslogs index dfdb147..98a98f4 100755 --- a/internal/pkg/logging/dslogs +++ b/server/internal/pkg/logging/dslogs @@ -17,6 +17,7 @@ TOPICS = { "PING": "#d0b343", "SCAN": "#70c43f", "SPWN": "#4878bc", + "STOP": "#ffff00", #"LOG2": "#398280", #"CMIT": "#98719f", #"PERS": "#d08341", diff --git a/server/internal/pkg/logging/logging.go b/server/internal/pkg/logging/logging.go new file mode 100644 index 
0000000..aa28511 --- /dev/null +++ b/server/internal/pkg/logging/logging.go @@ -0,0 +1,86 @@ +package logging + +import ( + "errors" + "fmt" + "log" + "os" + "strconv" + "time" +) + +func getLogType() string { + if t, ok := os.LookupEnv("LOGTYPE"); ok { + return t + } + return "DEFAULT" +} + +func getVerbosity() int { + v := os.Getenv("VERBOSE") + level := 0 + if v != "" { + var err error + level, err = strconv.Atoi(v) + if err != nil { + log.Fatalf("Invalid Verbosity %v", v) + } + } + return level +} + +type logTopic string + +const ( + // define 4 character topic abbreviations for coloring + DError logTopic = "ERRO" + DClient logTopic = "CLNT" + DStart logTopic = "STRT" + DExit logTopic = "EXIT" + DPing logTopic = "PING" + DScan logTopic = "SCAN" + DSpawn logTopic = "SPWN" + DStop logTopic = "STOP" +) + +// the list can grow + +var debugStart time.Time +var debugVerbosity int + +func init() { + + debugVerbosity = getVerbosity() + debugStart = time.Now() + if debugVerbosity > 0 { + path := "log/" + if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) { + err := os.Mkdir(path, os.ModePerm) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + } + logtype := getLogType() // start with "REACTOR" etc + timestamp := time.Now().Format("Mon-15:04:05") + filename := fmt.Sprintf("%s-%s.log", logtype, timestamp) + f, err := os.OpenFile(path+filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + log.Fatal(err) + } + log.SetOutput(f) + } + + log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime)) // turns off date and time so we can set manually +} + +// example call Debug(dClient, "R%d connecting to client %d", r.Id, c.Id) +func Debug(topic logTopic, format string, a ...interface{}) { + if debugVerbosity >= 1 { + time := time.Since(debugStart).Microseconds() + time /= 100 + prefix := fmt.Sprintf("%06d %v ", time, string(topic)) + format = prefix + format + log.Printf(format, a...) 
+	}
+}
diff --git a/internal/pkg/manager/manager.go b/server/internal/pkg/manager/manager.go
similarity index 100%
rename from internal/pkg/manager/manager.go
rename to server/internal/pkg/manager/manager.go
diff --git a/internal/pkg/server/coordinator.go b/server/internal/pkg/server/coordinator.go
similarity index 99%
rename from internal/pkg/server/coordinator.go
rename to server/internal/pkg/server/coordinator.go
index 65c98a8..95e6158 100644
--- a/internal/pkg/server/coordinator.go
+++ b/server/internal/pkg/server/coordinator.go
@@ -3,7 +3,6 @@ package server
 import (
 	pb "FRMS/internal/pkg/grpc"
 	"FRMS/internal/pkg/influxdb"
-	_ "FRMS/internal/pkg/influxdb"
 	"FRMS/internal/pkg/logging"
 	"context"
 	"errors"
diff --git a/internal/pkg/server/listener.go b/server/internal/pkg/server/listener.go
similarity index 100%
rename from internal/pkg/server/listener.go
rename to server/internal/pkg/server/listener.go
diff --git a/internal/pkg/server/manager.go b/server/internal/pkg/server/manager.go
similarity index 100%
rename from internal/pkg/server/manager.go
rename to server/internal/pkg/server/manager.go
diff --git a/internal/pkg/server/reactormanager.go b/server/internal/pkg/server/reactormanager.go
similarity index 100%
rename from internal/pkg/server/reactormanager.go
rename to server/internal/pkg/server/reactormanager.go
diff --git a/internal/pkg/server/system.go b/server/internal/pkg/server/system.go
similarity index 100%
rename from internal/pkg/server/system.go
rename to server/internal/pkg/server/system.go
diff --git a/server/internal/pkg/system/hwinfo.go b/server/internal/pkg/system/hwinfo.go
new file mode 100644
index 0000000..08715b7
--- /dev/null
+++ b/server/internal/pkg/system/hwinfo.go
@@ -0,0 +1,118 @@
+// package system uses linux commands to get hardware info for identification
+package system
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash/fnv"
+	"net"
+	"os/exec"
+	"strings"
+)
+
+func GetId() (int, error) {
+	// gets the mac address and hashes into consistent id
+	maccmd := "ifconfig eth0 | awk '/ether / {print $2}'" // assumes eth0 is the primary interface
+	var stderr bytes.Buffer
+	var out bytes.Buffer
+	cmd := exec.Command("bash", "-c", maccmd)
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	if err := cmd.Run(); err != nil {
+		return 0, err
+	}
+	hash := fnv.New32a()
+	hash.Write(out.Bytes())
+	id := hash.Sum32()
+	return int(id), nil
+}
+
+func GetIp() (string, error) {
+	ipcmd := `ip route get 1 | sed 's/^.*src \([^ ]*\).*$/\1/;q'`
+	var stderr bytes.Buffer
+	var out bytes.Buffer
+	cmd := exec.Command("bash", "-c", ipcmd)
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	if err := cmd.Run(); err != nil {
+		return "", err
+	}
+	ip := strings.Trim(out.String(), " \n")
+	return ip, nil
+
+}
+
+func GetPort() (int, error) {
+	// obsolete
+	if addr, err := net.ResolveTCPAddr("tcp", ":0"); err != nil {
+		return 0, err
+	} else if lis, err := net.ListenTCP("tcp", addr); err != nil {
+		return 0, err
+	} else {
+		defer lis.Close()
+		return lis.Addr().(*net.TCPAddr).Port, nil
+	}
+}
+
+func GetBus() (int, error) {
+	// preset busses
+	busList := map[string]int{"raspberrypi": 1, "beaglebone": 2}
+	// vars
+	var bus int
+	var ok bool
+
+	if name, err := GetModel(); err != nil {
+		return bus, err
+	} else if bus, ok = busList[name]; !ok {
+		return 0, errors.New(fmt.Sprintf("No bus for dev %s", name))
+	}
+
+	// returns correct bus
+	return bus, nil
+}
+
+func GetModel() (string, error) {
+	var stderr, out bytes.Buffer
+	cmd := exec.Command("bash", "-c", "hostname")
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	if err := cmd.Run(); err != nil {
+		return "", err
+	}
+	b := out.String()
+	b = strings.Trim(b, " \n")
+	return b, nil
+}
+
+func Get() error {
+	// responsible for filling out struct
+	//bus := map[string]int{"raspberrypi":1,"beaglebone":2} // eventually will replace this with a config file
+
+	ipcmd := "ifconfig eth0 | awk '/inet / {print $2}'"
+	maccmd := "ifconfig eth0 | awk '/ether / {print $2}'"
+	devcmd := "lshw -C system 2>/dev/null | head -n 1"
+
+	res := [3]bytes.Buffer{}
+	var stderr bytes.Buffer
+	cmds := []string{ipcmd, maccmd, devcmd}
+	for i, c := range cmds {
+		cmd := exec.Command("bash", "-c", c)
+		cmd.Stdout = &res[i]
+		cmd.Stderr = &stderr
+		err := cmd.Run()
+		if err != nil {
+			return err
+		}
+	}
+	// formatting
+	ip := res[0].String()
+	ip = strings.Trim(ip, " \n")
+
+	hash := fnv.New32a()
+	hash.Write(res[1].Bytes())
+
+	b := res[2].String()
+	b = strings.Trim(b, " \n")
+	return nil
+}
diff --git a/internal/pkg/websocket/connect.go b/server/internal/pkg/websocket/connect.go
similarity index 94%
rename from internal/pkg/websocket/connect.go
rename to server/internal/pkg/websocket/connect.go
index 4378083..70127af 100644
--- a/internal/pkg/websocket/connect.go
+++ b/server/internal/pkg/websocket/connect.go
@@ -1,3 +1,4 @@
+// Package websocket sets up websocket connections with clients and allows live reactor readouts.
 package websocket
 
 // creates websocket server and upgrades incoming connections
diff --git a/cmd/server/main.go b/server/main.go
similarity index 55%
rename from cmd/server/main.go
rename to server/main.go
index 42f6bf5..ab8c64f 100644
--- a/cmd/server/main.go
+++ b/server/main.go
@@ -22,8 +22,8 @@ func NewCoordinator(config *viper.Viper, ch chan error) coordinator {
 	return server.NewCentralCoordinator(config, ch)
 }
 
-func NewConfig(fname string) *viper.Viper {
-	return config.LoadConfig(fname)
+func NewConfig(filename string) (*viper.Viper, error) {
+	return config.LoadConfig(filename)
 }
 
 type ws interface {
@@ -40,29 +40,38 @@ func main() {
 	signal.Notify(gracefulShutdown, syscall.SIGINT, syscall.SIGTERM)
 
 	// config file
-	conf := NewConfig("server")
+	conf, err := NewConfig("server")
+	if err != nil {
+		panic(err)
+	}
 
 	errCh := make(chan error)
 	c := NewCoordinator(conf, errCh)
 	go c.Start()
-	logging.Debug(logging.DStart, "CCO 01 Server %s started", conf.Get("name"))
-	// starting websocket server
+	fmt.Printf("Coordinator started\n")
+	logging.Debug(logging.DStart, "CCO %s started", conf.Get("name"))
 
-	w := NewWebSocket()
-	go w.Start()
+	// starting websocket server
+	// gated behind env for testing
+	if conf.Get("WEBSOCKET") == "start" {
+		w := NewWebSocket()
+		go w.Start()
+		fmt.Printf("Websocket started\n")
+		logging.Debug(logging.DStart, "WS websocket started")
+	}
 
 	select {
-	case err := <-errCh: // blocking to wait for any errors and keep alive otherwise
+	case err := <-errCh: // allows passing errors up for handling
		panic(err)
 	case <-gracefulShutdown: // Shutdown via INT
-		// storing config
-		fmt.Printf("\nStoring config to %s\n", conf.ConfigFileUsed())
+		fmt.Printf("\nExiting...\n")
+		logging.Debug(logging.DStop, "CON storing to %s", conf.ConfigFileUsed())
 		if err := conf.WriteConfig(); err != nil {
 			panic(err)
 		}
-		fmt.Println("Stored config successfully. Exiting...")
+		logging.Debug(logging.DStop, "CON stored successfully")
 		os.Exit(0)
 	}
 }
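
Note on the Rust port: reactor/src/i2c.rs above leaves send_i2c_command stubbed and [dependencies] in reactor/Cargo.toml empty. A minimal sketch of one way that stub could be filled in, assuming the i2cdev crate is adopted for Linux I2C access (an assumption, not part of this commit; it would require adding i2cdev to Cargo.toml):

    // Sketch only: assumes `i2cdev = "0.5"` is added under [dependencies].
    use i2cdev::core::I2CDevice;
    use i2cdev::linux::{LinuxI2CDevice, LinuxI2CError};

    /// Writes a single SMBus byte to the device at `address` on bus 1.
    /// The bus path and payload here are placeholders for illustration.
    fn send_i2c_command(address: u16, value: u8) -> Result<(), LinuxI2CError> {
        let mut dev = LinuxI2CDevice::new("/dev/i2c-1", address)?;
        dev.smbus_write_byte(value)?;
        Ok(())
    }

get_connected could then be completed along the lines of i2cdetect: probe each address in 0x03..=0x77 and keep the ones that acknowledge a read.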