From 444bb8684e31f27505732d759f718cc00314c6aa Mon Sep 17 00:00:00 2001 From: KeeganForelight Date: Fri, 16 Jun 2023 13:38:54 -0400 Subject: [PATCH] cleaning up old files and working on readme --- README.md | 23 +- Taskfile.dist.yml | 5 +- gotest.py | 21 +- internal/notes/archive/notes.md | 71 -- internal/notes/archive/old_notes | 1001 ---------------------------- internal/notes/archive/structure | 103 --- internal/notes/index.md | 4 - internal/notes/weekly/Jan-16-20.md | 149 ----- internal/notes/weekly/Jan-23-27.md | 49 -- internal/pkg/i2c/bus.go | 66 +- 10 files changed, 86 insertions(+), 1406 deletions(-) delete mode 100644 internal/notes/archive/notes.md delete mode 100644 internal/notes/archive/old_notes delete mode 100644 internal/notes/archive/structure delete mode 100644 internal/notes/index.md delete mode 100644 internal/notes/weekly/Jan-16-20.md delete mode 100644 internal/notes/weekly/Jan-23-27.md diff --git a/README.md b/README.md index e77b3f8..c18b524 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,24 @@ # FRMS -ForeLight Reactor Management System - -## Outline +## ForeLight Reactor Management System This branch will serve as the staging ground for adding unit tests and documentation in order to finalize v0.1.0-alpha + +## Table of Contents + +* [Introduction](#introduction) +* [Getting Started](#getting-started) + * [Installation](#installation) + * [Usage](#usage) +* [Wiki](#wiki) + +## Introduction + +## Getting Started + +### Installation + +### Usage + +## Wiki + diff --git a/Taskfile.dist.yml b/Taskfile.dist.yml index 6d48331..e1956da 100644 --- a/Taskfile.dist.yml +++ b/Taskfile.dist.yml @@ -43,4 +43,7 @@ tasks: GOARM: "{{.GOARM}}" GOARCH: "{{.GOARCH}}" GOOS: "{{.GOOS}}" - + test: + desc: "Runs the full test suite" + cmds: + - ./gotest.py diff --git a/gotest.py b/gotest.py index 37a72a3..1fa03c9 100755 --- a/gotest.py +++ b/gotest.py @@ -39,8 +39,10 @@ for line in output[:-1]: # individual test 
pkg.tests.append((parsed["Test"],parsed["Action"],parsed["Elapsed"])) +totalRan = 0 +totalPassed = 0 +totalTime = 0 # generating output from parsed json -total = 0 for name, info in res.items(): pkgname = name.split('/') pkgname = '/'.join(name.split('/')[1:]) @@ -58,9 +60,9 @@ for name, info in res.items(): out = [] if test[1] == "pass": passed += 1 - out = [" " + test[0] + ":",'\033[32mpass\033[0m ','(' + str(test[2]) + 's)'] + out = [" " + test[0] + ":",'\033[32mpass\033[0m ',str(test[2]) + 's'] elif test[1] == "fail": - out = [" " + test[0] + ":",'\033[31mfail\033[0m ','(' + str(test[2]) + 's)'] + out = [" " + test[0] + ":",'\033[31mfail\033[0m ',str(test[2]) + 's'] print(f"{out[0] : <30}{out[1] : >5}{out[2] : >8}") @@ -70,9 +72,18 @@ for name, info in res.items(): else: result = "\033[31mFAILED\033[0m" - print("summary:\n\t%s (%d/%d in %.3fs)" % (result, passed, total, info.totaltime)) + # keep track of grand totals + totalRan += total + totalPassed += passed + totalTime += info.totaltime + print(" %s %d/%d in %.3fs\n" % (result, passed, total, info.totaltime)) -# print("OVERALL:\n\tSkipped %d/%d\n\tFailed %d/%d\n\tPassed %d/%d\n" % (skippedTests, totalTests, failedTests, totalTests, passedTests, totalTests)) +# output overall test statistics +if totalRan == totalPassed: + result = "\033[32mPASSED\033[0m" +else: + result = "\033[31mFAILED\033[0m" +print("\nSUMMARY:\n\t%s %d/%d in %.3fs" % (result, totalPassed, totalRan, totalTime)) diff --git a/internal/notes/archive/notes.md b/internal/notes/archive/notes.md deleted file mode 100644 index 69938dd..0000000 --- a/internal/notes/archive/notes.md +++ /dev/null @@ -1,71 +0,0 @@ -*Time for a coherent plan of attack* - -### Current Issues: -- There is a lot of redundancy between the managers/coordinators when it comes to basic checks -- the package seperation kind of makes sense, but it needs to be better fleshed out -- I need to enforce better seperation of responsibilities. 
Somewhat unclear when state is being kept centrally in the coordinator for no apparent reason. - -### Solution: -- Go through the packages and consolidate -- Reduce the state we have to keep centrally, push responsibility to the other packages - -### Plan of attack: -- Outline core information flow -- Examine what interfaces are nessecary to make this work -- Stop looking at the server/reactor as seperate entities - -*I need to put the whole docker thing on the back burner for now. It isn't that important when it comes to immediate goals.* - -#### 12/05 TODO -- Cleanup server side config stuff to make it coherent -- Reflect changes to reactor side startup -- Boil down interface to address core issues -- Config outline: - 1) Startup and load the existing config - 2) Overwrite any previous settings with the flags - 3) Intelligently translate config into action - 4) launch coordinator and start up existing reactor managers -- Config Structure: - - Wrap viper functions in config struct methods to be used thrtugh interfaces - - minimize the reliance on viper so we can sub in othermethods -- is it even important to launch reactor managers? Wont they just be started on connection? - - -#### 12/06 TODO -- I think I can completely remove the old config way and just pass the viper object directly. I think its not worth the hassle of trying to keep track of a million interfaces - -#### 12/07 TODO -- I concede, I will just remove flags as most people will never use them anyway and instead rely on env vars and config files. To hell with the flags. -- I am ripping out all of the TUI and status manager stuff, its convoluted and harder than just pulling info from database. - - I can eventaully rework TUI to pull from DB which is fine, there will never be that many clients anyway and a lot of them are only 1 time calls with refreshes which aren't that slow anyway. -- alright I gutted the tui and system viewer, reworking sub coord to launch at start. 
That way there is a listener active -- time to boil down to functionality a LOT, right now its clumsy and inefficent, there needs to be a better way to keep everything straight -- Moving the DB responsibilites to the reactor itself seems to be the best way to do it in the short term. Reduce network load and overall keep things efficient. May lead to duplicte copies of data? Not the end of the world, logging system can make sure we are maintaining entries. - -**IDEA** -Reactors log data themselves, Send periodic status updates over grpc to enable monitoring faster than the sample rate -*This could work!* -Outline: -- Reactors reach out to server on boot to get DB info - - compare this against what they have internally to ensure they are up to date and allow for migrations - - Maybe not even save the db info because we don't need to?? -- Reactors also recieve port for their specific manager - - Can be dynamically given out to allow for spread out load -- Reactors then reach out with sensor and device info periodically (5s?) which can be used for live monitoring -- RM responds with any potential updates for the device settings i.e. change pwm duty on web interface, pass on to reactor -- Allows for a live view with current reading as well as historical data at differing interval via grafana. (i.e. 5s live view with 10 min sample interval) - -Need to differentiate sensors vs devices that can be changed -- Sensors have a variable sample rate and eventually name/address -- Devices have more and widley varying parameters, could be pwm with freq/duty/onoff or ph pump with on, time or off etc. - -#### 12/09 TODO -- Alright I have a baseline! I want to start to integrate atlas type stuff so that I have some mock data/sensors to work with. I am going to try to flesh out the "atlas" interface/struct to implement some of the more basic commands. 
- -#### 1/11 TODO -Plan of attack for websocket stuff and things - -**Questions** -- What to do about the reactor to user comms - - Websockets? GRPC? smoke signals? -- diff --git a/internal/notes/archive/old_notes b/internal/notes/archive/old_notes deleted file mode 100644 index 99d46ce..0000000 --- a/internal/notes/archive/old_notes +++ /dev/null @@ -1,1001 +0,0 @@ -time to plan - -terms - -RLC - reactor level coordinator (Beagleboard) -RH - Reactor Handler (goroutine) -SH - sensor handler (gourtine) -Reactor Side: - -needs - - way to discover active sensors - - spin up goroutine for each sensor responsible for keeping status and logs - - way to read back and truncate logs for safe data delivery to servr - - routing requests from reactor level coordinator to relevant sensor - - internal memory sharing and channels for reactor level coordination - -thoughts - - maybe the RLC can be responsible for packaging data for coordinator response - adv: - - clears up the network - - simplifies pinging - - keeps the data aributrary - cons: - - unknown data size - - how to coordinate data structure - -Server Side: - -needs - - way to look into a config file for active reactors - - should then spin up a goroutine for each reactor - - responsible for recovery and consistent communication - - individual database entries - - API? - - use gRPC for comms between server and BB - - each reactor handler needs mechanism for pinging, recovery, and database correctness - - - -message PingRequest { - // do we even need anything in a ping request? -} - -message PingResponse { - repeated Sensor sensor = 1; -} - -message Sensor { - string type = 1; - bool status = 2; - byte data = 3; -} - -sensors := [string]q - -6/23 TODO: - -X- BBB mem fix - - 32 gig for the main but where to put the OS? - - obv in EMMC but how to init sd card? (probably dev tree :( ) -Y- Server side impl - Y - Need a struct for the RC - X - Should we store and load configs based on IDs? 
(efficiency of this vs performance increases i.e. bandwidth vs storage) - Y/X - Who should kill the RC and how do we know its dead? (Garbage collection to the rescue hopefully) - -X- UPDATE PRES - - Add bottle necks for each part in that section - - I2C: 128 addrs and ~90 bytes/s per device at 128 devs optimally - - BB: Hardware is upgradeable even customizable ;) - - Server: Its overkill as is, can benchmark with a rudementary go overload once its completed -- Sensor configs - - how to store sensor info efficiently and searchably lol - - who needs to know what the sensor is? (Just the SM? Even the SM?) -X- TUI - - pls this would be so sick -TODO: 6-24 - -Y - Pres stuff from yesterday + python gRPC abstraction -Y - RPI flash -- Add resiliance to coordinator process (aka error handley blech) - - -TODO 6/27 -- Time to tackle sensor managers officially - - to hell with port generation - - going to use channels but not like a jackass - - going to try generating channels interface side but via implicit types to avoid the interface stff - - should set up a structure where I can use arbiturary types on the backend and fulfill methods to get/infer information on the frontend -- rewrite I2C interface to employ same method, should allow for this - 1) generate type - 2) send it to worker - 3) receive back (original? copy?) - 4) use interface methods to get required values -- should simplify all internal communication and potentially suggests api for implementation - -TODO 6/28 -- It works... kind of - - I learned a lot about - "the smaller the interface, the more useful it is" --Y time to tackle the server side error handleing aka reconnect - - custom backoff? Max TO? Sleep Period? - 5ms -> 10ms -> 25ms -> 50ms -> 100ms -> 250ms -> 500ms -> 1s -> 1s --Y Actual logic? - 1) Create listener - 2) create reactor managers for each reactor - a) whose job is it to kill on disconnect? Should we kill? 
- b) the RM will have the actual ping mechanism with coordinator aggregating in eventual TUI - 3) reactivated reactors should connect to the same RM to resume connections with less downtime. Memory use be damned (ALLOCATED?? ID VS IP) - 4) need a way to purge manually disconnected reactors - a) also should check ids which are deterministic and map actual hardware - 5) continue forever (for select??) --Y RM Logic? - 1) on spawn ping reactor to get initial sensor status - 2) enter ping loop to get status and keep heartbeat alive - 3) no action on sensor going down (unless method but duh) - 4) on reactor going down - 1) save to config? - 2) "sleep mode" - i.e. stop pinging and wasting resources doing stuff - 3) wait for coordinator to reactivated - 5) reactivation: call start and resume pinging -- RM Struct? - - needs to know - - ip:port of reactor - - id of reactor - - mapping of sensors to status - - last seen for reactor (and sensor?) - - needs to be able to - - ping reactor for status - - get status - - store and update status reliabily - - stop pinging on down detection - - detection outages - - respond to coordinator requests? (rpc or method most likely?) - - relies on - - SM for sensor status - - implements - - start/restart mechanism for coordinator - - ping response for coordinator -- Coordinator Struct? - - needs to know - - mapping of ids of connected reactors to RM - - its own ip:port to serve listener on - - internal mapping of system? (any efficiency benifiets here vs mem usage?) - - needs to be able to - - setup new RMs on incoming connections - - call existing RMs on incoming connections - - ping for reactor status from RMs - - store status for TUI? - - relies on - - RM for reactor status - - implements - - application logic - -That went surprisingly well... 
Same method for tui - -process outline: -TUI - 1) display TUI outline and connect to coordinator - 2) let coordinator know where to send reactor/sensor changes - 3) enter loop of checking for changes and drawing - 4) on quit should gracefully exit - -Coordinator - 1) on TUI connection start routine - 2) collect? sensor/reactor info and send init to TUI - 3) upon changes? send to TUI - 4) exit when TUI connection closes - - -- TUI struct - - needs to know - - all the tui stuff (duh) - - reactor/sensor status - - needs to be able to - - create (several instances of) a TUI - - receive and display up to date system info - - delete reactors/sensors - - be efficient - - i know this is broad but bear with me - - relies on - - coordinator struct for system info - - coordinator struct to fulfil delete request - - implements - - tui - - user interface or management -- Coordinator Struct - - needs to know - - all the sensor and reactor states - - needs to be able to - - know what the TUI knows/doesnt know - - efficiently notify tui of change in system - - notify sensors or reactors of removal - - relies on - - rm/sm to implement reactor/sensor removal - - rm for reactor/sensor status - - implements - - sender of system status for TUI - -TODO 6/29 -- refactoring - - creating general listener and then a coordinator for reactor/tui that uses listener to field incoming requests - - change update loops for status to only send new data or empty messages for pings -- tui - - creating coordinator/manager for TUI clients - - create update loop for tui clients - - grpc to tui client with updates and hb - - drawfunc loops over change buffer - - on disconnect should display static terminal with offline warning - -- Listener Struct - - needs to know - - IP - - Port - - needs to be able to - - respond to incoming gRPC - - create corrisponding manager // taken care of in the actual handler - - relies on - - grpc for the endpoint stuff - - manager impl - - implements - - listener for main 
func - - manager generator essentially - -coordinator should be seperate *** -- new coordinator struct - - listener sends new connections to coordinator who appends to internal registery and reacts - - needs to know - - incoming: - - ip - - port - - client type? - - needs to be able to - - wait for incoming connections - - create a new manager for the client - - update internal directory - - remove entries ?? (tui -> reactor) - - implements - - manager creation - - connection handling - - client hb - - relies on - - manager impl - - listener call? - - -alright complete redesign -server acts as singular listener -routes all requests to a central coordiantor -this calls cooresponding client coordinator which then initiates managers etc - -now redesinging sensor info - -new fmt -1) have a seperate long running coordinator routine responsible for a "changed" list of sensors -2) on reactor status request: - a) send the changed list - b) if the send was acknowledged purge the change list - * both must be atomic - -new rlc struct - -- needs to know - - sensors connected - - changes in sensors - - how to establish connection to central server -- needs to be able to - - track changes in its system layout - - atomically relay these to the cc -- depends on - - I2C sensor info -- implements - - reactor sensor change tracking - -new general coord - -What does it need to do? -- Needs to respond to incoming clients from listener - - what does this mean? - - needs to link a client to a manager - - this is the whole high level idea - - can everything else be abstracted away? Why not? - - how to link? Channel? Shared Memory? - - channel is probably the best choice here - - structure: - managers - [uint32] : chan<- bool - 10292133 : chan<- bool - - how to use? 
- - when a client connects we search to see if a channel exits - - if it does we just send "true" down the channel to signal that the client connected - - if we dont we create a channel and a manager and start the manager - - we then send true down the newly created channel - -Do we ever close the channel? - - shouldn't that would mean the program is done accepting connections (could be a useful behavior in the future) - - do we ever send false? - - no, not sure what that would signal. - - coordinator is specifically only designed to create/notify managers of a client connection - -formally, New Coordinator: -- needs to know - - client type -- needs to be able to - - either notify or create and notify a manager on client connection - - handle concurrency -- relies on - - listener to send incoming clients - - manager to implement actual manager -- implements - - manager activation and memory - -TODO 6/30 -creating efficient system mapping and data logging/structure info - -idea # 1.5 -use json maybe? - -how? -- use json to build in the structure of our system via heirarchy -ex) -[ - { - "reactor": uint32, - "status": bool, - "connected devices": [ - "device" : { - "addr": "0x10" - "type": "ph sensor", - "status": uint32, - "data": [{"ph7.3"}, // json marshelling of the specific struct - }, - "device" : { - "addr": "0x11" - "type": "temp sensor" - status: uint32 - "data": "t24.5C" - } - ] - } -] - -use go structs to map components and embed them -can send - - -need to just spitball here - -what am I trying to do at the end of the day? -I am taking sensor measurements -and potentially tweaking control paramters -lets treat each one sperately at firs - -sensor measurements - -each particular sensor manager will only be responsible for getting data from its sensor -what is the scope of responsibilities? -the sensor manager should log this data locally using a method? json? - -how do we aggregate this info? 
- -what if we structure our reactor as a mirror of our coordiantor - -rlc job would be to -- establish connection with central server -- wait for connections from devices -- create reactor managers for these devices - -this could be really nice - -rm (general) job: -- establish connection with device via I2C (client via wifi) -- shut down when device connection drops -- start when device connects again - -adding data responsiblities - -tuim: - needs to know of a struct of system - [reactorid][deviceaddress][device] - thus needs to know: - - reactor id - - connected device addresses - - device info: can just be a string! - - made up of status and relevant data - what do we rely on - - accurate device info string - - can have someone else make/maintain struct and proxy updates -tuic: - -needs to maintain an atomic struct of system - as above - - still only needs to know - - reactor id - - connected device address maped to device info [string] - relies on - - accurate status updates - - accurate device info - -RC ** could have this guy be responsible for change parsing - - respond to updated status from RM and send to TUI - - basically a focus point - -RM - - needs to call corret handlers for data coming in from rlc - - can we just use grpc handlers that get embedded in the manager at start? - - handlers can also notify when data doesnt match previous entry - - this would prompt the data to be sent to the rc where it can be forwardd - -RLC - - needs to have internal reactor state - - - - -this gives us a complete "skeleton" of service where we can connect/reconnect clients with appropriate managers -there isnt any functionality yet to actually log data - -how do we leverage our current connections and add functionality to managers and coordinators? 
- -methods and channels - -each manager is responsible for pinging the associate device {reactor, device, tui} -either sending device info in tui case -or recieving it in reactor/device case - -this is why wrapping the gen structures is nessecary. Two different operations - -device manager: -could recieve 0-100000000 values -could be any type -could be any number per entry -common struct? -"timestamp":"data" -data could be json struct - - makes it easy to parse at some point - - force sensor driver to write a go struct for the data - - Parse___Data(*json.Unmarshalled) - - -complete i2c monitor redesign - -i2c interface needs to do - data stuff: - - locking structure to serialize commands/reads - - removal function to manually parse dead devices - - reconnecting should reinit device manager and stuff - init stuff: - - keep track of devices seen and connected - - notify rlc of devices that connect/reconnect -build init stuff into a struct that can be embedded? -I2CCoordinator - - created when rlc is created - - tie rlc to i2ccoord via channels - - new devices channel for devices that go offline->online - - send the i2cdevice struct to embed in rm - - can call interface funcs on the embedded interface - - - -Eureka part 2? -we are writing all of the software libraries which means we should (with some basic cleansing) be able to just send direct database queries -this means some things which could be pros or cons - -- each sensor realistically will have its own table for each reactor. -- we can group entries by reactor and look across time stamps (tidy?) -- we can log sql entries as text based backups -- we can use basic string struct with time stamps -- each sensor library will use a common struct and probably just use string fmting -- there are some efficiency benfiets if we used custom gRPC calls for each db entry - - but we can just leverage a biolerplate call with some extra overhead? 
-- we still need a way of representing state of components - - reactor is easy and could be kept server side - - sensor needs to be transmitted across rlc - - should default to down if the reactor goes offline (unknown?) - -direct query approach -pros - - easy to implement - - easy to use interfaces for common libs (compiling efficiency) - - easy to add sensors (use common libs and just make custom string in wrapper) - - can develop logging and db parts as manager funcs -cons - - need unique daemon to parse data on server for state struct - - trusting each sensor to maintain itself - - still need a way of translating state - -state problem - -it just should be an enumeration -its efficeint (could be as little as 4 bits but probably 1 byte) as opposed to a string ( len(s) * byte ex "ONLINE" = 6) - - is this all worth ~1-10? bytes of savings per dev? - - 100 reactors @ ~45 sensors = 46*100 = ~4.5 kb of state or ~ 36kb if we use strings - - so maybe? -more important than memory are network calls -need to update on tui: - - state changes (hopefully less frequent) - - current value (~5 seconds - ~30 minutes) -store both client and server side - - only store actively view data client side to prevent leaks - - store full struct but only serve as request response to prevent extra copies - -system struct - - mapping of reactor ids to "Reactor" structs - - reactor is mapping of addr to sensor structs - - sensor struct is basic info - - device type (enum vs string) - - device status (enum vs string) - - device most recent value (int? vs string?) 
- - if offline last seen time - -notes on struct - - should ideally have locks at reactor and sensor level - - use func to return sensor list via lock on reactor - - use func to update sensor list via lock on reactor - - use returned list to parse and request value from each sensor - - use goroutines and channels for efficient operation - - build response via returned structs - - respond to client - -note on tui manager - - ideally should keep simplified current client struct to spawn copies of the update daemons for each sensor - - each daemon should be EXTREMELY light weight and just feed new data values to the manager - - tuimanager will be responsible for efficently buffering for tui client requests - - tui pings should be frequent and response should be any data to update - - client side we should be able to essentialy overwrite any entries on our response - - simplifies interface - -data aggregation outline -Starting from sensor -1) specific sensor manager initiates a read of the embedded i2c dev -2) on success read gets logged with the time to the internal txt log (json) -RLC loop: - 3) rlc has long running method with sub routines reading each log and adding pending entries to the buffer - - buffer is bounded and routines block when it fills (use to limit rpc reply length) - 4) on ping buffer is parsed into rpc reply - - send buffered log ids to cleanup routine but dont delete from log yet - 5) next req has transaction ids of previous data that have been acked - 6) send ids to cleanup process - 7) respond with new buffer repeat -RM side: -received data from rlc -1) send reply to data parsing goroutine -parser loop: - 1) start a completion listener - 2) read each device in reply - 3) start goroutine of db daemon for each dev with completion chan - 4) once reply is empty can end -db daemon loop: - 1) loop over device data entries - 2) initiate db connection - 3) parse through each data entry and send to db - 4) if it was succesfull send the transaction id to 
the completion channel -monitoring rpc loop: - 1) listen for completed transaction entries - 2) append entries to ack - 3) send to rm on ping timer - -Data is now in database for all intents and purposes - - - -process ensures that the data is collected -now the data is on the server -6) server sends grpc reply results to a parsing gorotuine -7) the parser loops over reply and spawns db daemons to enter info - - -should we even aggregate data? why would we not just write a db client as part of the rlc and let the sensor managers themselves log - -need to focus: - -2 major things going on - -rlc can do data stuff on the reactor itself and just use the db client - - relies on exposed db endpoint but can just handle auth stuff - - can log locally - -rlc also responds to status requests - - queries sensors for status - - adds to and sends reply - - recieves these pings <= 5 seconds apart - - should have down detection to kill db actions - - optionally include a "data" string of the most recent reading - -going to focus on status -want system to - init reactors - poll for status - respond with sensor info - view and manage on tui - -how? 
- -all structs only in status context - -rlc struct -- knows - - connected devs and managers -- is able to - - poll managers for state info -- relies on - - managers for accurate and fast data -- implements data aggregation for rm - -dm struct -- knows - - underlying i2c dev interface - - basic device info -- is able to - - respond to rlc status requests -- relies on - - rlc to ask for status -- implements - - status response - - -alright holy shit -i have rewritten the same five functions so many times - -time to take inventory - -right now the system has -a central coordinator -that can spawn managers -that can manage clients -and a reactor coordinator -that can spawn device managers -that can manage devices - -I have a rudimentary ping system that queries the reactors for their status - -where to go next - -I want to build in control before I worry about actual data -this means tui and removal of managers - -how to start? - -need to create a central struct that serves as a representation of the system - -map[reactorid] -> [dev addr] device info -reactor is online by nature of responding to a ping -device info is what we query for - -tui manager will request data from the server struct - -server struct should bridge between reactor coordiantor and terminal coordinator -needs to be concurrent -needs to synchronize incoming sensor data - -instead of a big stupid struct - - just have rm maintain struct for each reactor - - connect tui requests to rm - -pros - - prevent redundancies in data - - limit operations after the ping - - serve copies? 
- -what will this look like - -TODO 7/5 -time to flesh out the tui and move into actual sensor/db libraries - -tuitime: -tui client (user side) -*will most likely be divided -needs to know: -- ip address of host -- eventually - - username/pass auth -needs to be able to -- request system info via server -- establish connection to central server -implements -- basic system management functionality -relies on - -- server for up to date reactor and sensor data - - -TUI TIME -coordinator/listner/reactor seem to be stable - - will have to add exiting for tui manager - -need to create the actual TUI at this point -seperate into two logical aspects - - The client that gets the system info from the server - - the tui which actually displays this info into the tui - -how to share the data between the client and tui? -- structs - - pros - - very efficient - - cons - - risky -- chan - - pros - - synchronize stuff - - cons - - hard to set up and finnicky -- methods - - pros - - syncronized - - easy to implement - - cons - - need to store/embed struct - -systemviewer.go -TODO 7/7 -time to make the synchronous system view -have a few ideas for the approach - a) make RM responsible for maintaining their own "branch" and store pointers to their branch - b) store the branch centrally and have the RM post changes - -I like the idea of a central system viewer and you post updates to worker clients - a) posting updates vs calling methods - blocking vs non blocking - b) - -lets layout expectations -RM should keep a current view of itself and whether it is online - - this should persist despite reactor outage - - in case of outage every sensor is UKNOWN - - optional last seen time for sensors/reactor - - exit should save to memory? persist for a given id? 
- - have a removal mechanism - - use case is to purge dead mechanism aka no black list - - each rm keeps most recent sensor view or reactor view in mem and can accept incoming system viewer connections -system viewer clients - - spawn 1 per tui client - - can do 2 things to any rm - a) call it for a complete copy which needs to be fast (gRPC?) - b) latch onto update chan to build its own copy for when the client requests the new devices - - follow a buffer -> client -> delete life cycle -system viewer "server" - a) spawn new system veiwer clients and direct them to the proper reactors - -aside: can we use this to replace coordinator system? just make a system directory - -what are we already storing? - in coordinator we have a mapping of ids to managers for reactor and - what if we remap system viewer and coordiantor to system coordinator which does both - seems redudent to keep multiple system copies - - any speed concerns? Client connections spawning new managers? - - we must lock map - - channels is probably the answer here, just desync the whole process from itself - - listener gets incoming clients - - clients get parsed into a "Client" object and sent to system coodiantor - - types have slight diffences but essentially: - 1) see if there is a stored manager or if we need to create one - 1.5) create manager if it doesnt exits - 2) start the manager with the client details - 3) create 2 chans (<-reactor tui<-) for reactor & device info - now the divergence - Reactor Manager: - 1) Connect to reactor and send initial ping - - if reactor ever doesnt respond (maybe backoff for unavailable) just kill manager and send offline to reactor status chan - 2) As device info comes in start maintaining a system struct - this must persist exits and starts - 3) For the sensor info coming in, send a copy on the chan to the void for all youre concerned - 4) Respond to requests for entire system copies as clients initially connect - - probably just a method - 5) only need to send 
reactor status on changes aka starts and exits - TUI Manager: - 1) Buffer the currently known reactor status via chan hook - 2) Wait (Timeout) for connection for tui client - - probably sub 5 seconds before we exit - - need to keep track via a last seen - 3) respond with the buffered reactor info - 4) on request for specific info - - request system viewer for the reactor info which will return the reactors sensor chan - - spawn goroutine to hook onto this chan and maintain a "local" copy of the new data for the client - - can probably make this more efficient but f it - - biggest buffer gets is # devs * size dev struct (bytes) - - drop anything but most recent - 5) as client requests for info we either reply with the buffer from the hook or create a new buffer - 6) translates into pages client side which are more efficent - 7) could even look at batching these eventually - 8) should probably kill the listeners (atleas the chan) when the tui client - a) disconnects - b) goes idle - - System Coordinator must then - 1) have a method to handle client connections that is concurrent safe - 2) start client managers on connection - 3) keep a directory of all the channels for clients for device and reactor info - 4) link tui client managers to rm properly -no need for a name change coordinator will have - system viewing functions in systemview.go - -alright check in time - -now have system viewer -which embeds a straight up info stream -and a map from reactor ids -> Info streams - -InfoStreams are structs with methods for adding listeners and senders -both return monitor objects which you can either - Send(device info) -or GetBuffer() and ClearBuffer([]da) - -this is all the foundation -just need to wrap into a thing the coordinator can useor maybe even replace coordinator - -systemviewer has 4 methods -every tui manager will embed a reactor listener -every reactor manager will embed a reactor sender -when a tui client selects a reactor we will embed the device listener 
-every reactor will be its own device sender - -the only thing that happens after init is tui may add device listeners - - - -should unify so that its only 1 ping request or a special request when we go to a page for the first time - -ex devinfo - -{ -Id uint32 // either id or addr -Type string //['reactor','device'] set by rlc -Status string //set by sm -Data string //optional -Index //added by monitor for consistent location in tui -Transaction ID //added by server and discarded in reply - -I may just scrap this shit in favor of a synced view -overhead is probably minimal anyway -redo listener bullshit to just route to the cs -tui clients will just get a fresh copy of the reactor info and device infofor every request - - -ADDING STANDARDIZED LOGGING -adding a log package for all packages -logs to a file named after start time -going to be of format - -TIME PROC CODE ID MSG -so -00013 STRT COR 912939123 Central coordinator started -00033 STRT -CODES -CCO - Central Coordinator -RCO - Reactor Coordinator -TCO - TUI Coordinator -RMA - Reactor Manager -TMA - TUI Manager -RLC - Reactor Level Coordinator -DMA - Device Manager -TUI - TUI Client - -every debug message will be of format -topic, devcode: id - -alright time to get this damn tui updating working - -general implementation details - -- libary card method - - only allow clients to checkout 1 device stream at a time (besides reactor stream obviously) - - close stream when call to open new one - - should we even store the reactor devices locally? 
- - or we could request when its selected and then let stream take care of itself -- simplifies a few things - - same setup for reactors/devices - - just call/create device listeners dynamically and reactor listeners at the start - - only check out reactor stream and 1 device stream at a time - - request for devices gets you the current state and adds your listener to the echo chain so that you recieve any updates - - need to ensure sends can complete even if the manager is dead - - close the channel? - - - -docker time -Need to refactor code so its eaisier to run in both envs -Refactoring server code now - - bind all gRPC services to the same IP:port to make it efficent - - funnel all rpcs through the one external port - - could even use nginx to route from default :80 -is there ever a situation where I would need to run this not on docker? - - can i just hardcode for docker and then rely on nginx for routing etc? - - - -ALRIGHT TIME TO LOCK TF IN -#TODO 8/1 -Time to set up proper config loading and storing - -Doing it all through interfaces and tagged structs - -On start up -Server needs to load up its own config - - take action on that config -wait for client connections - - load client config and reply with associated data -on client disconnect - - store any updates and return to idle state -restructing away from "listener" and coordiantor and stuff -going to just have central server -with an embedded listener -and database and shit -so -SERVER will call NewServer which will take care of subsequents - -# TODO 8/5 -Config storing time -going to probably have to add admin database client(aka server client which makes 0 sense) -can abstract all operations through interface and plugable package - -I just reliazed I coupled my mechanism with influxs token thing because it wokrs well but I am going to have to completely rebuild that if its properietary or we transition to a new DB - - hopefully null point due to the nature of OSS and time series - -CONFIG (and by 
extension DB) - -config.UpdateReactor(id, key, value) -config.UpdateSelf(key, value) - -should just be a basic way to update a given entry for an reactor -seperating server and reactor methods should lead to less finicky behaviour -should also probably wrap these in a seperate struct - - methods? - - who can call? - - who stores ptr? - - do we even need a ptr? can configs be stored and loaded statically or is that a bitch on the FS - -does it make more sensor to load different configs for each entity or justhave one monolithic config (probably load for each one and then let it update itself) - -going to have the init script set up the - -Welcome back - -#TODO 8/31 - -Goals: - - Add a config parser to load/store device manager struct - - start figuring out what a generic config package looks like - - figure out how to load different sensor functions dynamically - -Basic reactor workflow overview -1) On boot, scan I2C bus to find active devices -2) For every device shown as active, spawn a sensor manager from the assocaited config -3) on disconnect, shut the dm down and save current settings to config - -implementation time -#TODO 9/4 -Might be dying nbd - - i think its just freshman flu but could be clot who knows - -on to code -Need to have a functional BETA by 9/15 at the latest - pref 9/8 with a week to test - -What do we NEED out of FRMS v0.1.0 (pre-alpha - as an aside v1.#.#-apha then v.1.#.#-beta for versions) - -Needs: - - Connect and disconnect at will - - set sample and log rate - - set name - - live view data - - export expiriement data to CSV - -Notes: - - all sensors will be atlas - - can leverage for a unified library - - can use grafana for the UI - - can bash script to eport data for a given time range into resspective sheet aka sheet of DO measurements etc. 
- - can setuo the format pretty easily and probably just print F the query worst case I mean its 3 data points at probabnly 1 sample per minute at worst - -Architecture planning phase - -What would each need require software wise - -Need: Connect and disconnect at will - - directory of which device manager to load - - a way to store and load settings - - a way to monitor the i2c lines for new devices - - -Config interface - At a core -Load() - - load keys, config and env - - prompt for any updates - - store said updates - - store any future requests - -functions both server and reactor will use: -- load config -- load keys - - dif keys -- load env - - dif env - -order of ops - load config - load keys and env to overwrite config - store updates - have config with methods to get/set values - - - - - - diff --git a/internal/notes/archive/structure b/internal/notes/archive/structure deleted file mode 100644 index 7cc3e54..0000000 --- a/internal/notes/archive/structure +++ /dev/null @@ -1,103 +0,0 @@ -this will be a living doc - -starting with for connection management: - -listener: -- knows - - ip:port to listen to - - clients connect with{ip, port, clientType, model, id} -- is able to - - create coordinators for each clientType - - send new clients to coordiantor handlers via chan -- depends on - - clients sending known data (gRPC) - - NewCoordinator func -- implements - * shouldnt really have any callable methods - -coordinator: (General) -- knows - - what client ids have already connected - - which managers correlate to which clients -- is able to - - create managers for new clients - - start managers for clients -- depends on - - listener for client structure - - manager for NewManager() function -- implements - - client connection handling - - general manager call - -manager (general): -- knows - - client info - - timeout - - if it is active -- is able to - - establish a connection with a client - - stop when the connection drops -- depends on - - coordinator for 
start calls -- implements - - client connection creation - - client info storage - -manager (reactor): -* embedds the gm -- knows - - devices attached to the reactor - - underlying client manager -- is able to - - maintain device struct - - no pings only control logic (i.e remove device, restart etc) -- depends on - - gm for client conn - - coordiantor for starts -- implements - - reactor structure tracking - -manager (tui): -* embedds the gm -- knows - - structure of the system (ie reactors:devices) - - underlying client manager -- is able to - - keep track of system changes - - updates client from buffer or concurrent grpc? -- depends on - - RM to get system info - - coordinator for starts -- implements - - system tracking - -reactor level coordinator: (right away this feels so fat compared) -- knows - - current connected devices - - server to init to - - its own hwinfo to establish itself as a client -- is able to: - - reach out to server on boot - - transmit client details - - keep reactor devices current -- depends on - - I2C package to notify of connected devices - - hardware info to get its client info - - server to handle connection - - sm for new manager -- implements - - reactor status handler for updates to other coords/managers - -device itself: -- knows - - probe status ( maybe) - - data in buffer -- is able to - - clear buffer on request - - respond to commands -- implements - - data collection - - control execution -- depends on - - nothing its a driver - - maybe the control logic?? 
diff --git a/internal/notes/index.md b/internal/notes/index.md deleted file mode 100644 index 24f3e20..0000000 --- a/internal/notes/index.md +++ /dev/null @@ -1,4 +0,0 @@ -## Weekly Planning - -[Jan 16-20](weekly/Jan-16-20.md) -[Jan 23-27](weekly/Jan-23-27.md) diff --git a/internal/notes/weekly/Jan-16-20.md b/internal/notes/weekly/Jan-16-20.md deleted file mode 100644 index ceb8d75..0000000 --- a/internal/notes/weekly/Jan-16-20.md +++ /dev/null @@ -1,149 +0,0 @@ -# Jan 18 -### Planning -**Monitoring Changes** - -I want to refactor the reactor stuff to be less method oriented as far as data collection. For example, the monitoring stuff is all about events that happen pretty infrequently. It makes sense to then use a channel on the device side to just feed relevant status updates back to the reactor. I think that this makes the most sense because this will synchronize updates and leverage the rarity of events to cut down on errant calls. -- pros - - less repitive method calls needed - - less device locking - - localize the information to different packages -- cons - - extra memory for channels and duplicate storage info - - could just remove status from dm? 
- -**New Idea** - -I can leverage wireguard to do server-> reactor connections even beyond the testing phase - -Changes: -1) move device coordinator into device package -2) expose relevant methods to reactor interface -3) clarify individual package responsibilities -4) add stuff server side to create/destroy grpc connections as the information is rendered client side - - this might be scuffed but oh well - -### Package Separation -**Reactor** -- coordinator - - creates initial link to the server - - creates database client - - creates and starts a device coordinator - -**Device** -- coordinator - - searches i2c bus for connected devices - - spins up managers to control the connected devices - - relays information back up to the reactor coordinator -- manager - - control over singular device - - has the core information that will be needed across any type of device (name, status, address etc) -- sub-manager - - fine grained struct with methods specific to the device - -**Server** - -Going to ignore for now because I am lazy -- central coordinator starts up database connection config etc -- reactor coordinator - -### TODO -**Monitoring Changes** -- [] change methods to channel based - - [] internal methods with spins - - [] pass structs with interface for methods - - -# Jan 19 - -### Orginizational changes - -What structure makes the most sense for the devices? - -#### Top-Down - -Ex) DeviceManager -> SensorManager -> DOManager -> Manager - -**Pros** -- probably a less complex interface layout? - - -**Cons** -- annoying to keep/pass state - - i.e. atlas needs the address to pass to the I2C but right now the devicemanager is storing that. Have to pass down via start which doesn't make a ton of sense - -#### Bottom-Up - -Ex) DOManager -> SensorManager -> DeviceManager -> Manager - -**Pros** -- top level manager has access to common info - - i.e. 
address, name etc -- can easily define common functions and use this to pass info upwards -- still don't have to import device manager as interfaces can handle getting/setting stuff - -**Cons** -- might get ugly with interfaces - - there might have to be a bunch of interfaces in the device package to handle nesting the manager itself - - this might not be true though as the device coordinator dictates what interfaces are needed, and already it doesn't really use any of the dm functionality - -**What would it look like?** -Device coordinator would call NewDeviceManager, - -### Outline of functionality - -Hopefully by going over what is expected of each manager, it will become clear what the layout should look like - -**Device Coordinator** -- responsibilities - - starting/stopping device managers as devices connect/disconnect - - maintaining a map of the devices and their status - - updating the server with this information at set intervals - - pass the I2C client to the device managers - -**Device Manager** -- responsibilities - - struct to store information that is used by any type of device - - i.e. Address, Name, Config(prefix and file)? Status? - - probably don't need status as this can be determined via IsActive() - - config might be helpful to have, could pass up to managers via a Get function - - start/stop as requested by the device coordinator -- serves - - broad functions such as SetName(), GetName(), etc. - -**Sensor/Controller Manager** -- responsibilities - - provide corresponding broad struct that will be consistent across types of each - - i.e. sensors all have sample rate - - provide methods all will use such as TakeReading() -- serves - - more specific functions such as GetSampleRate(), Set... - -**Specific Managers** -- responsibilities - - provides specific functions that a certain sensor/controller might need - - i.e. pwm will need setFreq, DO might need a conversion etc. 
- - broadly will need access to I2C for comms -- serves - - Hyper Specific functions such as SetFreq() etc. - -### Trying Bottom-Up - -Right now, I am using some hybrid format which doesn't really make any sense. It goes - -DeviceManager -> DOManager -> SensorManager -> Manager - -This just feels *wrong* - -**Changes** -- Going to use the specifc -> broad becaus it seems intiuitive - - the most common methods/information is at the base and propogates up through the more specific managers - - should make it simplier to define -- maybe go back to the unified package? Not quite clear what the purpose of seperate is beyond convience - - although... the idea of the device manager as a reusable peice makes enough sense to potentially keep it as a seperate package - - I'll stick with the seperate for now and keep it unless it becomes unworkable - -### I2C Changes -The i2c bus is locked at the device level, so I am going to rewrite the bs to just use a function with no struct and remove the whole passing of structs garbage - -#### For tomorrow -What I have now works, but it is still pretty backwards. Need further improvements and need to start thinking about what a websocket might look like in the current model diff --git a/internal/notes/weekly/Jan-23-27.md b/internal/notes/weekly/Jan-23-27.md deleted file mode 100644 index 76afd80..0000000 --- a/internal/notes/weekly/Jan-23-27.md +++ /dev/null @@ -1,49 +0,0 @@ -# Jan 23 - -### Connecting Clients to reactors - -**Client -> Server -> Reactor** - -I can take advantage of the private network created via wireguard to allow the server to connected back to individual reactors and then intiate gRPC calls. - -**Pros** -- This *VASTLY* simplifies the implementation as I can now connect back to the reactors themselves - - from there, I can implement various functions I will need server side - - i.e. GetName() SetName() etc. 
- -**Cons** -- I will eventually need to build the wiregaurd implementation - - although because its all local network for now, I can plug and play down the road - -### TODO -- refactor packages to provide a cleaner interface via simple commands as opposed to the convoluted passing structure that was present with the old I2C library -- start working on the interface between the websocket and the reactor - - react side this is the actual content that will be rendered by the client - - server side this will be a connection to a reactor with the gRPC calls -- moving monitoring functionality to the reactor - - refactoring to use streaming functionality to avoid needing to re initiate request - - have server connect each reactor manager to the rlc - - have the reactor manager ping for server info - - handle disconnects via exit - - sets up cleaner device handling via multiplexing - -# Jan 24 - -### Proto changes - -It's time to refactor the current protobuf stuff to make more sense from the servers perspective. In this sense, I am going to have the reactor provide connection details to the server on connect, and then the server can connect/disconnect at will. 
-
-### Outline
-- Update the server to connect to the reactor itself for the information
-- Decide what information is important enough to send to the server consistently, vs what only is needed upon "further inspection"
- - need reactor information on connect
- - need basic device information such as address and status
- - when selected
- - need specific device breakouts with advanced functions per device
- - this can be multiplexed over the same gRPC connection and can be fulfilled by the device coordinator
- - dc will catch all incoming requests and forward to the correct DM based on address
-
-### TODO
-- reverse monitoring stuff
- - make it so reactor manager has a timeout/ recognizes disconnects gracefully
- - convert monitoring to a stream as opposed to consistent calls
diff --git a/internal/pkg/i2c/bus.go b/internal/pkg/i2c/bus.go
index 12a55d4..7ba012e 100644
--- a/internal/pkg/i2c/bus.go
+++ b/internal/pkg/i2c/bus.go
@@ -1,7 +1,9 @@
+// Package i2c wraps the [i2c-tools] commands to interact
+// with devices on the bus
+//
+// [i2c-tools]: https://manpages.debian.org/unstable/i2c-tools/index.html
 package i2c
 
-// file has general wrappers to interact with i2c-tools
-
 import (
 	"FRMS/internal/pkg/logging"
 	"bytes"
@@ -12,56 +14,75 @@ import (
 	"strings"
 )
 
+// GetConnected returns a map of each device address and its current
+// connection status.
 func GetConnected(b int) (map[int]bool, error) {
-	// Returns all the connected devices by address
-	// might just do this in bash and make it easier
+
 	bus := strconv.Itoa(b)
 	devices := make(map[int]bool) // only keys
+
 	cmd := exec.Command("i2cdetect", "-y", "-r", bus)
+
 	var out bytes.Buffer
 	var errs bytes.Buffer
 	cmd.Stderr = &errs
 	cmd.Stdout = &out
+
 	if err := cmd.Run(); err != nil {
-		logging.Debug(logging.DError, "I2C error performing scan.
%v", errs.String()) + + logging.Debug( + logging.DError, + "I2C scan error %v", + errs.String(), + ) + return devices, err } + // parsing the command output outString := out.String() - // could split by \n too split := strings.SplitAfter(outString, ":") - // 1st entry is garbage headers and ending is always \n##: + + // 1st entry is reserved and ending is always \n##: split = split[1:] + // create empty slice for all the devices for i, v := range split { - lst := strings.Index(v, "\n") - trimmed := v[:lst] + lastDevice := strings.Index(v, "\n") + + trimmed := v[:lastDevice] trimmed = strings.Trim(trimmed, " ") - // trimmed now holds just possible sensor addresses + count := strings.Split(trimmed, " ") + for j, d := range count { // the first row has to be offset by 3 but after its just i*16 + j - offset := 0 + offset := j if i == 0 { - offset = 3 + offset += 3 } - addr := i*16 + j + offset + + addr := i*16 + offset + if !strings.Contains(d, "--") && !strings.Contains(d, "UU") { - // active devices[addr] = true } } } + return devices, nil } +// SendCmd sends an arbitrary command string to the device at addr on i2c bus b. +// Command will be converted from a string to bytes before +// attempting to be sent. 
func SendCmd(b, addr int, command string) (string, error) { - // sends an arbituary commnd over specified bus to int - // might make a base script for this too + var cmd *exec.Cmd bus := strconv.Itoa(b) - operation := "r20" // default read - frmt_cmd := "" // empty cmd + // default to an empty read + operation := "r20" + frmt_cmd := "" if command != "" { // command, do write operation = fmt.Sprintf("w%d", len(command)) // write @@ -75,14 +96,19 @@ func SendCmd(b, addr int, command string) (string, error) { // reading cmd = exec.Command("i2ctransfer", "-y", bus, fmt.Sprintf("%s@0x%x", operation, addr)) } - // exec command + + // execute command var out bytes.Buffer var errs bytes.Buffer cmd.Stderr = &errs cmd.Stdout = &out + if err := cmd.Run(); err != nil { - logging.Debug(logging.DError, "I2C error getting data! %v", err) + + logging.Debug(logging.DError, "I2C command error %v", err) + return "", err } + return out.String(), nil }