cleaning up old files and working on readme

main
KeeganForelight 1 year ago
parent f7f679ba58
commit 444bb8684e

@ -1,7 +1,24 @@
# FRMS
ForeLight Reactor Management System
## Outline
## ForeLight Reactor Management System
This branch will serve as the staging ground for adding unit tests and documentation in order to finalize v0.1.0-alpha
## Table of Contents
* [Introduction](#introduction)
* [Getting Started](#getting-started)
* [Installation](#installation)
* [Usage](#usage)
* [Wiki](#wiki)
## Introduction
## Getting Started
### Installation
### Usage
## Wiki

@ -43,4 +43,7 @@ tasks:
GOARM: "{{.GOARM}}"
GOARCH: "{{.GOARCH}}"
GOOS: "{{.GOOS}}"
test:
desc: "Runs the full test suite"
cmds:
- ./gotest.py

@ -39,8 +39,10 @@ for line in output[:-1]:
# individual test
pkg.tests.append((parsed["Test"],parsed["Action"],parsed["Elapsed"]))
totalRan = 0
totalPassed = 0
totalTime = 0
# generating output from parsed json
total = 0
for name, info in res.items():
pkgname = name.split('/')
pkgname = '/'.join(name.split('/')[1:])
@ -58,9 +60,9 @@ for name, info in res.items():
out = []
if test[1] == "pass":
passed += 1
out = [" " + test[0] + ":",'\033[32mpass\033[0m ','(' + str(test[2]) + 's)']
out = [" " + test[0] + ":",'\033[32mpass\033[0m ',str(test[2]) + 's']
elif test[1] == "fail":
out = [" " + test[0] + ":",'\033[31mfail\033[0m ','(' + str(test[2]) + 's)']
out = [" " + test[0] + ":",'\033[31mfail\033[0m ',str(test[2]) + 's']
print(f"{out[0] : <30}{out[1] : >5}{out[2] : >8}")
@ -70,9 +72,18 @@ for name, info in res.items():
else:
result = "\033[31mFAILED\033[0m"
print("summary:\n\t%s (%d/%d in %.3fs)" % (result, passed, total, info.totaltime))
# keep track of grand totals
totalRan += total
totalPassed += passed
totalTime += info.totaltime
print(" %s %d/%d in %.3fs/n" % (result, passed, total, info.totaltime))
# print("OVERALL:\n\tSkipped %d/%d\n\tFailed %d/%d\n\tPassed %d/%d\n" % (skippedTests, totalTests, failedTests, totalTests, passedTests, totalTests))
# output overall test statistics
if totalRan == totalPassed:
result = "\033[32mPASSED\033[0m"
else:
result = "\033[31mFAILED\033[0m"
print("\nSUMMARY:\n\t%s %d/%d in %.3fs" % (result, totalPassed, totalRan, totalTime))

@ -1,71 +0,0 @@
*Time for a coherent plan of attack*
### Current Issues:
- There is a lot of redundancy between the managers/coordinators when it comes to basic checks
- the package separation kind of makes sense, but it needs to be better fleshed out
- I need to enforce better separation of responsibilities. Somewhat unclear when state is being kept centrally in the coordinator for no apparent reason.
### Solution:
- Go through the packages and consolidate
- Reduce the state we have to keep centrally, push responsibility to the other packages
### Plan of attack:
- Outline core information flow
- Examine what interfaces are necessary to make this work
- Stop looking at the server/reactor as separate entities
*I need to put the whole docker thing on the back burner for now. It isn't that important when it comes to immediate goals.*
#### 12/05 TODO
- Cleanup server side config stuff to make it coherent
- Reflect changes to reactor side startup
- Boil down interface to address core issues
- Config outline:
1) Startup and load the existing config
2) Overwrite any previous settings with the flags
3) Intelligently translate config into action
4) launch coordinator and start up existing reactor managers
- Config Structure:
- Wrap viper functions in config struct methods to be used through interfaces
- minimize the reliance on viper so we can sub in other methods
- is it even important to launch reactor managers? Wont they just be started on connection?
#### 12/06 TODO
- I think I can completely remove the old config way and just pass the viper object directly. I think its not worth the hassle of trying to keep track of a million interfaces
#### 12/07 TODO
- I concede, I will just remove flags as most people will never use them anyway and instead rely on env vars and config files. To hell with the flags.
- I am ripping out all of the TUI and status manager stuff, its convoluted and harder than just pulling info from database.
- I can eventually rework TUI to pull from DB which is fine, there will never be that many clients anyway and a lot of them are only 1 time calls with refreshes which aren't that slow anyway.
- alright I gutted the tui and system viewer, reworking sub coord to launch at start. That way there is a listener active
- time to boil down to functionality a LOT, right now it's clumsy and inefficient, there needs to be a better way to keep everything straight
- Moving the DB responsibilities to the reactor itself seems to be the best way to do it in the short term. Reduce network load and overall keep things efficient. May lead to duplicate copies of data? Not the end of the world, logging system can make sure we are maintaining entries.
**IDEA**
Reactors log data themselves, Send periodic status updates over grpc to enable monitoring faster than the sample rate
*This could work!*
Outline:
- Reactors reach out to server on boot to get DB info
- compare this against what they have internally to ensure they are up to date and allow for migrations
- Maybe not even save the db info because we don't need to??
- Reactors also receive port for their specific manager
- Can be dynamically given out to allow for spread out load
- Reactors then reach out with sensor and device info periodically (5s?) which can be used for live monitoring
- RM responds with any potential updates for the device settings i.e. change pwm duty on web interface, pass on to reactor
- Allows for a live view with current reading as well as historical data at differing interval via grafana. (i.e. 5s live view with 10 min sample interval)
Need to differentiate sensors vs devices that can be changed
- Sensors have a variable sample rate and eventually name/address
- Devices have more and widely varying parameters, could be pwm with freq/duty/onoff or ph pump with on, time or off etc.
#### 12/09 TODO
- Alright I have a baseline! I want to start to integrate atlas type stuff so that I have some mock data/sensors to work with. I am going to try to flesh out the "atlas" interface/struct to implement some of the more basic commands.
#### 1/11 TODO
Plan of attack for websocket stuff and things
**Questions**
- What to do about the reactor to user comms
- Websockets? GRPC? smoke signals?
-

File diff suppressed because it is too large Load Diff

@ -1,103 +0,0 @@
this will be a living doc
starting with for connection management:
listener:
- knows
- ip:port to listen to
- clients connect with{ip, port, clientType, model, id}
- is able to
- create coordinators for each clientType
- send new clients to coordinator handlers via chan
- depends on
- clients sending known data (gRPC)
- NewCoordinator func
- implements
* shouldnt really have any callable methods
coordinator: (General)
- knows
- what client ids have already connected
- which managers correlate to which clients
- is able to
- create managers for new clients
- start managers for clients
- depends on
- listener for client structure
- manager for NewManager() function
- implements
- client connection handling
- general manager call
manager (general):
- knows
- client info
- timeout
- if it is active
- is able to
- establish a connection with a client
- stop when the connection drops
- depends on
- coordinator for start calls
- implements
- client connection creation
- client info storage
manager (reactor):
* embeds the gm
- knows
- devices attached to the reactor
- underlying client manager
- is able to
- maintain device struct
- no pings only control logic (i.e remove device, restart etc)
- depends on
- gm for client conn
- coordinator for starts
- implements
- reactor structure tracking
manager (tui):
* embeds the gm
- knows
- structure of the system (ie reactors:devices)
- underlying client manager
- is able to
- keep track of system changes
- updates client from buffer or concurrent grpc?
- depends on
- RM to get system info
- coordinator for starts
- implements
- system tracking
reactor level coordinator: (right away this feels so fat compared)
- knows
- current connected devices
- server to init to
- its own hwinfo to establish itself as a client
- is able to:
- reach out to server on boot
- transmit client details
- keep reactor devices current
- depends on
- I2C package to notify of connected devices
- hardware info to get its client info
- server to handle connection
- sm for new manager
- implements
- reactor status handler for updates to other coords/managers
device itself:
- knows
- probe status ( maybe)
- data in buffer
- is able to
- clear buffer on request
- respond to commands
- implements
- data collection
- control execution
- depends on
- nothing its a driver
- maybe the control logic??

@ -1,4 +0,0 @@
## Weekly Planning
[Jan 16-20](weekly/Jan-16-20.md)
[Jan 23-27](weekly/Jan-23-27.md)

@ -1,149 +0,0 @@
# Jan 18
### Planning
**Monitoring Changes**
I want to refactor the reactor stuff to be less method oriented as far as data collection. For example, the monitoring stuff is all about events that happen pretty infrequently. It makes sense to then use a channel on the device side to just feed relevant status updates back to the reactor. I think that this makes the most sense because this will synchronize updates and leverage the rarity of events to cut down on errant calls.
- pros
- less repetitive method calls needed
- less device locking
- localize the information to different packages
- cons
- extra memory for channels and duplicate storage info
- could just remove status from dm?
**New Idea**
I can leverage wireguard to do server-> reactor connections even beyond the testing phase
Changes:
1) move device coordinator into device package
2) expose relevant methods to reactor interface
3) clarify individual package responsibilities
4) add stuff server side to create/destroy grpc connections as the information is rendered client side
- this might be scuffed but oh well
### Package Separation
**Reactor**
- coordinator
- creates initial link to the server
- creates database client
- creates and starts a device coordinator
**Device**
- coordinator
- searches i2c bus for connected devices
- spins up managers to control the connected devices
- relays information back up to the reactor coordinator
- manager
- control over singular device
- has the core information that will be needed across any type of device (name, status, address etc)
- sub-manager
- fine grained struct with methods specific to the device
**Server**
Going to ignore for now because I am lazy
- central coordinator starts up database connection config etc
- reactor coordinator
### TODO
**Monitoring Changes**
- [] change methods to channel based
- [] internal methods with spins
- [] pass structs with interface for methods
# Jan 19
### Organizational changes
What structure makes the most sense for the devices?
#### Top-Down
Ex) DeviceManager -> SensorManager -> DOManager -> Manager
**Pros**
- probably a less complex interface layout?
**Cons**
- annoying to keep/pass state
- i.e. atlas needs the address to pass to the I2C but right now the devicemanager is storing that. Have to pass down via start which doesn't make a ton of sense
#### Bottom-Up
Ex) DOManager -> SensorManager -> DeviceManager -> Manager
**Pros**
- top level manager has access to common info
- i.e. address, name etc
- can easily define common functions and use this to pass info upwards
- still don't have to import device manager as interfaces can handle getting/setting stuff
**Cons**
- might get ugly with interfaces
- there might have to be a bunch of interfaces in the device package to handle nesting the manager itself
- this might not be true though as the device coordinator dictates what interfaces are needed, and already it doesn't really use any of the dm functionality
**What would it look like?**
Device coordinator would call NewDeviceManager,
### Outline of functionality
Hopefully by going over what is expected of each manager, it will become clear what the layout should look like
**Device Coordinator**
- responsibilities
- starting/stopping device managers as devices connect/disconnect
- maintaining a map of the devices and their status
- updating the server with this information at set intervals
- pass the I2C client to the device managers
**Device Manager**
- responsibilities
- struct to store information that is used by any type of device
- i.e. Address, Name, Config(prefix and file)? Status?
- probably don't need status as this can be determined via IsActive()
- config might be helpful to have, could pass up to managers via a Get function
- start/stop as requested by the device coordinator
- serves
- broad functions such as SetName(), GetName(), etc.
**Sensor/Controller Manager**
- responsibilities
- provide corresponding broad struct that will be consistent across types of each
- i.e. sensors all have sample rate
- provide methods all will use such as TakeReading()
- serves
- more specific functions such as GetSampleRate(), Set...
**Specific Managers**
- responsibilities
- provides specific functions that a certain sensor/controller might need
- i.e. pwm will need setFreq, DO might need a conversion etc.
- broadly will need access to I2C for comms
- serves
- Hyper Specific functions such as SetFreq() etc.
### Trying Bottom-Up
Right now, I am using some hybrid format which doesn't really make any sense. It goes
DeviceManager -> DOManager -> SensorManager -> Manager
This just feels *wrong*
**Changes**
- Going to use the specific -> broad because it seems intuitive
- the most common methods/information is at the base and propagates up through the more specific managers
- should make it simpler to define
- maybe go back to the unified package? Not quite clear what the purpose of separate is beyond convenience
- although... the idea of the device manager as a reusable piece makes enough sense to potentially keep it as a separate package
- I'll stick with the separate for now and keep it unless it becomes unworkable
### I2C Changes
The i2c bus is locked at the device level, so I am going to rewrite the bs to just use a function with no struct and remove the whole passing of structs garbage
#### For tomorrow
What I have now works, but it is still pretty backwards. Need further improvements and need to start thinking about what a websocket might look like in the current model

@ -1,49 +0,0 @@
# Jan 23
### Connecting Clients to reactors
**Client -> Server -> Reactor**
I can take advantage of the private network created via wireguard to allow the server to connect back to individual reactors and then initiate gRPC calls.
**Pros**
- This *VASTLY* simplifies the implementation as I can now connect back to the reactors themselves
- from there, I can implement various functions I will need server side
- i.e. GetName() SetName() etc.
**Cons**
- I will eventually need to build the wireguard implementation
- although because its all local network for now, I can plug and play down the road
### TODO
- refactor packages to provide a cleaner interface via simple commands as opposed to the convoluted passing structure that was present with the old I2C library
- start working on the interface between the websocket and the reactor
- react side this is the actual content that will be rendered by the client
- server side this will be a connection to a reactor with the gRPC calls
- moving monitoring functionality to the reactor
- refactoring to use streaming functionality to avoid needing to re initiate request
- have server connect each reactor manager to the rlc
- have the reactor manager ping for server info
- handle disconnects via exit
- sets up cleaner device handling via multiplexing
# Jan 24
### Proto changes
It's time to refactor the current protobuf stuff to make more sense from the servers perspective. In this sense, I am going to have the reactor provide connection details to the server on connect, and then the server can connect/disconnect at will.
### Outline
- Update the server to connect to the reactor itself for the information
- Decide what information is important enough to send to the server consistently, vs what only is needed upon "further inspection"
- need reactor information on connect
- need basic device information such as address and status
- when selected
- need specific device breakouts with advanced functions per device
- this can be multiplexed over the same gRPC connection and can be fulfilled by the device coordinator
- dc will catch all incoming requests and forward to the correct DM based on address
### TODO
- reverse monitoring stuff
- make it so reactor manager has a timeout/ recognizes disconnects gracefully
- convert monitoring to a stream as opposed to consistent calls

@ -1,7 +1,9 @@
// package i2c wraps the [i2c-tools] commands to interact
// with devices on the buss
//
// [i2c-tools]: https://manpages.debian.org/unstable/i2c-tools/index.html
package i2c
// file has general wrappers to interact with i2c-tools
import (
"FRMS/internal/pkg/logging"
"bytes"
@ -12,56 +14,75 @@ import (
"strings"
)
// GetConnected returns a map of each device address and its current
// connection status.
func GetConnected(b int) (map[int]bool, error) {
// Returns all the connected devices by address
// might just do this in bash and make it easier
bus := strconv.Itoa(b)
devices := make(map[int]bool) // only keys
cmd := exec.Command("i2cdetect", "-y", "-r", bus)
var out bytes.Buffer
var errs bytes.Buffer
cmd.Stderr = &errs
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
logging.Debug(logging.DError, "I2C error performing scan. %v", errs.String())
logging.Debug(
logging.DError,
"I2C scan error %v",
errs.String(),
)
return devices, err
}
// parsing the command output
outString := out.String()
// could split by \n too
split := strings.SplitAfter(outString, ":")
// 1st entry is garbage headers and ending is always \n##:
// 1st entry is reserved and ending is always \n##:
split = split[1:]
// create empty slice for all the devices
for i, v := range split {
lst := strings.Index(v, "\n")
trimmed := v[:lst]
lastDevice := strings.Index(v, "\n")
trimmed := v[:lastDevice]
trimmed = strings.Trim(trimmed, " ")
// trimmed now holds just possible sensor addresses
count := strings.Split(trimmed, " ")
for j, d := range count {
// the first row has to be offset by 3 but after its just i*16 + j
offset := 0
offset := j
if i == 0 {
offset = 3
offset += 3
}
addr := i*16 + j + offset
addr := i*16 + offset
if !strings.Contains(d, "--") && !strings.Contains(d, "UU") {
// active
devices[addr] = true
}
}
}
return devices, nil
}
// SendCmd sends an arbitrary command string to the device at addr on i2c bus b.
// Command will be converted from a string to bytes before
// attempting to be sent.
func SendCmd(b, addr int, command string) (string, error) {
// sends an arbituary commnd over specified bus to int
// might make a base script for this too
var cmd *exec.Cmd
bus := strconv.Itoa(b)
operation := "r20" // default read
frmt_cmd := "" // empty cmd
// default to an empty read
operation := "r20"
frmt_cmd := ""
if command != "" {
// command, do write
operation = fmt.Sprintf("w%d", len(command)) // write
@ -75,14 +96,19 @@ func SendCmd(b, addr int, command string) (string, error) {
// reading
cmd = exec.Command("i2ctransfer", "-y", bus, fmt.Sprintf("%s@0x%x", operation, addr))
}
// exec command
// execute command
var out bytes.Buffer
var errs bytes.Buffer
cmd.Stderr = &errs
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
logging.Debug(logging.DError, "I2C error getting data! %v", err)
logging.Debug(logging.DError, "I2C command error %v", err)
return "", err
}
return out.String(), nil
}

Loading…
Cancel
Save