working on log format

main
Keegan 2 years ago
parent d22eaad075
commit f8b9324a0d

@@ -27,8 +27,8 @@ func main() {
w := flag.CommandLine.Output()
fmt.Fprintf(w, "Usage: %s port \n",os.Args[0])
}
-// iptr := flag.String("i","192.168.100.2","ip address of server")
-iptr := flag.String("i","192.1.168.136","ip address of laptop")
+iptr := flag.String("i","192.168.100.2","ip address of server")
+//iptr := flag.String("i","192.1.168.136","ip address of laptop")
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()

@@ -32,7 +32,8 @@ func main() {
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
-log.Fatal("Specify ifconfig interface. See man ifconfig for further information")
+fmt.Println("Specify ifconfig interface. See man ifconfig for further information")
+os.Exit(1)
}
ifconfig := string(flag.Arg(0))
ch := make(chan error)
@@ -41,7 +42,7 @@ func main() {
log.Fatal(err)
}
go l.Start()
-logging.Debug(logging.DStart, "Server started")
+logging.Debug(logging.DStart, "CCO 01 Server started")
err = <-ch // blocking to wait for any errors and keep alive otherwise
if err != nil {
//fmt.Printf("ERROR: %v\n",err)

@@ -25,8 +25,8 @@ func main() {
w := flag.CommandLine.Output()
fmt.Fprintf(w,"Usage: %s port [eth*, wlan*, etc.]\n", os.Args[0])
}
-// iptr := flag.String("i","192.168.100.2","ip address of listener")
-iptr := flag.String("i","192.1.168.136","ip address of laptop")
+iptr := flag.String("i","192.168.100.2","ip address of listener")
+//iptr := flag.String("i","192.1.168.136","ip address of laptop")
flag.Parse()
if flag.NArg() != 2 {
flag.Usage()

@@ -16,7 +16,7 @@ TOPICS = {
"STRT": "#67a0b2",
"PING": "#d0b343",
"SCAN": "#70c43f",
-#"LOG1": "#4878bc",
+"SPWN": "#4878bc",
#"LOG2": "#398280",
#"CMIT": "#98719f",
#"PERS": "#d08341",

@@ -30,6 +30,7 @@ const (
DExit logTopic = "EXIT"
DPing logTopic = "PING"
DScan logTopic = "SCAN"
+DSpawn logTopic = "SPWN"
)
// the list can grow
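A topic added to this const block also needs a matching entry in the parser's TOPICS color map above, which is exactly what the paired SPWN changes in this commit do. A minimal sketch of a call site for the new topic, assuming the Debug signature used throughout this commit; the time value in the expected output is illustrative, not from a real run:

// hypothetical call site; the "CCO 01" code/id prefix follows the
// convention described in the notes at the bottom of this commit
logging.Debug(logging.DSpawn, "CCO 01 Created RCO")
// expected log line, assuming TIME is ms since start:
// 00451 SPWN CCO 01 Created RCO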

@@ -8,6 +8,7 @@ import (
// this package creates coordinators responsible for keeping track of active clients and invoking managers
type CreateManager interface {
+Start()
NewManager(*Client, chan error) GeneralManager
}
@@ -40,13 +41,13 @@ func NewCoordinator(manager CreateManager, err chan error) *Coordinator {
func (c *Coordinator) Start() {
// on start we need to create channel listener
// on each new connection we want to check its id against our mapping
+c.CreateManager.Start()
}
func (c *Coordinator) ClientHandler(cl *Client) int {
// (creates and) notifies manager of client connection
m := c.GetManager(cl)
go m.Start(cl)
-logging.Debug(logging.DClient, "Coordinator starting manager for %v client %v",cl.Type,cl.Id)
return m.GetPort()
}
@@ -68,7 +69,12 @@ type reactorCoordinator struct {
Sys *SystemViewer
}
+func (r *reactorCoordinator) Start() {
+logging.Debug(logging.DStart,"RCO 01 Starting!")
+}
func (r *reactorCoordinator) NewManager(cl *Client, err chan error) GeneralManager {
+logging.Debug(logging.DClient, "RCO 01 starting manager for %v client %v",cl.Type,cl.Id)
return NewReactorManager(cl,r.Sys,err)
}
@@ -83,7 +89,12 @@ type tuiCoordinator struct {
Sys *SystemViewer
}
+func (t *tuiCoordinator) Start() {
+logging.Debug(logging.DStart,"TCO 01 Starting!")
+}
func (t *tuiCoordinator) NewManager(cl *Client, err chan error) GeneralManager {
+logging.Debug(logging.DClient, "TCO 01 starting manager for %v client %v",cl.Type,cl.Id)
return NewTUIManager(t.Ip,cl,t.Sys,err)
}

@@ -82,7 +82,7 @@ func (l *Listener) ReactorClientDiscoveryHandler(ctx context.Context, ping *pb.R
logging.Debug(logging.DClient, "%v %v has connected from %v:%v\n",c.Type,c.Id,c.Ip,c.Port)
coord, ok := l.Coordinators["reactor"]
if !ok {
-logging.Debug(logging.DStart, "Stating Reactor Coordinator")
+logging.Debug(logging.DSpawn,"CCO 01 Created RCO")
coord = NewReactorCoordinator(l.Sys, l.Err)
l.Coordinators["reactor"] = coord
coord.Start()
@@ -93,19 +93,19 @@ func (l *Listener) ReactorClientDiscoveryHandler(ctx context.Context, ping *pb.R
}
func (l *Listener) TUIClientDiscoveryHandler(ctx context.Context, ping *pb.TUIClientRequest) (*pb.TUIClientResponse, error) {
-t := &Client{Type:"tui",Id:ping.GetClientId()}
+c := &Client{Type:"tui",Id:ping.GetClientId()}
var coord *Coordinator
var ok bool
coord, ok = l.Coordinators["tui"]
if !ok {
-logging.Debug(logging.DStart, "Stating TUI Coordinator")
+logging.Debug(logging.DSpawn,"CCO 01 Created TCO")
coord = NewTUICoordinator(l.Ip, l.Sys, l.Err)
l.Coordinators["tui"] = coord
coord.Start()
}
-port := coord.ClientHandler(t)
-logging.Debug(logging.DClient,"%v %v has connected from %v:%v\n",t.Type,t.Id,l.Ip,port)
-r := &pb.TUIClientResponse{ClientId:t.Id,ServerIp:l.Ip,ServerPort:int32(port)}
+port := coord.ClientHandler(c)
+logging.Debug(logging.DClient,"%v %v has connected from %v:%v\n",c.Type,c.Id,l.Ip,port)
+r := &pb.TUIClientResponse{ClientId:c.Id,ServerIp:l.Ip,ServerPort:int32(port)}
return r, nil
}

@@ -6,7 +6,7 @@ import (
"math"
"sync"
"errors"
-"FRMS/internal/pkg/logging"
+//"FRMS/internal/pkg/logging"
)
// this package will implement a boilerplate manager
@@ -39,7 +39,6 @@ func (m *Manager) Start(cl *Client) {
// manager already running
m.Err <-errors.New("Manager already running!")
} // if we get here, manager is atomically activated and we can ensure start wont run again
-logging.Debug(logging.DStart, "%v Manager (%v) starting!\n",m.Type,m.Id)
}
func (m *Manager) Exit() {
@@ -47,7 +46,6 @@ func (m *Manager) Exit() {
if !m.Deactivate() {
m.Err <-errors.New("Manager already disabled!")
}
-logging.Debug(logging.DExit, "%v Manager (%v) exiting!\n",m.Type,m.Id)
}
// reactor manager atomic operations
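The Start and Exit bodies above lean on Activate and Deactivate reporting whether the state actually flipped, which is what makes the "already running" / "already disabled" errors possible. Those helpers are not part of this diff; a minimal sketch of how such guards are typically built, assuming the manager keeps liveness in an int32 flag (field names here are illustrative, not from this commit):

import "sync/atomic"

type Manager struct {
	active int32 // 0 = stopped, 1 = running
	Err    chan error
}

// Activate flips 0 -> 1; true only for the caller that actually started it.
func (m *Manager) Activate() bool {
	return atomic.CompareAndSwapInt32(&m.active, 0, 1)
}

// Deactivate flips 1 -> 0; false means the manager was already stopped.
func (m *Manager) Deactivate() bool {
	return atomic.CompareAndSwapInt32(&m.active, 1, 0)
}

// IsActive reports the flag without modifying it.
func (m *Manager) IsActive() bool {
	return atomic.LoadInt32(&m.active) == 1
}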

@@ -3,7 +3,7 @@ package server
import (
"fmt"
"time"
-"log"
+_ "log"
"context"
"sync"
"FRMS/internal/pkg/logging"
@@ -39,6 +39,7 @@ func NewReactorManager(c *Client,sys *SystemViewer,err chan error) GeneralManager {
func (r *ReactorManager) Start(cl *Client) {
r.Manager.Start(cl)
+logging.Debug(logging.DStart,"RMA %v starting", r.Id)
go r.StatusMon.Start()
go r.DevsMon.Start()
go r.StatusMon.Send(&DeviceInfo{Id:r.Id,Type:"Reactor",Status:"[green]ONLINE[white]"})
@@ -51,6 +52,7 @@ func (r *ReactorManager) Start(cl *Client) {
func (r *ReactorManager) Exit() {
r.Manager.Exit()
+logging.Debug(logging.DExit, "RMA %v exiting", r.Id)
go r.StatusMon.Send(&DeviceInfo{Id:r.Id,Type:"Reactor",Status:"[red]OFFLINE[white]",Data:fmt.Sprintf("Last Seen %v",time.Now().Format("Mon at 03:04:05pm MST"))})
}
@@ -66,7 +68,7 @@ func (r *ReactorManager) Connect() *grpc.ClientConn {
for {
if !r.IsActive() {
-logging.Debug(logging.DClient,"No longer active, aborting connection attempt\n")
+logging.Debug(logging.DClient,"RMA %v No longer active, aborting connection attempt",r.Id)
return &grpc.ClientConn{}
}
var err error
@@ -77,14 +79,14 @@ func (r *ReactorManager) Connect() *grpc.ClientConn {
if code == (5 | 14) { // unavailable or not found
to := r.Timeout()
if to == 0 {
-logging.Debug(logging.DClient,"Client not responding\n")
+logging.Debug(logging.DClient,"RMA %v Client not responding",r.Id)
return &grpc.ClientConn{}
}
-logging.Debug(logging.DClient,"Client currently down, retrying in %v ms\n",to)
+logging.Debug(logging.DClient,"RMA %v Client currently down, retrying in %v ms",r.Id, to)
time.Sleep(time.Duration(to) * time.Millisecond)
} else {
-log.Fatal("GRPC ERROR: %v",code)
+logging.Debug(logging.DError,"RMA %v GRPC ERROR: %v",r.Id, code)
r.Err <- err
}
}
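One note on the retry guard above: its comment reads "unavailable or not found", but (5 | 14) is a bitwise OR that evaluates to 15, so the comparison matches neither code. A sketch of the intended check, using the named constants from google.golang.org/grpc/codes (NotFound is 5, Unavailable is 14):

import "google.golang.org/grpc/codes"

// isTransient reports whether a gRPC status code is worth retrying.
func isTransient(code codes.Code) bool {
	return code == codes.Unavailable || code == codes.NotFound
}

With that helper, the guard would read if isTransient(code) { ... } and anything else would still be surfaced on r.Err.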
@@ -101,7 +103,7 @@ func (r *ReactorManager) Monitor(conn *grpc.ClientConn) {
resp, err := client.GetReactorStatus(context.Background(),req)
code := status.Code(err)
if code != 0 { // if != OK
-logging.Debug(logging.DClient,"Reactor %v down! Code: %v\n", r.Id,code)
+logging.Debug(logging.DClient,"RMA %v Reactor not responding! Code: %v\n", r.Id,code)
r.devstatus.Lock()
for _, d := range r.Devs {
newd := d
@@ -119,7 +121,7 @@ func (r *ReactorManager) Monitor(conn *grpc.ClientConn) {
r.Devs[d.Id] = d
}
r.devstatus.Unlock()
-logging.Debug(logging.DPing, "Devices Updated for reactor %v", r.Id)
+logging.Debug(logging.DPing, "RMA %v Reactor Reached", r.Id)
time.Sleep(r.Hb) // time between sensor pings
}
}

@@ -52,11 +52,16 @@ func (t *TUIManager) Start(cl *Client) {
//
t.PingReset()
t.Manager.Start(cl)
+logging.Debug(logging.DStart,"TMA %v starting", t.Id)
go t.Timeoutd()
go t.Register() // begin tui server to respond to tui client reqs
//go t.Monitor(conn)
}
+func (t *TUIManager) Exit() {
+t.Manager.Exit()
+logging.Debug(logging.DExit,"TMA %v exiting",t.Id)
+}
func (t *Timeout) PingReset() {
t.Lock()
defer t.Unlock()
@@ -67,7 +72,7 @@ func (t *TUIManager) Timeoutd() {
for t.IsActive() {
if sleep, elapsed := t.Elapsed(); elapsed {
// timeout elapsed
-logging.Debug(logging.DExit,"Client %v no longer responding", t.Id)
+logging.Debug(logging.DClient,"TMA %v client not responding", t.Id)
t.Exit()
} else {
time.Sleep(sleep)
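Timeoutd above drives the TUI keepalive: Elapsed reports both how long to sleep and whether the client already missed its deadline, while PingReset (shown earlier) pushes the deadline forward on every ping. Elapsed itself is not in this diff; a minimal sketch of plausible bookkeeping, assuming Timeout stores the last ping plus a fixed TO duration (field names are guesses, not from this commit):

import (
	"sync"
	"time"
)

type Timeout struct {
	sync.Mutex
	LastPing time.Time
	TO       time.Duration // how long a client may go silent
}

// Elapsed returns how long to sleep before re-checking, and whether
// the deadline has already passed.
func (t *Timeout) Elapsed() (time.Duration, bool) {
	t.Lock()
	defer t.Unlock()
	remaining := time.Until(t.LastPing.Add(t.TO))
	if remaining > 0 {
		return remaining, false
	}
	return 0, true
}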
@@ -101,7 +106,7 @@ func (t *TUIManager) Register() {
go func(ch chan int,p int) {
ch <-p
}(t.Port.Chan, t.Port.int)
-logging.Debug(logging.DStart, "TUI Manager %v ready for client", t.Id)
+logging.Debug(logging.DClient, "TMA %v ready for client conn", t.Id)
// up and running
}
@@ -127,7 +132,7 @@ func (t *TUIManager) GetDevices(ctx context.Context, req *pb.GetDevicesRequest)
resp.Devices = append(resp.Devices, &pb.Dev{Id:v.Id,Type:v.Type,Status:v.Status,Data:v.Data,Index:v.Index})
}
}
-logging.Debug(logging.DClient,"Got devices for TUI %v" ,t.Id)
+logging.Debug(logging.DClient,"TMA %v sending devices to client", t.Id)
return resp, nil
}

notes

@@ -825,3 +825,25 @@ overhead is probably minimal anyway
redo listener bullshit to just route to the cs
tui clients will just get a fresh copy of the reactor info and device info for every request
+ADDING STANDARDIZED LOGGING
+adding a log package for all packages
+logs to a file named after start time
+going to be of format
+TIME PROC CODE ID MSG
+so
+00013 STRT COR 912939123 Central coordinator started
+00033 STRT
+CODES
+CCO - Central Coordinator
+RCO - Reactor Coordinator
+TCO - TUI Coordinator
+RMA - Reactor Manager
+TMA - TUI Manager
+RLC - Reactor Level Coordinator
+DMA - Device Manager
+TUI - TUI Client
+every debug message will be of format
+topic, devcode: id
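Putting the notes together, a minimal sketch of the package they describe: one log file named after the start time, and lines of the form TIME TOPIC CODE ID MSG where TIME is milliseconds since startup and callers bake the CODE and ID into the message, as in logging.Debug(logging.DStart, "CCO 01 Server started"). The topic column is presumably what the parser's TOPICS color map keys on. Everything here beyond the Debug signature and the logTopic constants seen in this commit is an assumption:

package logging

import (
	"fmt"
	"log"
	"os"
	"time"
)

type logTopic string

const DStart logTopic = "STRT" // one const per topic, as in the list above

var start time.Time

func init() {
	start = time.Now()
	// file named after start time, per the notes; the name format is a guess
	f, err := os.Create(start.Format("2006-01-02_15-04-05") + ".log")
	if err != nil {
		log.Fatal(err)
	}
	log.SetOutput(f)
	log.SetFlags(0) // the TIME column below replaces log's own prefix
}

// Debug prints "TIME TOPIC msg"; callers prepend "CODE ID" to msg.
func Debug(topic logTopic, format string, a ...interface{}) {
	ms := time.Since(start).Milliseconds()
	prefix := fmt.Sprintf("%05d %v ", ms, string(topic))
	log.Printf(prefix+format, a...)
}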
