stripping all status monitor and TUI code out for now, focusing on the core reactor implementation

main
KeeganForelight 2 years ago
parent ef7bf9d665
commit 62f37b5f80

@@ -10,7 +10,6 @@ import (
"os/signal"
flag "github.com/spf13/pflag"
"github.com/spf13/viper"
)
@@ -36,19 +35,6 @@ func main() {
// load any stored configs
conf := NewConfig("reactor")
flag.String("ip", "192.168.100.2", "server ip")
flag.Int("port", 2022, "server port")
flag.String("name", "", "human readable name")
// bind flags
conf.BindPFlag("reactor_ip", flag.Lookup("ip"))
conf.BindPFlag("reactor_port", flag.Lookup("port"))
conf.BindPFlag("reactor_name", flag.Lookup("name"))
flag.Parse()
conf.WriteConfig()
ch := make(chan error)
rlc := NewCoordinator(conf, ch) // passing conf and err
go rlc.Start()
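The reactor entrypoint binds its pflag definitions into the viper config, so flags, environment variables, the stored config file, and defaults all resolve through one object. A minimal standalone sketch of that precedence, using viper.New() directly in place of the project's NewConfig wrapper (the keys mirror the ones bound above; everything else is illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func main() {
	conf := viper.New()

	// lowest priority: hard-coded defaults
	conf.SetDefault("reactor_ip", "192.168.100.2")
	conf.SetDefault("reactor_port", 2022)

	// middle priority: environment variables (e.g. REACTOR_IP=10.0.0.5)
	conf.AutomaticEnv()

	// highest priority: flags, but only when explicitly set on the command line
	flag.String("ip", "192.168.100.2", "server ip")
	flag.Int("port", 2022, "server port")
	conf.BindPFlag("reactor_ip", flag.Lookup("ip"))
	conf.BindPFlag("reactor_port", flag.Lookup("port"))
	flag.Parse()

	fmt.Println(conf.GetString("reactor_ip"), conf.GetInt("reactor_port"))
}
```

This is also why the 12/07 note at the bottom can drop the flags without touching the rest of the code: readers of the viper object never know which source a value came from.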

@@ -10,7 +10,6 @@ import (
"FRMS/internal/pkg/server"
"os"
flag "github.com/spf13/pflag"
"github.com/spf13/viper"
)
@@ -35,18 +34,6 @@ func main() {
// config file
conf := NewConfig("server")
// flags
flag.String("name", "", "human readable name")
flag.Int("lis_port", 2022, "port for listener")
flag.Int("db_port", 2022, "port for database")
// bind flags
conf.BindPFlag("ports_lis", flag.Lookup("lis_port"))
conf.BindPFlag("ports_db", flag.Lookup("db_port"))
conf.BindPFlag("server_name", flag.Lookup("name"))
flag.Parse()
errCh := make(chan error)
c := NewCoordinator(conf, errCh)

@@ -1,7 +1,6 @@
package influxdb
import (
"errors"
_ "fmt"
_ "github.com/influxdata/influxdb-client-go/v2"
@@ -74,6 +73,9 @@ func (d *DBAdmin) GetReactorClient(id int) (url, bucket, org, token string, err
*/
url = d.URL
org = d.Org
err = errors.New("Unimpl")
token = ""
bucket = ""
//err = errors.New("Unimpl")
err = nil
return
}
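GetReactorClient is still a stub (it returns the admin URL and org with an empty token and bucket), but the credential tuple it will eventually hand out maps directly onto the influxdb-client-go v2 API already imported above. A hedged sketch of how a reactor could consume those credentials once the stub is real; the measurement, tag, and field names are made up for illustration:

```go
package main

import (
	"context"
	"time"

	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
)

// writeSample pushes one point using the (url, bucket, org, token) credentials
// that the server's GetReactorClient is meant to return.
func writeSample(url, bucket, org, token string) error {
	client := influxdb2.NewClient(url, token)
	defer client.Close()

	writeAPI := client.WriteAPIBlocking(org, bucket)
	p := influxdb2.NewPoint(
		"reactor_status", // hypothetical measurement
		map[string]string{"device": "ph_sensor"}, // hypothetical tag
		map[string]interface{}{"reading": 7.2},   // hypothetical field
		time.Now(),
	)
	return writeAPI.WritePoint(context.Background(), p)
}

func main() {
	// placeholder credentials; the real values come from the ClientResponse
	_ = writeSample("http://localhost:8086", "my-bucket", "my-org", "my-token")
}
```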

@@ -14,16 +14,11 @@ import (
)
// this package creates the central coordinator and sub coordinators for clients
// interfaces
// db client interface
type DB interface {
// getters (all create if they don't exist)
//GetToken() (string, error) // returns admin token (creates if it doesn't exist)
GetReactorClient(int) (string, string, string, string, error) // returns (url, bucket, org, token, err)
// delete
// DeleteReactorClient(string) error // removes client token but maintains bucket
// PurgeReactorClientData(string) error // permanently deletes all associated reactor data (token, bucket etc)
}
func NewDBAdmin(config *viper.Viper) (DB, error) {
@@ -31,9 +26,10 @@ func NewDBAdmin(config *viper.Viper) (DB, error) {
}
type CentralCoordinator struct {
// main coordinator
ClientConnections *ClientPacket
*SubCoordinators
*SystemViewer
//*SystemViewer
DB
Config *viper.Viper
// from config
@@ -47,18 +43,23 @@ type SubCoordinators struct {
}
func NewCentralCoordinator(config *viper.Viper, ch chan error) *CentralCoordinator {
c := &CentralCoordinator{Err: ch, Config: config}
if err := config.UnmarshalKey("server", c); err != nil {
// report error
// create a central coordinator to manage requests
db, err := NewDBAdmin(config)
if err != nil {
ch <- err
}
fmt.Printf("%+v\n", c)
c.SystemViewer = NewSystemViewer()
go c.SystemViewer.Start()
c := &CentralCoordinator{Err: ch, Config: config, DB: db}
// grab config settings
if err = config.UnmarshalKey("server", c); err != nil {
ch <- err
}
// spawn a systemviewer (decommissioned)
//c.SystemViewer = NewSystemViewer()
//go c.SystemViewer.Start()
// subcoord map
s := make(map[string]*SubCoordinator)
sub := &SubCoordinators{Directory: s}
c.SubCoordinators = sub
c.SubCoordinators = &SubCoordinators{Directory: s}
// return init coordinator
return c
}
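NewCentralCoordinator leans on `config.UnmarshalKey("server", c)` to fill in fields like the per-client-type port map used later as `c.Ports[cl.Type]`. A self-contained sketch of how that unmarshal behaves, assuming a config layout roughly like the one below (the exact key names in the real config file are a guess):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

// serverConf approximates the fields UnmarshalKey("server", ...) is expected to fill.
type serverConf struct {
	Name  string         `mapstructure:"name"`
	Ports map[string]int `mapstructure:"ports"`
}

const sample = `
server:
  name: frms-server
  ports:
    lis: 2022
    reactor: 2023
    tui: 2024
`

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewBufferString(sample)); err != nil {
		panic(err)
	}

	var c serverConf
	if err := v.UnmarshalKey("server", &c); err != nil {
		panic(err)
	}
	// Ports["reactor"] is what ClientHandler would pass to NewSubCoordinator
	fmt.Printf("%+v\n", c)
}
```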
@@ -66,7 +67,10 @@ func (c *CentralCoordinator) Start() {
// starts up associated funcs
clientChan := make(chan *ClientPacket)
l := NewListener(clientChan, c.Err)
// grabs lis port
c.Config.UnmarshalKey("server.ports", l)
// starts client listener routines
go l.Start()
go c.ClientListener(clientChan)
}
@@ -74,36 +78,48 @@ func (c *CentralCoordinator) Start() {
func (c *CentralCoordinator) ClientListener(ch chan *ClientPacket) {
for client := range ch {
// basically loops until channel is closed
cr := c.ClientHandler(client.Client)
client.Response <- cr
fmt.Printf("Incoming client: +%v\n", client)
client.Response <- c.ClientHandler(client.Client) // respond with cred
}
}
func (c *CentralCoordinator) ClientHandler(cl *Client) *ClientResponse {
// look for sub coord
c.SubCoordinators.Lock()
defer c.SubCoordinators.Unlock()
subcoord, ok := c.SubCoordinators.Directory[cl.Type]
if !ok {
// Sub Coordinator does not exists
logging.Debug(logging.DSpawn, "CC0 01 Created %v Coordinator", cl.Type)
subcoord = NewSubCoordinator(cl.Type, c.SystemViewer, c.Err)
// Sub Coordinator does not exist yet, creating one
fmt.Printf("Cl type: %s, Port: %d\n", cl.Type, c.Ports[cl.Type])
subcoord = NewSubCoordinator(cl.Type, c.Ports[cl.Type], c.Err)
c.SubCoordinators.Directory[cl.Type] = subcoord
fmt.Printf("Creating subcord for %s on %d\n", cl.Type, c.Ports[cl.Type])
logging.Debug(logging.DSpawn, "CC0 01 Created %v Coordinator", cl.Type)
}
// unlocking
c.SubCoordinators.Unlock()
// starts sub coord with client credentials
fmt.Printf("Starting subcoord client handler\n")
go subcoord.ClientHandler(cl)
fmt.Printf("Getting db info\n")
// setting up client response
url, org, token, bucket, err := c.DB.GetReactorClient(int(cl.Id))
url, bucket, org, token, err := c.DB.GetReactorClient(cl.Id)
fmt.Printf("Got URL: %s, Org: %s, Token: %s, Bucket: %s\n", url, org, token, bucket)
if err != nil {
c.Err <- err
}
cr := &ClientResponse{URL: url, Org: org, Token: token, Bucket: bucket, Port: c.Ports[cl.Type]}
return cr
// returning info
return &ClientResponse{URL: url, Org: org, Token: token, Bucket: bucket, Port: c.Ports[cl.Type]}
}
type ManagerInterface interface {
Start()
NewManager(*Client, *SystemViewer, chan error) GeneralManager
GetManager(uint32) (GeneralManager, bool)
AddManager(uint32, GeneralManager)
NewManager(*Client, chan error) GeneralManager
GetManager(int) (GeneralManager, bool)
AddManager(int, GeneralManager)
Register()
}
@@ -116,22 +132,22 @@ type GeneralManager interface {
type SubCoordinator struct {
Port int // port that we set up gRPC endpoint on
ManagerInterface // embed an interface to create/manager managers
*SystemViewer
//*SystemViewer
Err chan error
}
type Managers struct {
Directory map[uint32]interface{} // support for either manager
Directory map[int]interface{} // support for either manager
sync.RWMutex // potential perf
}
// interface stuff
func NewSubCoordinator(clientType string, sys *SystemViewer, err chan error) *SubCoordinator {
c := &SubCoordinator{Err: err}
c.SystemViewer = sys
man, errs := NewCoordinatorType(clientType, err)
if errs != nil {
err <- errs
func NewSubCoordinator(clientType string, port int, errCh chan error) *SubCoordinator {
c := &SubCoordinator{Err: errCh}
//c.SystemViewer = sys
man, err := NewCoordinatorType(clientType, errCh)
if err != nil {
errCh <- err
}
c.ManagerInterface = man
go man.Start()
@@ -141,15 +157,16 @@ func NewSubCoordinator(clientType string, sys *SystemViewer, err chan error) *Su
func (c *SubCoordinator) ClientHandler(cl *Client) {
// (creates and) notifies manager of client connection
c.UpdateManager(cl)
}
func (c *SubCoordinator) UpdateManager(cl *Client) {
// shouldn't happen all that often so should be fine to lock
fmt.Printf("Grabbing Manager\n")
m, exists := c.GetManager(cl.Id)
if !exists {
m = c.NewManager(cl, c.SystemViewer, c.Err)
fmt.Printf("Creating Manager\n")
m = c.NewManager(cl, c.Err)
m.UpdateClient(cl)
go c.AddManager(cl.Id, m)
go m.Start()
@@ -157,13 +174,13 @@ func (c *SubCoordinator) UpdateManager(cl *Client) {
go m.UpdateClient(cl)
}
func (m *Managers) AddManager(id uint32, man GeneralManager) {
func (m *Managers) AddManager(id int, man GeneralManager) {
m.Lock()
defer m.Unlock()
m.Directory[id] = man
}
func (m *Managers) GetManager(id uint32) (GeneralManager, bool) {
func (m *Managers) GetManager(id int) (GeneralManager, bool) {
// just read locks and returns
m.RLock()
defer m.RUnlock()
@@ -176,17 +193,17 @@ func (m *Managers) GetManager(id uint32) (GeneralManager, bool) {
func NewCoordinatorType(clientType string, err chan error) (ManagerInterface, error) {
m := make(map[uint32]interface{})
m := make(map[int]interface{})
if clientType == "reactor" {
c := &reactorCoordinator{}
//m := make(map[uint32]*ReactorManager)
c.Managers = &Managers{Directory: m}
return c, nil
} else if clientType == "tui" {
c := &tuiCoordinator{}
c := &reactorCoordinator{}
//m := make(map[uint32]*TUIManager)
c.Managers = &Managers{Directory: m}
return c, nil
//c.Managers = &Managers{Directory: m}
return c, errors.New("error, TUI not implemented")
}
return &reactorCoordinator{}, errors.New("Unrecognized client type")
}
@@ -202,9 +219,9 @@ func (r *reactorCoordinator) Start() {
logging.Debug(logging.DStart, "RCO 01 Starting!")
}
func (r *reactorCoordinator) NewManager(cl *Client, sys *SystemViewer, err chan error) GeneralManager {
func (r *reactorCoordinator) NewManager(cl *Client, err chan error) GeneralManager {
logging.Debug(logging.DClient, "RCO 01 starting manager for %v client %v", cl.Type, cl.Id)
return NewReactorManager(cl, sys, err)
return NewReactorManager(cl, err)
}
func (r *reactorCoordinator) Register() {
@@ -226,7 +243,7 @@ func (r *reactorCoordinator) Register() {
}
func (r *reactorCoordinator) ReactorStatusHandler(ctx context.Context, req *pb.ReactorStatusPing) (*pb.ReactorStatusResponse, error) {
m, exists := r.GetManager(req.GetId())
m, exists := r.GetManager(int(req.GetId()))
if !exists {
return &pb.ReactorStatusResponse{}, errors.New("Manager doesn't exist for that client")
}
@@ -238,6 +255,7 @@ func (r *reactorCoordinator) ReactorStatusHandler(ctx context.Context, req *pb.R
}
// tui coordinator
/*
type tuiCoordinator struct {
*Managers // by embedding general struct we allow coordinator to still call general funcs
pb.UnimplementedManagementServer
@@ -247,9 +265,9 @@ func (t *tuiCoordinator) Start() {
logging.Debug(logging.DStart, "TCO 01 Starting!")
}
func (t *tuiCoordinator) NewManager(cl *Client, sys *SystemViewer, err chan error) GeneralManager {
func (t *tuiCoordinator) NewManager(cl *Client, err chan error) GeneralManager {
logging.Debug(logging.DClient, "TCO 01 starting manager for %v client %v", cl.Type, cl.Id)
return NewTUIManager(cl, sys, err)
return NewTUIManager(cl, err)
}
func (t *tuiCoordinator) Register() {
@@ -268,11 +286,12 @@ func (t *tuiCoordinator) Register() {
go grpcServer.Serve(lis)
logging.Debug(logging.DClient, "TCO ready for client requests")
*/
/*
}
func (t *tuiCoordinator) GetDevices(ctx context.Context, req *pb.GetDevicesRequest) (*pb.GetDevicesResponse, error) {
// grpc handler to fwd to manager
m, exists := t.GetManager(req.GetClientId())
m, exists := t.GetManager(int(req.GetClientId()))
if !exists {
// doesnt exist for some reason
return &pb.GetDevicesResponse{}, errors.New("Manager doesn't exist for client")
@@ -294,3 +313,4 @@ func (t *tuiCoordinator) DeleteReactorDevice(ctx context.Context, req *pb.Delete
// TODO
return &pb.DeleteReactorDeviceResponse{}, nil
}
*/
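Register (its body is mostly elided in the hunks above) is where a coordinator exposes its gRPC service on the port handed down from the config. A runnable sketch of that server-side wiring; the standard gRPC health service stands in for the project's generated pb.Register… call so the example compiles on its own, and the port is a placeholder:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	port := 2023 // placeholder; the real value comes from c.Ports[clientType]
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		log.Fatal(err)
	}

	grpcServer := grpc.NewServer()
	// the real coordinator registers its own generated service handler here
	// (the one that serves ReactorStatusHandler); the health service is a stand-in
	healthpb.RegisterHealthServer(grpcServer, health.NewServer())

	go grpcServer.Serve(lis)
	log.Printf("gRPC endpoint listening on :%d", port)
	select {} // block; the real coordinator keeps running alongside other routines
}
```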

@@ -13,6 +13,7 @@ import (
/*
Listens on a supplied port and sends incoming clients over a supplied channel
Waits for a response on that channel to send back to the client with DB credentials
*/
type Listener struct { // exporting for easy use in the short term
@@ -28,10 +29,9 @@ type ClientPacket struct {
}
type Client struct {
// general client struct to store reqs from reactors/tui
Ip string
Port int
Id uint32
Id int
Model string
Type string
}
@@ -71,15 +71,16 @@ func (l *Listener) Register() error {
}
func (l *Listener) ClientDiscoveryHandler(ctx context.Context, ping *pb.ClientRequest) (*pb.ClientResponse, error) {
// incoming reactor ping need to spawn coord
c := &Client{Id: ping.GetClientId(), Type: ping.GetClientType()}
// incoming client ping, notify coord and wait for DB credentials to respond
c := &Client{Id: int(ping.GetClientId()), Type: ping.GetClientType()}
logging.Debug(logging.DClient, "LIS %v %v has connected\n", c.Type, c.Id)
// prepare packet to send to coordinator
ch := make(chan *ClientResponse)
p := &ClientPacket{Response: ch}
p.Client = c
p := &ClientPacket{Client: c, Response: ch}
// blocking
l.ClientConnections <- p
resp := <-ch
// return the port for the incoming requests
// prepare object to return to client
db := &pb.Database{URL: resp.URL, ORG: resp.Org, Token: resp.Token, Bucket: resp.Bucket}
return &pb.ClientResponse{ClientId: c.Id, ServerPort: uint32(resp.Port), Database: db}, nil
return &pb.ClientResponse{ClientId: uint32(c.Id), ServerPort: uint32(resp.Port), Database: db}, nil
}
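ClientDiscoveryHandler captures the core handshake: wrap the client and a fresh response channel into a packet, push it to the coordinator, and block until the credentials come back. Stripped of gRPC and the database, the pattern reduces to the sketch below (the types are simplified stand-ins for ClientPacket/ClientResponse, and the values are placeholders):

```go
package main

import "fmt"

type clientResponse struct {
	Port   int
	Bucket string
}

type clientPacket struct {
	ID       int
	Type     string
	Response chan *clientResponse
}

// coordinator drains the connection channel and answers each packet on its
// embedded response channel, mirroring CentralCoordinator.ClientListener.
func coordinator(in <-chan *clientPacket) {
	for p := range in {
		p.Response <- &clientResponse{Port: 2023, Bucket: fmt.Sprintf("reactor_%d", p.ID)}
	}
}

func main() {
	conns := make(chan *clientPacket)
	go coordinator(conns)

	// listener side: send the packet, then block for the reply before
	// answering the gRPC call, just like ClientDiscoveryHandler.
	p := &clientPacket{ID: 101, Type: "reactor", Response: make(chan *clientResponse)}
	conns <- p
	resp := <-p.Response
	fmt.Printf("client %d -> port %d, bucket %s\n", p.ID, resp.Port, resp.Bucket)
}
```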

@@ -2,12 +2,12 @@ package server
import (
//"log"
"time"
"FRMS/internal/pkg/logging"
_ "context"
"errors"
"math"
"sync"
"errors"
_ "context"
"FRMS/internal/pkg/logging"
"time"
)
// this package will implement a boilerplate manager
@@ -28,7 +28,7 @@ type active struct{
}
func NewManager(err chan error) *Manager {
hb := time.Duration(1 * time.Second) //hb to
hb := 5 * time.Second // heartbeat timeout
m := &Manager{Hb: hb, Err: err}
return m
}
@@ -103,6 +103,7 @@ func (m *Manager) Timeout() int {
return 0
}
}
/*
shouldn't be necessary anymore
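The Manager's main job is noticing when a client's heartbeat pings stop arriving within the Hb window (bumped to 5 seconds above). The actual timeout logic is elided in this diff, so the following is only an illustration of the watchdog idea, not the project's Manager:

```go
package main

import (
	"fmt"
	"time"
)

// watchdog signals on timedOut if no ping arrives within hb of the previous one.
func watchdog(hb time.Duration, ping <-chan struct{}, timedOut chan<- struct{}) {
	timer := time.NewTimer(hb)
	defer timer.Stop()
	for {
		select {
		case <-ping:
			// heartbeat received: push the deadline back
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(hb)
		case <-timer.C:
			timedOut <- struct{}{}
			return
		}
	}
}

func main() {
	ping := make(chan struct{})
	timedOut := make(chan struct{})
	go watchdog(500*time.Millisecond, ping, timedOut)

	ping <- struct{}{} // one ping, then silence
	<-timedOut
	fmt.Println("client timed out")
}
```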

@@ -1,44 +1,46 @@
package server
import (
pb "FRMS/internal/pkg/grpc"
"FRMS/internal/pkg/logging"
"context"
"fmt"
"time"
_ "log"
"context"
"sync"
"FRMS/internal/pkg/logging"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
"google.golang.org/grpc/credentials/insecure"
pb "FRMS/internal/pkg/grpc"
"google.golang.org/grpc/status"
)
// this package will implement a reactor coordinator and associated go routines
// this package will implement a reactor manager and associated go routines
type ReactorManager struct {
*Manager
StatusMon *StatusMonitor
// StatusMon *StatusMonitor putting on pause
*devstatus
}
type devstatus struct {
// keeping this around but not using it to create status for status mon
sync.Mutex
Devs map[uint32]*DeviceInfo
Devs map[int]*DeviceInfo
}
func NewReactorManager(c *Client,sys *SystemViewer,err chan error) GeneralManager {
func NewReactorManager(c *Client, err chan error) GeneralManager {
r := &ReactorManager{}
di := make(map[uint32]*DeviceInfo)
di := make(map[int]*DeviceInfo)
r.devstatus = &devstatus{Devs: di}
r.Manager = NewManager(err)
r.StatusMon = NewStatusMonitor("Reactor",c.Id,sys)
//r.StatusMon = NewStatusMonitor("Reactor", c.Id, sys)
return r
}
func (r *ReactorManager) Start() {
r.Manager.Start()
logging.Debug(logging.DStart, "RMA %v starting", r.Id)
go r.StatusMon.Send(&DeviceInfo{Id:r.Id,Type:"Reactor",Status:"[green]ONLINE[white]"},"Reactor")
//go r.StatusMon.Send(&DeviceInfo{Id: r.Id, Type: "Reactor", Status: "[green]ONLINE[white]"}, "Reactor")
//conn := r.Connect()
//empty := &grpc.ClientConn{}
//if conn != empty {
@@ -48,19 +50,21 @@ func (r *ReactorManager) Start() {
func (r *ReactorManager) Exit() {
r.Manager.Exit()
logging.Debug(logging.DExit, "RMA %v exiting", r.Id)
go r.StatusMon.Send(&DeviceInfo{Id:r.Id,Type:"Reactor",Status:"[red]OFFLINE[white]",Data:fmt.Sprintf("Last Seen %v",time.Now().Format("Mon at 03:04:05pm MST"))},"Reactor")
//go r.StatusMon.Send(&DeviceInfo{Id: r.Id, Type: "Reactor", Status: "[red]OFFLINE[white]", Data: fmt.Sprintf("Last Seen %v", time.Now().Format("Mon at 03:04:05pm MST"))}, "Reactor")
r.devstatus.Lock()
defer r.devstatus.Unlock()
// keeping this because it **COULD** be useful, maybe
for _, d := range r.Devs {
newd := d
newd.Status = "[yellow]UNKOWN[white]"
newd.Status = "UNKOWN"
r.Devs[newd.Id] = newd
go r.StatusMon.Send(newd,"Device")
//go r.StatusMon.Send(newd, "Device")
}
}
func (r *ReactorManager) Connect() *grpc.ClientConn {
// establish gRPC conection with reactor
// this seems pretty stupid, seems like the reactor should communicate up the chain to avoid unnecessary comms.
var opts []grpc.DialOption
var conn *grpc.ClientConn
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
@@ -89,7 +93,7 @@ func (r *ReactorManager) Connect() *grpc.ClientConn {
r.Err <- err
}
}
break;
break
}
return conn
}
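Connect dials back out to the reactor with insecure transport credentials and retries when the dial fails (and the comment above already questions whether the server should be dialing the reactor at all). A minimal sketch of that dial-with-retry shape using the same grpc, insecure, and status packages; the address, attempt count, and fixed one-second backoff are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/status"
)

// connect blocks up to two seconds per attempt; without WithBlock, grpc.Dial
// returns immediately and connection errors only surface on the first RPC.
func connect(addr string, attempts int) (*grpc.ClientConn, error) {
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	}
	var lastErr error
	for i := 0; i < attempts; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		conn, err := grpc.DialContext(ctx, addr, opts...)
		cancel()
		if err == nil {
			return conn, nil
		}
		if s, ok := status.FromError(err); ok {
			fmt.Printf("dial attempt %d failed with gRPC code %v\n", i+1, s.Code())
		} else {
			fmt.Printf("dial attempt %d failed: %v\n", i+1, err)
		}
		lastErr = err
		time.Sleep(time.Second)
	}
	return nil, lastErr
}

func main() {
	conn, err := connect("192.168.100.2:2022", 3) // placeholder reactor address
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to reactor")
}
```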
@@ -98,13 +102,12 @@ func (r *ReactorManager) ReactorStatusHandler(ctx context.Context, req *pb.React
// function client will call to update reactor information
//go r.PingReset()
for _, dev := range req.GetDevices() {
d := &DeviceInfo{Id:uint32(dev.GetAddr()),Type:dev.GetType(),Status:dev.GetStatus(),Data:dev.GetData()}
d := &DeviceInfo{Id: int(dev.GetAddr()), Type: dev.GetType(), Status: dev.GetStatus(), Data: dev.GetData()}
go r.UpdateDevice(d)
}
return &pb.ReactorStatusResponse{Id:r.Id}, nil
return &pb.ReactorStatusResponse{Id: uint32(r.Id)}, nil
}
/*
func (r *ReactorManager) Monitor(conn *grpc.ClientConn) {
defer conn.Close()
@@ -134,16 +137,17 @@ func (r *ReactorManager) Monitor(conn *grpc.ClientConn) {
}
}
*/
func (r *ReactorManager) UpdateDevice(d *DeviceInfo) {
r.devstatus.Lock()
defer r.devstatus.Unlock()
if olddev, ok := r.Devs[d.Id]; !ok {
// new device
r.Devs[d.Id] = d
go r.StatusMon.Send(d,"Device")
//go r.StatusMon.Send(d, "Device")
} else if olddev.Status != d.Status || olddev.Data != d.Data {
// dev status or data has changed
r.Devs[d.Id] = d
go r.StatusMon.Send(d,"Device")
//go r.StatusMon.Send(d, "Device")
}
}

@@ -1,20 +1,21 @@
package server
import (
"sync"
_ "fmt"
"FRMS/internal/pkg/logging"
)
// allows for multiple readers/writers
type DeviceInfo struct {
Id uint32
Id int
Type string
Status string
Data string
Index uint32
Index int
TransactionId uint32
}
/*
type StatusMonitor struct {
// allows for embedding into managers
TransactionId chan uint32 // monotonically increases to track outdated reqs
@@ -26,21 +27,21 @@ }
}
type devbuf struct {
ReactorId uint32 // reactor we are looking at, if any
Buf map[string]map[uint32]*DeviceInfo // convienent way to store/seperate device data
ReactorId int // reactor we are looking at, if any
Buf map[string]map[int]*DeviceInfo // convenient way to store/separate device data
sync.Mutex
}
func NewBuffer() map[string]map[uint32]*DeviceInfo {
rbuf := make(map[uint32]*DeviceInfo)
dbuf := make(map[uint32]*DeviceInfo)
sbuf := make(map[string]map[uint32]*DeviceInfo)
func NewBuffer() map[string]map[int]*DeviceInfo {
rbuf := make(map[int]*DeviceInfo)
dbuf := make(map[int]*DeviceInfo)
sbuf := make(map[string]map[int]*DeviceInfo)
sbuf["Reactor"] = rbuf
sbuf["Device"] = dbuf
return sbuf
}
func NewStatusMonitor(t string, id uint32, sys *SystemViewer) *StatusMonitor {
func NewStatusMonitor(t string, id int, sys *SystemViewer) *StatusMonitor {
tid := make(chan uint32)
sm := &StatusMonitor{TransactionId: tid}
sm.SystemViewer = sys
@@ -190,6 +191,7 @@ func (s *InfoStream) Start() {
// consistency
go s.Listen()
}
// goal is to hook every new manager into the reactor status chan
func (s *InfoStream) AddSender() chan *DeviceInfo {
return s.Stream
@@ -227,7 +229,7 @@ func (l *listeners) Echo(d *DeviceInfo) {
}
}
func (s *InfoStream) AddListener(id uint32, ch chan *DeviceInfo) map[uint32]*DeviceInfo {
func (s *InfoStream) AddListener(id int, ch chan *DeviceInfo) map[uint32]*DeviceInfo {
// if i get a memory leak ill eat my shoe
s.listeners.Lock()
defer s.listeners.Unlock()
@@ -243,7 +245,7 @@ func (s *InfoStream) AddListener(id uint32, ch chan *DeviceInfo) map[uint32]*Dev
return s.Layout.Devs
}
func (l *listeners) RemoveListener(id uint32) {
func (l *listeners) RemoveListener(id int) {
l.Lock()
defer l.Unlock()
if lis, ok := l.Listeners[id]; ok {
@@ -297,7 +299,7 @@ func (s *SystemViewer) AddDeviceSender(reactorId uint32) chan *DeviceInfo {
return ds.AddSender()
}
func (s *SystemViewer) AddListener(id, rid uint32) (chan *DeviceInfo, map[uint32]*DeviceInfo) {
func (s *SystemViewer) AddListener(id, rid int) (chan *DeviceInfo, map[uint32]*DeviceInfo) {
// returns a listener for that chan
ch := make(chan *DeviceInfo)
if rid != 0 {
@@ -307,9 +309,10 @@ func (s *SystemViewer) AddListener(id, rid uint32) (chan *DeviceInfo, map[uint32
}
}
func (s *SystemViewer) RemoveListener(rid, tid uint32) {
func (s *SystemViewer) RemoveListener(rid, tid int) {
// removes chan for specific tid and rid
s.DeviceStream.Lock()
defer s.DeviceStream.Unlock()
go s.DeviceStream.Reactors[rid].RemoveListener(tid)
}
*/

@@ -1,5 +1,7 @@
package server
/*
import (
// "fmt"
"time"
@@ -118,4 +120,4 @@ func (t *TUIManager) DeleteReactorDevice(ctx context.Context, req *pb.DeleteReac
//
return &pb.DeleteReactorDeviceResponse{}, nil
}
*/

@@ -34,3 +34,7 @@
#### 12/06 TODO
- I think I can completely remove the old config way and just pass the viper object directly. It's not worth the hassle of trying to keep track of a million interfaces.
#### 12/07 TODO
- I concede: I will just remove the flags, since most people will never use them anyway, and rely on env vars and config files instead. To hell with the flags.
- I am ripping out all of the TUI and status manager stuff; it's convoluted and harder to maintain than just pulling info from the database.
- I can eventually rework the TUI to pull from the DB, which is fine: there will never be that many clients, and most of the calls are one-time requests with refreshes that aren't that slow anyway. A rough sketch of that DB-pull approach is below.
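Under the assumption that the TUI would query InfluxDB directly, that DB-pull could look roughly like this with the same influxdb-client-go v2 dependency and a Flux query; the org, token, bucket, and measurement names are placeholders, not values from this repo:

```go
package main

import (
	"context"
	"fmt"

	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
)

func main() {
	client := influxdb2.NewClient("http://localhost:8086", "read-token") // placeholder creds
	defer client.Close()

	queryAPI := client.QueryAPI("my-org")
	// last 10 minutes of a hypothetical reactor_status measurement
	flux := `from(bucket: "reactor_1")
  |> range(start: -10m)
  |> filter(fn: (r) => r._measurement == "reactor_status")`

	result, err := queryAPI.Query(context.Background(), flux)
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	for result.Next() {
		rec := result.Record()
		fmt.Printf("%v %s=%v\n", rec.Time(), rec.Field(), rec.Value())
	}
	if result.Err() != nil {
		fmt.Println("stream error:", result.Err())
	}
}
```

A refresh in the TUI then becomes one bounded query instead of a long-lived status stream, which is exactly the simplification the note above is after.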
