From b01eb57ee54f3f1a55545a939e10cf5fcea44476 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Tue, 28 Mar 2023 13:59:16 +0800 Subject: [PATCH 01/45] [agent-go] bug-fix-1 --- agent-go/register/AgentIntitilization.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/agent-go/register/AgentIntitilization.go b/agent-go/register/AgentIntitilization.go index 36ad407..d833585 100644 --- a/agent-go/register/AgentIntitilization.go +++ b/agent-go/register/AgentIntitilization.go @@ -152,7 +152,9 @@ func parseAgentServerInfo() *config.AgentServerInfo { // 约定文件地址为 /etc/environment.d/octopus-agent.conf // 目前使用 var agentServerInfo *config.AgentServerInfo - yamlFile, err := ioutil.ReadFile("C:\\Users\\wdd\\IdeaProjects\\ProjectOctopus\\agent-go\\server-env.yaml") + //yamlFile, err := ioutil.ReadFile("C:\\Users\\wdd\\IdeaProjects\\ProjectOctopus\\agent-go\\server-env.yaml") + yamlFile, err := ioutil.ReadFile("server-env.yaml") + if err != nil { panic(fmt.Errorf("failed to read YAML file: %v", err)) } From 124143c6c6b847aad4b050e78325ec85b41ef717 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 30 Mar 2023 14:26:11 +0800 Subject: [PATCH 02/45] =?UTF-8?q?[agent-go]=20=E8=B0=83=E6=95=B4=E4=BB=A3?= =?UTF-8?q?=E7=A0=81=E7=BB=93=E6=9E=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- agent-go/IRabbitSendWriter.go | 11 - agent-go/config/OctopusMessage.go | 50 ---- agent-go/executor/CommandExecutor.go | 32 +-- agent-go/g/Nacos.go | 1 - agent-go/g/global.go | 23 +- agent-go/{g => logger}/logger.go | 4 +- agent-go/main.go | 7 +- agent-go/rabbitmq/MessageReaderWriter.go | 63 ------ agent-go/rabbitmq/OMsgConnector.go | 72 ++++++ agent-go/rabbitmq/OctopusMessage.go | 113 +++++++++ agent-go/rabbitmq/OctopusMsgHandler.go | 106 --------- agent-go/rabbitmq/RabbitMQConnector.go | 154 ------------- agent-go/rabbitmq/RabbitMsgQueue.go | 214 ++++++++++++++++++ agent-go/register/AgentInitialization.go | 186 +++++++++++++++ agent-go/register/AgentIntitilization.go | 175 -------------- .../{config => register}/AgentServerInfo.go | 2 +- .../NacosInitalization.go} | 3 +- 17 files changed, 622 insertions(+), 594 deletions(-) delete mode 100644 agent-go/IRabbitSendWriter.go delete mode 100644 agent-go/config/OctopusMessage.go delete mode 100644 agent-go/g/Nacos.go rename agent-go/{g => logger}/logger.go (97%) delete mode 100644 agent-go/rabbitmq/MessageReaderWriter.go create mode 100644 agent-go/rabbitmq/OMsgConnector.go create mode 100644 agent-go/rabbitmq/OctopusMessage.go delete mode 100644 agent-go/rabbitmq/OctopusMsgHandler.go delete mode 100644 agent-go/rabbitmq/RabbitMQConnector.go create mode 100644 agent-go/rabbitmq/RabbitMsgQueue.go create mode 100644 agent-go/register/AgentInitialization.go delete mode 100644 agent-go/register/AgentIntitilization.go rename agent-go/{config => register}/AgentServerInfo.go (98%) rename agent-go/{g/NacosConfig.go => register/NacosInitalization.go} (99%) diff --git a/agent-go/IRabbitSendWriter.go b/agent-go/IRabbitSendWriter.go deleted file mode 100644 index 11186ab..0000000 --- a/agent-go/IRabbitSendWriter.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -/*type RabbitSendWriter interface { - - Send(conn *RabbitMQConn, connProp *ConnectProperty, message []byte) - - Read(conn *RabbitMQConn, connProp *ConnectProperty, autoAck bool) <-chan amqp.Delivery - -} - -*/ diff --git a/agent-go/config/OctopusMessage.go b/agent-go/config/OctopusMessage.go deleted file mode 100644 index cef83cd..0000000 --- 
a/agent-go/config/OctopusMessage.go +++ /dev/null @@ -1,50 +0,0 @@ -package config - -import ( - "agent-go/utils" - "encoding/json" - "fmt" - "time" -) - -type OctopusMessage struct { - UUID string `json:"uuid"` - InitTime time.Time `json:"init_time" format:"2023-03-21 16:38:30"` - Type string `json:"type"` - Content interface{} `json:"content"` - Result interface{} `json:"result"` - ACTime time.Time `json:"ac_time" format:"2023-03-21 16:38:30"` -} - -type ExecutionMessage struct { - NeedResultReplay bool `json:"needResultReplay"` - DurationTask bool `json:"durationTask,default:false"` - Type string `json:"type"` - SingleLineCommand []string `json:"singleLineCommand"` - MultiLineCommand [][]string `json:"multiLineCommand"` - PipeLineCommand [][]string `json:"pipeLineCommand"` - ResultKey string `json:"resultKey"` -} - -// BuildOctopusMsg 生成OctopusMessage -func (m *OctopusMessage) BuildOctopusMsg(omType string, content interface{}) *OctopusMessage { - - // 当前时间 - curTimeString := utils.CurTimeString() - - // must write to string format, otherwise it's very hard to deserialize - - bytes, err := json.Marshal(content) - if err != nil { - fmt.Sprintf("OctopusMessage Build Error ! %v", err) - } - - return &OctopusMessage{ - UUID: curTimeString, - InitTime: time.Now(), - Type: omType, - Content: string(bytes), - Result: nil, - ACTime: time.Time{}, - } -} diff --git a/agent-go/executor/CommandExecutor.go b/agent-go/executor/CommandExecutor.go index 076ffe6..a972fee 100644 --- a/agent-go/executor/CommandExecutor.go +++ b/agent-go/executor/CommandExecutor.go @@ -1,31 +1,43 @@ package executor import ( - "agent-go/config" - "agent-go/g" + logger2 "agent-go/logger" "bufio" "bytes" "fmt" "os/exec" - "time" ) -var log = g.G.LOG +type ExecutionMessage struct { + NeedResultReplay bool `json:"needResultReplay"` + DurationTask bool `json:"durationTask,default:false"` + Type string `json:"type"` + SingleLineCommand []string `json:"singleLineCommand"` + MultiLineCommand [][]string `json:"multiLineCommand"` + PipeLineCommand [][]string `json:"pipeLineCommand"` + ResultKey string `json:"resultKey"` +} -func Execute(om *config.OctopusMessage, em *config.ExecutionMessage) ([]string, error) { +var log = logger2.Log + +func Execute(em *ExecutionMessage) ([]string, error) { var resultLog []string var err error + var realCommand [][]string if em.PipeLineCommand != nil && len(em.PipeLineCommand) != 0 { // 管道命令 resultLog, err = PipeLineCommandExecutor(em.PipeLineCommand) + realCommand = em.PipeLineCommand } else if em.MultiLineCommand != nil && len(em.MultiLineCommand) != 0 { // 多行命令 resultLog, err = MultiLineCommandExecutor(em.MultiLineCommand) + realCommand = em.MultiLineCommand } else { // 单行命令 resultLog, err = SingleLineCommandExecutor(em.SingleLineCommand) + realCommand = [][]string{em.SingleLineCommand} } // 归一化错误和日志 @@ -33,15 +45,9 @@ func Execute(om *config.OctopusMessage, em *config.ExecutionMessage) ([]string, resultLog = append(resultLog, fmt.Sprintf("Error: %s", err.Error())) } - // 处理执行日志 - // 是否需要返回处理日志,现在默认返回 - if em.NeedResultReplay { - // 需要返回处理结果 - om.ACTime = time.Now() - om.Result = resultLog - } + commandResult := fmt.Sprintf("Excution Comand are=> %v, Executor Result: %v", realCommand, resultLog) - log.Info(fmt.Sprintf("Executor Result: %s", resultLog)) + log.Info(commandResult) return resultLog, err } diff --git a/agent-go/g/Nacos.go b/agent-go/g/Nacos.go deleted file mode 100644 index c43b680..0000000 --- a/agent-go/g/Nacos.go +++ /dev/null @@ -1 +0,0 @@ -package g diff --git 
a/agent-go/g/global.go b/agent-go/g/global.go index cdd3915..4cf4360 100644 --- a/agent-go/g/global.go +++ b/agent-go/g/global.go @@ -1,16 +1,15 @@ package g import ( - "agent-go/config" + logger2 "agent-go/logger" "github.com/panjf2000/ants/v2" "github.com/spf13/viper" ) type Global struct { - LOG *Logger - NacosConfig *viper.Viper - AgentServerInfo *config.AgentServerInfo - P *ants.Pool + AgentHasRegister bool + NacosConfig *viper.Viper + P *ants.Pool } const ( @@ -21,21 +20,17 @@ const ( InitOmType = "INIT" ) -var logger, _ = NewLogger() - -var pool, _ = ants.NewPool(100, ants.WithNonblocking(true), ants.WithLogger(logger)) +var pool, _ = ants.NewPool(100, ants.WithNonblocking(true), ants.WithLogger(logger2.Log)) var G = NewGlobal( - logger, pool, ) // NewGlobal NewGlobal构造函数返回一个新的Global实例,其中包含指定的Logger。 -func NewGlobal(logger *Logger, pool *ants.Pool) *Global { +func NewGlobal(pool *ants.Pool) *Global { return &Global{ - LOG: logger, - NacosConfig: nil, - AgentServerInfo: nil, - P: pool, + AgentHasRegister: false, + NacosConfig: nil, + P: pool, } } diff --git a/agent-go/g/logger.go b/agent-go/logger/logger.go similarity index 97% rename from agent-go/g/logger.go rename to agent-go/logger/logger.go index b099413..11177a8 100644 --- a/agent-go/g/logger.go +++ b/agent-go/logger/logger.go @@ -1,4 +1,4 @@ -package g +package logger import ( "fmt" @@ -11,6 +11,8 @@ type Logger struct { *zap.Logger } +var Log, _ = NewLogger() + // NewLogger creates a new Logger instance. func NewLogger() (*Logger, error) { config := zap.Config{ diff --git a/agent-go/main.go b/agent-go/main.go index 4b61e8b..8ed778d 100644 --- a/agent-go/main.go +++ b/agent-go/main.go @@ -2,12 +2,13 @@ package main import ( "agent-go/g" + logger2 "agent-go/logger" "agent-go/register" "flag" "fmt" ) -var log = g.G.LOG +var log = logger2.Log func main() { @@ -20,9 +21,9 @@ func main() { println(filename) // 初始化Nacos的连接配置 - g.G.NacosConfig = g.InitNacos(filename) + g.G.NacosConfig = register.InitNacos(filename) // 执行初始化之策工作 - g.G.AgentServerInfo = register.INIT() + register.AgentServerInfoCache = register.INIT() } diff --git a/agent-go/rabbitmq/MessageReaderWriter.go b/agent-go/rabbitmq/MessageReaderWriter.go deleted file mode 100644 index 87e1c63..0000000 --- a/agent-go/rabbitmq/MessageReaderWriter.go +++ /dev/null @@ -1,63 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "github.com/nacos-group/nacos-sdk-go/v2/common/logger" - "github.com/streadway/amqp" -) - -// RabbitMQConn is a struct that holds the connection and channel objects -type RabbitMQConn struct { - Connection *amqp.Connection - Channel *amqp.Channel -} - -type ConnectProperty struct { - ExchangeName string - QueueName string - ExchangeType string - TopicKey string -} - -// Send 向RabbitMQ中发送消息 -func Send(conn *RabbitMQConn, connProp *ConnectProperty, message []byte) { - // 往哪里发 - channel := conn.Channel - - // 发送 - err := channel.Publish( - connProp.ExchangeName, - connProp.TopicKey, - false, - false, - amqp.Publishing{ - ContentType: "text/plain", - Body: message, - }, - ) - if err != nil { - logger.Error(fmt.Sprintf("Failed to publish a message: %v", err)) - } -} - -func Read(conn *RabbitMQConn, connProp *ConnectProperty, autoAck bool) <-chan amqp.Delivery { - - // 拿到特定的Channel - channel := conn.Channel - - // 开始读取队列中的全部消息 - msgs, err := channel.Consume( - connProp.QueueName, // 队列名称 - "", // 消费者名称 - autoAck, // auto-ack - false, // exclusive - false, // no-local - false, // no-wait - nil, // arguments - ) - if err != nil { - log.Error(fmt.Sprintf("Failed to 
register a consumer: %v", err)) - } - - return msgs -} diff --git a/agent-go/rabbitmq/OMsgConnector.go b/agent-go/rabbitmq/OMsgConnector.go new file mode 100644 index 0000000..ec016ff --- /dev/null +++ b/agent-go/rabbitmq/OMsgConnector.go @@ -0,0 +1,72 @@ +package rabbitmq + +import ( + "agent-go/g" + "encoding/json" + "fmt" +) + +var OctopusToServerQueue = &RabbitQueue{} + +func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { + + // 建立 业务消息 接收队列 + // agentTopicName为名称的队列 + nacosConfig := g.G.NacosConfig + + octopusExchangeName := nacosConfig.GetString("octopus.message.octopus_exchange") + + octopusConnectProp := &ConnectProperty{ + ExchangeName: octopusExchangeName, + QueueName: agentTopicName, + ExchangeType: g.QueueTopic, + TopicKey: agentTopicName + "*", + } + + octopusMsgQueue := &RabbitQueue{ + RabbitProp: octopusConnectProp, + } + octopusMsgQueue.Connect() + + deliveries := octopusMsgQueue.Read(true) + + // 死循环,处理Octopus Message + P.Submit( + func() { + for delivery := range deliveries { + + var om *OctopusMessage + err := json.Unmarshal(delivery.Body, &om) + if err != nil { + log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %s", delivery.Body)) + // 保存到某处 + continue + } + + // 策略模式 处理消息 + om.Handle() + } + }) + + // 建立 业务消息 返回队列 + // 统一为 OctopusToServer + + octopusToServerQueueName := nacosConfig.GetString("octopus.message.octopus_to_server") + + octopusToServerProp := &ConnectProperty{ + ExchangeName: octopusExchangeName, + QueueName: octopusToServerQueueName, + ExchangeType: g.QueueTopic, + TopicKey: octopusToServerQueueName, + } + + OctopusToServerQueue = &RabbitQueue{ + RabbitProp: octopusToServerProp, + } + + // 开启运行时消息返回队列 + OctopusToServerQueue.Connect() + + log.InfoF("Octopus Message Replay Queue is established ! => %v", OctopusToServerQueue) + +} diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go new file mode 100644 index 0000000..3b57302 --- /dev/null +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -0,0 +1,113 @@ +package rabbitmq + +import ( + "agent-go/executor" + "agent-go/g" + "agent-go/utils" + "encoding/json" + "fmt" + "time" +) + +var P = g.G.P + +type IOctopusMessage interface { + OctopusMsgHandler + OctopusMsgSender + OctopusMsgBuilder +} + +type OctopusMsgHandler interface { + Handle(octopusMessage *OctopusMessage) +} + +type OctopusMsgSender interface { + Send(rabbitQueue *RabbitQueue, msg []byte) +} + +type OctopusMsgBuilder interface { + Build(omType string, content interface{}) *OctopusMessage +} + +type OctopusMessage struct { + UUID string `json:"uuid"` + InitTime time.Time `json:"init_time" format:"2023-03-21 16:38:30"` + Type string `json:"type"` + Content interface{} `json:"content"` + Result interface{} `json:"result"` + ACTime time.Time `json:"ac_time" format:"2023-03-21 16:38:30"` +} + +func (om *OctopusMessage) Handle() { + // 实际执行 OM handle进程 + doHandleOctopusMessage(om) +} + +func (om *OctopusMessage) Send(rabbitQueue *RabbitQueue, msg []byte) { + rabbitQueue.Send(msg) +} + +func (om *OctopusMessage) Build(omType string, content interface{}) *OctopusMessage { + + // 当前时间 + curTimeString := utils.CurTimeString() + + // must write to string format, otherwise it's very hard to deserialize + bytes, err := json.Marshal(content) + if err != nil { + fmt.Sprintf("OctopusMessage Build Error ! 
%v", err) + } + + return &OctopusMessage{ + UUID: curTimeString, + InitTime: time.Now(), + Type: omType, + Content: string(bytes), + Result: nil, + ACTime: time.Time{}, + } +} + +func doHandleOctopusMessage(octopusMessage *OctopusMessage) { + + switch octopusMessage.Type { + case g.InitOmType: + go func() {}() + case g.ExecOmType: + P.Submit(func() { + executorOMHandler(octopusMessage) + }) + case g.StatusOmType: + P.Submit(func() { + statusOMHandler(octopusMessage) + }) + default: + P.Submit(func() { + blackHoleOMHandler(octopusMessage) + }) + } + +} + +func executorOMHandler(octopusMessage *OctopusMessage) { + + executionMsgString := octopusMessage.Content.(string) + + var executionMessage *executor.ExecutionMessage + err := json.Unmarshal([]byte(executionMsgString), &executionMessage) + if err != nil { + log.Error(fmt.Sprintf("execution message convert to json is wrong! msg is => %s", executionMsgString)) + return + } + + // 交给后端的实际处理器处理, 再次策略 + executor.Execute(executionMessage) +} + +func statusOMHandler(octopusMessage *OctopusMessage) { + +} + +func blackHoleOMHandler(octopusMessage *OctopusMessage) { + log.Error(fmt.Sprintf("octopusMessage type wrong! msg is => %v", octopusMessage)) +} diff --git a/agent-go/rabbitmq/OctopusMsgHandler.go b/agent-go/rabbitmq/OctopusMsgHandler.go deleted file mode 100644 index cded014..0000000 --- a/agent-go/rabbitmq/OctopusMsgHandler.go +++ /dev/null @@ -1,106 +0,0 @@ -package rabbitmq - -import ( - "agent-go/config" - "agent-go/executor" - "agent-go/g" - "encoding/json" - "fmt" -) - -var P = g.G.P - -func HandleOMsg(initOMsgFromServer *config.OctopusMessage) { - - agentTopicName := initOMsgFromServer.Result.(string) - - OctopusExchange := g.G.NacosConfig.GetString("octopus.message.octopus_exchange") - - octopusConnectProp := &ConnectProperty{ - ExchangeName: OctopusExchange, - QueueName: agentTopicName, - ExchangeType: g.QueueTopic, - TopicKey: agentTopicName + "*", - } - - octopusConn, err := NewRabbitMQConn(octopusConnectProp) - if err != nil { - log.Error(fmt.Sprintf("Octopus Message Queue create Error ! => %v", octopusConnectProp)) - panic(err) - } - - // 开始接收消息 - channel := octopusConn.Channel - deliveries, err := channel.Consume( - agentTopicName, - agentTopicName, - true, - false, - false, - false, - nil, - ) - if err != nil { - return - } - - // 死循环,处理Ocotpus Message - for delivery := range deliveries { - - var om *config.OctopusMessage - err := json.Unmarshal(delivery.Body, &om) - if err != nil { - log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %s", delivery.Body)) - // 保存到某处 - continue - } - - // 策略模式 处理消息 - doHandleOctopusMessage(om) - } - -} - -func doHandleOctopusMessage(octopusMessage *config.OctopusMessage) { - - switch octopusMessage.Type { - case g.InitOmType: - go func() {}() - case g.ExecOmType: - P.Submit(func() { - executorOMHandler(octopusMessage) - }) - case g.StatusOmType: - P.Submit(func() { - statusOMHandler(octopusMessage) - }) - default: - P.Submit(func() { - blackHoleOMHandler(octopusMessage) - }) - } - -} - -func executorOMHandler(octopusMessage *config.OctopusMessage) { - - executionMsgString := octopusMessage.Content.(string) - - var executionMessage *config.ExecutionMessage - err := json.Unmarshal([]byte(executionMsgString), &executionMessage) - if err != nil { - log.Error(fmt.Sprintf("execution message convert to json is wrong! 
msg is => %s", executionMsgString)) - return - } - - // 交给后端的实际处理器处理, 再次策略 - executor.Execute(octopusMessage, executionMessage) -} - -func statusOMHandler(octopusMessage *config.OctopusMessage) { - -} - -func blackHoleOMHandler(octopusMessage *config.OctopusMessage) { - log.Error(fmt.Sprintf("octopusMessage type wrong! msg is => %v", octopusMessage)) -} diff --git a/agent-go/rabbitmq/RabbitMQConnector.go b/agent-go/rabbitmq/RabbitMQConnector.go deleted file mode 100644 index f079934..0000000 --- a/agent-go/rabbitmq/RabbitMQConnector.go +++ /dev/null @@ -1,154 +0,0 @@ -package rabbitmq - -import ( - "agent-go/g" - "fmt" - "github.com/streadway/amqp" - "strings" - "sync" -) - -var log = g.G.LOG - -// 定义全局唯一的 Singleton 实例 -var instance *amqp.Connection - -// 用 sync.Once 变量确保初始化函数只会被调用一次 -var once sync.Once - -// 初始化 Singleton 实例的函数 -func createInstance() { - // 在这里进行 Singleton 的初始化操作 - - // 获取RabbitMQ的连接地址 - rabbitMQEndpointFromG := parseRabbitMQEndpointFromG() - - // 创建全局唯一连接 RabbitMQ连接 - connection, err := amqp.Dial(rabbitMQEndpointFromG) - if err != nil { - log.Error(fmt.Sprintf("failed to connect to RabbitMQ: %v", err)) - } - - instance = connection -} - -// GetInstance 获取全局唯一的 Singleton 实例的函数 -func GetInstance() *amqp.Connection { - // 使用 sync.Once 确保 createInstance 只会被调用一次 - once.Do(createInstance) - return instance -} - -// NewRabbitMQConn creates a new RabbitMQ connection object -func NewRabbitMQConn(property *ConnectProperty) (*RabbitMQConn, error) { - - // 获取RabbitMQ的连接 - conn := GetInstance() - // 获取RabbitMQ的连接地址 - //rabbitMQEndpointFromG := parseRabbitMQEndpointFromG() - //conn, err := amqp.Dial(rabbitMQEndpointFromG) - //if err != nil { - // log.Error(fmt.Sprintf("failed to connect to RabbitMQ: %v", err)) - //} - - ch, err := conn.Channel() - if err != nil { - return nil, fmt.Errorf("failed to create RabbitMQ channel: %w", err) - } - - if err = ch.ExchangeDeclare( - property.ExchangeName, // name of the exchange - property.ExchangeType, // type of the exchange - false, // durable - false, // delete when complete - false, // internal - false, // noWait - nil, // arguments - ); err != nil { - return nil, fmt.Errorf("failed to declare RabbitMQ exchange: %w", err) - } - - _, err = ch.QueueDeclare( - property.QueueName, // name of the queue - false, // durable - false, // delete when unused - false, // exclusive - false, // noWait - nil, // arguments - ) - if err != nil { - return nil, fmt.Errorf("failed to declare RabbitMQ queue: %w", err) - } - - if err = ch.QueueBind( - property.QueueName, // name of the queue - property.TopicKey, // routing key - all topics - property.ExchangeName, // name of the exchange - false, // noWait - nil, // arguments - ); err != nil { - return nil, fmt.Errorf("failed to bind RabbitMQ queue: %w", err) - } - - return &RabbitMQConn{Connection: conn, Channel: ch}, nil -} - -// parseRabbitMQEndpoint 根据全局变量NacosConfig解析出RabbitMQ的连接地址 -func parseRabbitMQEndpointFromG() string { - - nacosConfig := g.G.NacosConfig - - var res strings.Builder - - host := nacosConfig.GetString("spring.rabbitmq.host") - port := nacosConfig.GetString("spring.rabbitmq.port") - username := nacosConfig.GetString("spring.rabbitmq.username") - password := nacosConfig.GetString("spring.rabbitmq.password") - virtualHost := nacosConfig.GetString("spring.rabbitmq.virtual-host") - - // amqp://{username}:{password}@{hostname}:{port}/{virtual_host} - res.WriteString("amqp://") - res.WriteString(username) - res.WriteString(":") - res.WriteString(password) - res.WriteString("@") - 
res.WriteString(host) - res.WriteString(":") - res.WriteString(port) - res.WriteString("/") - res.WriteString(virtualHost) - - s := res.String() - log.Debug(fmt.Sprintf("generate RabbitMQ endpoint is %s", s)) - return s -} - -func CloseChannel(conn *RabbitMQConn) error { - var err error - - if conn.Channel != nil { - if err = conn.Channel.Close(); err != nil { - log.Error(fmt.Sprintf("Failed to close RabbitMQ channel: %v", err)) - } - } - return err -} - -// CloseRabbitMQAll closes the RabbitMQ connection and channel -func (r *RabbitMQConn) CloseRabbitMQAll() error { - var err error - - if r.Channel != nil { - if err = r.Channel.Close(); err != nil { - log.Error(fmt.Sprintf("Failed to close RabbitMQ channel: %v", err)) - } - } - - if r.Connection != nil { - if err = r.Connection.Close(); err != nil { - log.Error(fmt.Sprintf("Failed to close RabbitMQ connection: %v", err)) - } - } - - return err -} diff --git a/agent-go/rabbitmq/RabbitMsgQueue.go b/agent-go/rabbitmq/RabbitMsgQueue.go new file mode 100644 index 0000000..d89111e --- /dev/null +++ b/agent-go/rabbitmq/RabbitMsgQueue.go @@ -0,0 +1,214 @@ +package rabbitmq + +import ( + "agent-go/g" + logger2 "agent-go/logger" + "fmt" + "github.com/streadway/amqp" + "strings" + "sync" +) + +type RabbitMQ interface { + RabbitSendWriter + + RabbitConnectCloser +} + +type RabbitSendWriter interface { + Send(message []byte) + + Read(autoAck bool) <-chan amqp.Delivery +} + +type RabbitConnectCloser interface { + Connect() + + Close() error +} + +type RabbitQueue struct { + RabbitConn *RabbitMQConn + RabbitProp *ConnectProperty +} + +// RabbitMQConn is a struct that holds the connection and channel objects +type RabbitMQConn struct { + Connection *amqp.Connection + Channel *amqp.Channel +} + +type ConnectProperty struct { + ExchangeName string + QueueName string + ExchangeType string + TopicKey string +} + +var log = logger2.Log + +// 定义全局唯一的 Singleton 实例 +var instance *amqp.Connection + +// 用 sync.Once 变量确保初始化函数只会被调用一次 +var once sync.Once + +// 初始化 Singleton 实例的函数 +func createInstance() { + // 在这里进行 Singleton 的初始化操作 + + // 获取RabbitMQ的连接地址 + rabbitMQEndpointFromG := parseRabbitMQEndpointFromG() + + // 创建全局唯一连接 RabbitMQ连接 + connection, err := amqp.Dial(rabbitMQEndpointFromG) + if err != nil { + log.Error(fmt.Sprintf("failed to connect to RabbitMQ: %v", err)) + } + + instance = connection +} + +// GetInstance 获取全局唯一的 Singleton 实例的函数 +func GetInstance() *amqp.Connection { + // 使用 sync.Once 确保 createInstance 只会被调用一次 + once.Do(createInstance) + return instance +} + +// Connect creates a new RabbitMQ connection object +func (r *RabbitQueue) Connect() { + + // 获取RabbitMQ的连接 + conn := GetInstance() + + ch, err := conn.Channel() + if err != nil { + log.Error(fmt.Sprintf("failed to create RabbitMQ channel: %w", err)) + } + + if err = ch.ExchangeDeclare( + r.RabbitProp.ExchangeName, // name of the exchange + r.RabbitProp.ExchangeType, // type of the exchange + false, // durable + false, // delete when complete + false, // internal + false, // noWait + nil, // arguments + ); err != nil { + log.Error(fmt.Sprintf("failed to declare exchange !: %w", err)) + } + + _, err = ch.QueueDeclare( + r.RabbitProp.QueueName, // name of the queue + false, // durable + false, // delete when unused + false, // exclusive + false, // noWait + nil, // arguments + ) + if err != nil { + log.Error(fmt.Sprintf("failed to declare RabbitMQ queue: %w", err)) + } + + if err = ch.QueueBind( + r.RabbitProp.QueueName, // name of the queue + r.RabbitProp.TopicKey, // routing key - all topics + 
r.RabbitProp.ExchangeName, // name of the exchange + false, // noWait + nil, // arguments + ); err != nil { + log.Error(fmt.Sprintf("failed to bind RabbitMQ queue: %w", err)) + } + + r.RabbitConn = &RabbitMQConn{ + Connection: conn, + Channel: ch, + } +} + +func (r *RabbitQueue) Close() error { + var err error + + if r.RabbitConn.Channel != nil { + if err = r.RabbitConn.Channel.Close(); err != nil { + log.Error(fmt.Sprintf("Failed to close RabbitMQ channel: %v", err)) + } + } + return err +} + +// Send 向RabbitMQ中发送消息 +func (r *RabbitQueue) Send(message []byte) { + // 往哪里发 + channel := r.RabbitConn.Channel + + // 发送 + err := channel.Publish( + r.RabbitProp.ExchangeName, + r.RabbitProp.TopicKey, + false, + false, + amqp.Publishing{ + ContentType: "text/plain", + Body: message, + }, + ) + if err != nil { + log.Error(fmt.Sprintf("Failed to publish a message: %v", err)) + } +} + +func (r *RabbitQueue) Read(autoAck bool) <-chan amqp.Delivery { + + // 拿到特定的Channel + channel := r.RabbitConn.Channel + + // 开始读取队列中的全部消息 + msgs, err := channel.Consume( + r.RabbitProp.QueueName, // 队列名称 + "", // 消费者名称 + autoAck, // auto-ack + false, // exclusive + false, // no-local + false, // no-wait + nil, // arguments + ) + if err != nil { + log.Error(fmt.Sprintf("Failed to register a consumer: %v", err)) + } + + return msgs +} + +// parseRabbitMQEndpoint 根据全局变量NacosConfig解析出RabbitMQ的连接地址 +func parseRabbitMQEndpointFromG() string { + + nacosConfig := g.G.NacosConfig + + var res strings.Builder + + host := nacosConfig.GetString("spring.rabbitmq.host") + port := nacosConfig.GetString("spring.rabbitmq.port") + username := nacosConfig.GetString("spring.rabbitmq.username") + password := nacosConfig.GetString("spring.rabbitmq.password") + virtualHost := nacosConfig.GetString("spring.rabbitmq.virtual-host") + + // amqp://{username}:{password}@{hostname}:{port}/{virtual_host} + res.WriteString("amqp://") + res.WriteString(username) + res.WriteString(":") + res.WriteString(password) + res.WriteString("@") + res.WriteString(host) + res.WriteString(":") + res.WriteString(port) + res.WriteString("/") + res.WriteString(virtualHost) + + s := res.String() + + log.Debug(fmt.Sprintf("generate RabbitMQ endpoint is %s", s)) + + return s +} diff --git a/agent-go/register/AgentInitialization.go b/agent-go/register/AgentInitialization.go new file mode 100644 index 0000000..fac6152 --- /dev/null +++ b/agent-go/register/AgentInitialization.go @@ -0,0 +1,186 @@ +package register + +import ( + "agent-go/g" + logger2 "agent-go/logger" + "agent-go/rabbitmq" + "encoding/json" + "fmt" + "gopkg.in/yaml.v3" + "io/ioutil" + "time" +) + +var omType = g.InitOmType +var log = logger2.Log +var P = g.G.P + +var AgentServerInfoCache = &AgentServerInfo{} + +func INIT() *AgentServerInfo { + + // 获取系统的环境变量 + agentServerInfo := parseAgentServerInfo() + + nacosConfig := g.G.NacosConfig + + initToServerProp := &rabbitmq.ConnectProperty{ + ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), + QueueName: nacosConfig.GetString("octopus.message.init_to_server"), + ExchangeType: g.QueueDirect, + TopicKey: nacosConfig.GetString("octopus.message.init_to_server_key"), + } + + initFromServerProp := &rabbitmq.ConnectProperty{ + ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), + QueueName: nacosConfig.GetString("octopus.message.init_from_server"), + ExchangeType: g.QueueDirect, + TopicKey: nacosConfig.GetString("octopus.message.init_from_server_key"), + } + + // 建立RabbitMQ的连接 + initToServerQueue := &rabbitmq.RabbitQueue{ + 
RabbitProp: initToServerProp, + } + defer initToServerQueue.Close() + + // 建立连接 + initToServerQueue.Connect() + + // 组装OctopusMessage + var octopusMsg *rabbitmq.OctopusMessage + octopusMsg = octopusMsg.Build( + omType, + agentServerInfo, + ) + msgBytes, err := json.Marshal(octopusMsg) + if err != nil { + log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %v", octopusMsg)) + } + log.Debug(fmt.Sprintf("Prepare to send init message to server! ==> %s", string(msgBytes))) + + // 发送OM至MQ中 + P.Submit( + func() { + for g.G.AgentHasRegister == false { + + //如果agent存活 而Server不存活 那么需要持续不断的向Server中发送消息 + initToServerQueue.Send( + msgBytes, + ) + + // 休眠 + time.Sleep(10 * time.Minute) + + } + + }) + + // 监听初始化连接中的信息 + initFromServerQueue := &rabbitmq.RabbitQueue{ + RabbitProp: initFromServerProp, + } + defer initFromServerQueue.Close() + + // 建立连接 + initFromServerQueue.Connect() + + // 建立运行时RabbitMQ连接 + handleInitMsgFromServer(initFromServerQueue, initToServerQueue, agentServerInfo) + + return agentServerInfo +} + +// handleInitMsgFromServer 处理从Server接收的 注册信息 +func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToServerQueue *rabbitmq.RabbitQueue, agentServerInfo *AgentServerInfo) { + + initOctopusMessageDeliveries := initFromServerQueue.Read(false) + + forever := make(chan bool) + + // use the ant goroutine pool + P.Submit( + func() { + + // 同步很多抢占注册的情况 + for delivery := range initOctopusMessageDeliveries { + + log.Debug(fmt.Sprintf("message received from server is %s", string(delivery.Body))) + + var initOctopusMsg *rabbitmq.OctopusMessage + err := json.Unmarshal(delivery.Body, &initOctopusMsg) + if err != nil { + log.Error(fmt.Sprintf("parse init message from server wroong, message is => %s ", + string(delivery.Body))) + } + + var serverInfo AgentServerInfo + + s, _ := initOctopusMsg.Content.(string) + cc := json.Unmarshal([]byte(s), &serverInfo) + if cc != nil { + log.Error(fmt.Sprintf("parse init message from server wroong, message is => %v ", cc)) + } + serverName := serverInfo.ServerName + + // 处理OM信息 + if initOctopusMsg != nil && initOctopusMsg.Type == g.InitOmType && serverName == agentServerInfo.ServerName { + // 是本机的注册回复信息 + + // 建立 运行时 RabbitMQ连接 + agentTopicName := initOctopusMsg.Result.(string) + rabbitmq.BuildOMsgRuntimeConnectorQueue(agentTopicName) + + // 手动确认信息 + delivery.Ack(false) + + // 手动关闭 注册队列的连接 + shutdownRegisterQueueConnection(initFromServerQueue, initToServerQueue) + + return + } + + // 不是自身的 注册回复信息 -- 拒绝 + log.Warn(fmt.Sprintf("OctopusMessage INIT from server not this agent ! => %v, ==>%s", initOctopusMsg, delivery.Body)) + delivery.Nack(false, true) + } + + }, + ) + + // wait forever + <-forever + +} + +// shutdownRegisterQueueConnection 关闭初始化连接的两个队列 +func shutdownRegisterQueueConnection(initFromServerQueue *rabbitmq.RabbitQueue, initToServerQueue *rabbitmq.RabbitQueue) { + +} + +func parseAgentServerInfo() *AgentServerInfo { + + // 约定文件地址为 /etc/environment.d/octopus-agent.conf + // 目前使用 + var agentServerInfo *AgentServerInfo + //yamlFile, err := ioutil.ReadFile("C:\\Users\\wdd\\IdeaProjects\\ProjectOctopus\\agent-go\\server-env.yaml") + yamlFile, err := ioutil.ReadFile("server-env.yaml") + + if err != nil { + panic(fmt.Errorf("failed to read YAML file: %v", err)) + } + + err = yaml.Unmarshal(yamlFile, &agentServerInfo) + if err != nil { + panic(fmt.Errorf("failed to unmarshal YAML: %v", err)) + } + + jsonFormat, err := json.Marshal(agentServerInfo) + if err != nil { + log.Error(fmt.Sprintf("agent server info convert error ! 
agentserverinfo is %v", agentServerInfo)) + panic(err) + } + log.Info(fmt.Sprintf("agent server info is %v", string(jsonFormat))) + + return agentServerInfo +} diff --git a/agent-go/register/AgentIntitilization.go b/agent-go/register/AgentIntitilization.go deleted file mode 100644 index d833585..0000000 --- a/agent-go/register/AgentIntitilization.go +++ /dev/null @@ -1,175 +0,0 @@ -package register - -import ( - "agent-go/config" - "agent-go/g" - "agent-go/rabbitmq" - "encoding/json" - "fmt" - "gopkg.in/yaml.v3" - "io/ioutil" -) - -var omType = g.InitOmType -var log = g.G.LOG - -func INIT() *config.AgentServerInfo { - - // 获取系统的环境变量 - agentServerInfo := parseAgentServerInfo() - - nacosConfig := g.G.NacosConfig - - initToServerProp := &rabbitmq.ConnectProperty{ - ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), - QueueName: nacosConfig.GetString("octopus.message.init_to_server"), - ExchangeType: g.QueueDirect, - TopicKey: nacosConfig.GetString("octopus.message.init_to_server_key"), - } - - initFromServerProp := &rabbitmq.ConnectProperty{ - ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), - QueueName: nacosConfig.GetString("octopus.message.init_from_server"), - ExchangeType: g.QueueDirect, - TopicKey: nacosConfig.GetString("octopus.message.init_from_server_key"), - } - - // 建立RabbitMQ的连接 - // defer 关闭初始化连接 - initToServer, err := rabbitmq.NewRabbitMQConn( - initToServerProp, - ) - if err != nil { - log.Error("init to server queue established error!") - panic(err) - } - //defer rabbitmq.CloseChannel(initToServer) - //defer rabbitmq.CloseChannel(initFromServer) - - // 组装OctopusMessage - var octopusMsg *config.OctopusMessage - octopusMsg = octopusMsg.BuildOctopusMsg( - omType, - agentServerInfo, - ) - - msgBytes, err := json.Marshal(octopusMsg) - if err != nil { - log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %v", octopusMsg)) - } - - log.Debug(fmt.Sprintf("Prepare to send init message to server! 
==> %s", string(msgBytes))) - - // 发送OM至MQ中 - rabbitmq.Send( - initToServer, - initToServerProp, - msgBytes, - ) - - // 监听初始化连接中的信息 - initFromServer, err := rabbitmq.NewRabbitMQConn( - initFromServerProp, - ) - if err != nil { - log.Error("init from server queue established error!") - panic(err) - } - - // 建立运行时RabbitMQ连接 - handleInitMsgFromServer(initFromServer, initFromServerProp, initToServer, initToServerProp, agentServerInfo) - - return agentServerInfo -} - -// handleInitMsgFromServer 处理从Server接收的注册信息 -func handleInitMsgFromServer(initFromServer *rabbitmq.RabbitMQConn, initFromServerProp *rabbitmq.ConnectProperty, initToServer *rabbitmq.RabbitMQConn, initToServerProp *rabbitmq.ConnectProperty, agentServerInfo *config.AgentServerInfo) { - - deliveries := rabbitmq.Read(initFromServer, initFromServerProp, false) - - forever := make(chan bool) - - go func() { - - // 同步很多抢占注册的情况 - for delivery := range deliveries { - - log.Debug(fmt.Sprintf("message received from server is %s", string(delivery.Body))) - - var om *config.OctopusMessage - err := json.Unmarshal(delivery.Body, &om) - if err != nil { - log.Error(fmt.Sprintf("parse init message from server wroong, message is => %s ", - string(delivery.Body))) - } - - var serverInfo config.AgentServerInfo - - s, _ := om.Content.(string) - cc := json.Unmarshal([]byte(s), &serverInfo) - if cc != nil { - log.Error(fmt.Sprintf("parse init message from server wroong, message is => %v ", cc)) - } - - serverName := serverInfo.ServerName - - // 处理OM信息 - if om != nil && om.Type == g.InitOmType && serverName == agentServerInfo.ServerName { - // 是本机的注册回复信息 - - // 建立运行时RabbitMQ连接 - // change to async - go rabbitmq.HandleOMsg(om) - - // 手动确认信息 - delivery.Ack(false) - - // 手动关闭 注册队列的连接 - shutdownRegisterQueueConnection(initFromServer, initFromServerProp, initToServer, initToServerProp) - - return - } - - // 不是自身的 注册回复信息 -- 拒绝 - log.Warn(fmt.Sprintf("OctopusMessage INIT from server not this agent ! => %v, ==>%s", om, delivery.Body)) - delivery.Nack(false, true) - } - - }() - - // wait forever - <-forever - -} - -// shutdownRegisterQueueConnection 关闭初始化连接的两个队列 -func shutdownRegisterQueueConnection(initFromServer *rabbitmq.RabbitMQConn, initFromServerProp *rabbitmq.ConnectProperty, initToServer *rabbitmq.RabbitMQConn, initToServerProp *rabbitmq.ConnectProperty) { - -} - -func parseAgentServerInfo() *config.AgentServerInfo { - - // 约定文件地址为 /etc/environment.d/octopus-agent.conf - // 目前使用 - var agentServerInfo *config.AgentServerInfo - //yamlFile, err := ioutil.ReadFile("C:\\Users\\wdd\\IdeaProjects\\ProjectOctopus\\agent-go\\server-env.yaml") - yamlFile, err := ioutil.ReadFile("server-env.yaml") - - if err != nil { - panic(fmt.Errorf("failed to read YAML file: %v", err)) - } - - err = yaml.Unmarshal(yamlFile, &agentServerInfo) - if err != nil { - panic(fmt.Errorf("failed to unmarshal YAML: %v", err)) - } - - jsonFormat, err := json.Marshal(agentServerInfo) - if err != nil { - log.Error(fmt.Sprintf("agent server info convert error ! 
agentserverinfo is %v", agentServerInfo)) - panic(err) - } - log.Info(fmt.Sprintf("agent server info is %v", string(jsonFormat))) - - return agentServerInfo -} diff --git a/agent-go/config/AgentServerInfo.go b/agent-go/register/AgentServerInfo.go similarity index 98% rename from agent-go/config/AgentServerInfo.go rename to agent-go/register/AgentServerInfo.go index ad20396..3026134 100644 --- a/agent-go/config/AgentServerInfo.go +++ b/agent-go/register/AgentServerInfo.go @@ -1,4 +1,4 @@ -package config +package register type AgentServerInfo struct { ServerName string `json:"serverName" yaml:"serverName"` diff --git a/agent-go/g/NacosConfig.go b/agent-go/register/NacosInitalization.go similarity index 99% rename from agent-go/g/NacosConfig.go rename to agent-go/register/NacosInitalization.go index 452cf5e..455ebf1 100644 --- a/agent-go/g/NacosConfig.go +++ b/agent-go/register/NacosInitalization.go @@ -1,4 +1,4 @@ -package g +package register import ( "bytes" @@ -13,7 +13,6 @@ import ( "strings" ) -var log = G.LOG var group = "" func InitNacos(configFileName string) *viper.Viper { From d24a338e80bf79f304037bd3a391b2958ac6a061 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 30 Mar 2023 15:10:27 +0800 Subject: [PATCH 03/45] =?UTF-8?q?[agent-go]=20=E4=BC=98=E5=8C=96=E4=BB=A3?= =?UTF-8?q?=E7=A0=81=EF=BC=8C=E5=AE=8C=E6=88=90=E8=BF=9E=E9=80=9A=E6=80=A7?= =?UTF-8?q?=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- agent-go/g/global.go | 2 +- agent-go/octopus-agent-dev.yaml | 4 +- agent-go/rabbitmq/OMsgConnector.go | 45 +++++----- agent-go/register/AgentInitialization.go | 85 ++++++++---------- .../nacos_config_export_20230330143045.zip | Bin 0 -> 11759 bytes .../octopus/run-enviroment-compose.yaml | 2 - 6 files changed, 65 insertions(+), 73 deletions(-) create mode 100644 agent-go/tmp/nacos_config_export_20230330143045.zip diff --git a/agent-go/g/global.go b/agent-go/g/global.go index 4cf4360..c5f7247 100644 --- a/agent-go/g/global.go +++ b/agent-go/g/global.go @@ -20,7 +20,7 @@ const ( InitOmType = "INIT" ) -var pool, _ = ants.NewPool(100, ants.WithNonblocking(true), ants.WithLogger(logger2.Log)) +var pool, _ = ants.NewPool(100, ants.WithNonblocking(false), ants.WithLogger(logger2.Log), ants.WithMaxBlockingTasks(30), ants.WithDisablePurge(true)) var G = NewGlobal( pool, diff --git a/agent-go/octopus-agent-dev.yaml b/agent-go/octopus-agent-dev.yaml index 87c7d61..ac474f7 100644 --- a/agent-go/octopus-agent-dev.yaml +++ b/agent-go/octopus-agent-dev.yaml @@ -10,8 +10,8 @@ spring: config-retry-time: 3000 file-extension: yaml max-retry: 3 - server-addr: "150.230.198.103:21060" -# server-addr: "42.192.52.227:21060" + # server-addr: "150.230.198.103:21060" + server-addr: "42.192.52.227:21060" timeout: 5000 config-long-poll-timeout: 5000 extension-configs: diff --git a/agent-go/rabbitmq/OMsgConnector.go b/agent-go/rabbitmq/OMsgConnector.go index ec016ff..2be7108 100644 --- a/agent-go/rabbitmq/OMsgConnector.go +++ b/agent-go/rabbitmq/OMsgConnector.go @@ -28,26 +28,6 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { } octopusMsgQueue.Connect() - deliveries := octopusMsgQueue.Read(true) - - // 死循环,处理Octopus Message - P.Submit( - func() { - for delivery := range deliveries { - - var om *OctopusMessage - err := json.Unmarshal(delivery.Body, &om) - if err != nil { - log.Error(fmt.Sprintf("octopus message convert to json is wrong! 
msg is => %s", delivery.Body)) - // 保存到某处 - continue - } - - // 策略模式 处理消息 - om.Handle() - } - }) - // 建立 业务消息 返回队列 // 统一为 OctopusToServer @@ -67,6 +47,29 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { // 开启运行时消息返回队列 OctopusToServerQueue.Connect() - log.InfoF("Octopus Message Replay Queue is established ! => %v", OctopusToServerQueue) + log.InfoF("Octopus Message Business Runtime Queue is established ! => %v", OctopusToServerQueue) + + deliveries := octopusMsgQueue.Read(true) + forever := make(chan bool) + P.Submit( + func() { + // 死循环,处理Octopus Message + for delivery := range deliveries { + + var om *OctopusMessage + err := json.Unmarshal(delivery.Body, &om) + if err != nil { + log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %s", delivery.Body)) + // 保存到某处 + continue + } + + // 策略模式 处理消息 + om.Handle() + } + }) + + // wait forever + <-forever } diff --git a/agent-go/register/AgentInitialization.go b/agent-go/register/AgentInitialization.go index fac6152..436537a 100644 --- a/agent-go/register/AgentInitialization.go +++ b/agent-go/register/AgentInitialization.go @@ -57,18 +57,18 @@ func INIT() *AgentServerInfo { if err != nil { log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %v", octopusMsg)) } - log.Debug(fmt.Sprintf("Prepare to send init message to server! ==> %s", string(msgBytes))) // 发送OM至MQ中 P.Submit( func() { for g.G.AgentHasRegister == false { + log.Debug(fmt.Sprintf("Send init message to server! ==> %s", string(msgBytes))) + //如果agent存活 而Server不存活 那么需要持续不断的向Server中发送消息 initToServerQueue.Send( msgBytes, ) - // 休眠 time.Sleep(10 * time.Minute) @@ -96,66 +96,57 @@ func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToSe initOctopusMessageDeliveries := initFromServerQueue.Read(false) - forever := make(chan bool) + // 同步很多抢占注册的情况 + for delivery := range initOctopusMessageDeliveries { - // use the ant goroutine pool - P.Submit( - func() { + log.Debug(fmt.Sprintf("message received from server is %s", string(delivery.Body))) - // 同步很多抢占注册的情况 - for delivery := range initOctopusMessageDeliveries { + var initOctopusMsg *rabbitmq.OctopusMessage + err := json.Unmarshal(delivery.Body, &initOctopusMsg) + if err != nil { + log.Error(fmt.Sprintf("parse init message from server wroong, message is => %s ", + string(delivery.Body))) + } - log.Debug(fmt.Sprintf("message received from server is %s", string(delivery.Body))) + var serverInfo AgentServerInfo - var initOctopusMsg *rabbitmq.OctopusMessage - err := json.Unmarshal(delivery.Body, &initOctopusMsg) - if err != nil { - log.Error(fmt.Sprintf("parse init message from server wroong, message is => %s ", - string(delivery.Body))) - } + s, _ := initOctopusMsg.Content.(string) + cc := json.Unmarshal([]byte(s), &serverInfo) + if cc != nil { + log.Error(fmt.Sprintf("parse init message from server wroong, message is => %v ", cc)) + } + serverName := serverInfo.ServerName - var serverInfo AgentServerInfo + // 处理OM信息 + if initOctopusMsg != nil && initOctopusMsg.Type == g.InitOmType && serverName == agentServerInfo.ServerName { + // 是本机的注册回复信息 + log.InfoF("OctopusMessage INIT from server is this agent !") - s, _ := initOctopusMsg.Content.(string) - cc := json.Unmarshal([]byte(s), &serverInfo) - if cc != nil { - log.Error(fmt.Sprintf("parse init message from server wroong, message is => %v ", cc)) - } - serverName := serverInfo.ServerName + // 手动确认信息 + delivery.Ack(false) - // 处理OM信息 - if initOctopusMsg != nil && initOctopusMsg.Type == g.InitOmType && serverName == 
agentServerInfo.ServerName { - // 是本机的注册回复信息 + // 修改系统参数 + g.G.AgentHasRegister = true - // 建立 运行时 RabbitMQ连接 - agentTopicName := initOctopusMsg.Result.(string) - rabbitmq.BuildOMsgRuntimeConnectorQueue(agentTopicName) + // 建立 运行时 RabbitMQ连接 + agentTopicName := initOctopusMsg.Result.(string) + rabbitmq.BuildOMsgRuntimeConnectorQueue(agentTopicName) - // 手动确认信息 - delivery.Ack(false) + // 手动关闭 注册队列的连接 + shutdownRegisterQueueConnection(initFromServerQueue, initToServerQueue) + return + } - // 手动关闭 注册队列的连接 - shutdownRegisterQueueConnection(initFromServerQueue, initToServerQueue) - - return - } - - // 不是自身的 注册回复信息 -- 拒绝 - log.Warn(fmt.Sprintf("OctopusMessage INIT from server not this agent ! => %v, ==>%s", initOctopusMsg, delivery.Body)) - delivery.Nack(false, true) - } - - }, - ) - - // wait forever - <-forever + // 不是自身的 注册回复信息 -- 拒绝 + log.Warn(fmt.Sprintf("OctopusMessage INIT from server not this agent ! => %v, ==>%s", initOctopusMsg, delivery.Body)) + delivery.Nack(false, true) + } } // shutdownRegisterQueueConnection 关闭初始化连接的两个队列 func shutdownRegisterQueueConnection(initFromServerQueue *rabbitmq.RabbitQueue, initToServerQueue *rabbitmq.RabbitQueue) { - + log.InfoF("Shutdown register queue connection !") } func parseAgentServerInfo() *AgentServerInfo { diff --git a/agent-go/tmp/nacos_config_export_20230330143045.zip b/agent-go/tmp/nacos_config_export_20230330143045.zip new file mode 100644 index 0000000000000000000000000000000000000000..2c33e55b25437245eaac9850f1f83aa9d5608945 GIT binary patch literal 11759 zcmeI2WmH^Qv-feg;1V=6P6u~)2{a){a3>Jl-QC@T1Shx!cMtAPaCaxz3z?Z`GV`vP z`+U3~?(WrVojz-I^{(o(Yu9i8OHLXR3KI+t4i0R}#$5^QmqGM&ZEB&XWBS@c&(^}y z&W2v+gMqm%{m*+qCml0WnXopLb`m82Be&q@RtWKq!P|*^oKjPhUUVoZwx0QhV2bnO zyqh0+TA8)-3ZA&G0|=<{Qeb6EiL-c{@k$kqB+bTh9HV8pVEl%}@tv?{y&H29Dq9Fn zws4PF~5?`q|E3*u9u1LT*slh_YMVI%+=6waqe?C9`%1laq#p z+w*>K2z;9H!qZ#sAAQ34k576QW@Z-Vzc^E@*k+C?j@EQbD|L-d7nWpD$)*AiNFezf zDFSh*bYyy99+GzWxxsmQC`cHEM6}hb7mBnsIxA9`%o|Op`vt#VX%pom1S>P|(pO=q zs8yeXk`kv`;GYwL?|CWE-FuSsR9hOmZDgva?FoTcbRkRD^*&h+>$@#CYVY2Qkm;sA_)uqhEAc3 zL5#Ia&9}n=l$ zuWLigg?{~w^VgeQqpQoM??)&^Jp8T~vE?W=X~d>SZ?w#1h`%S@TYEfS?qw}&N+6_NK zKYT%!MQ0qq5w9`{nle$=GOsVwW@7RyEnyem>SR!d;dbbi-5_9wY6d$LU%?VIJY6tq zy)Y%WHOd;}G?B?hOmMofYixmAa9-%NgXNgG_ebo7B08#VsiVv`p)z+qf5Dejjh{nN zYhI#Q_-OsUtoxm$4J11sSY!7}%<^~N#ux}X!(JusDTXqNM+Gc{LwLdjj$;q%pw=zb zO(+}I&yyRdnJiF<`0H5>!($@28V|(Qm43Rjm1V> zpVaVNM)r@Kg`exy^dhy!Ez#TT^R3h3CzGMWbJVy{7o-hj% zK<61uZKF{=(!1csYwxHx+#zeV%b%@AmUTvCp}RI6751M6QF=%Av8#kdk~^KfaIZ#T3yrM+oSxPn;` zeNmNPMOBrhPk2NG2K!ggZUjr8Vq!NHfmUWpa0YD(lr3>)t3O({-$T-jS?M*R%l+t> zebdYanP~a&tyS8UTiR8a_yUDhKc~O3-1KW^8dq!1U8j?$Wb;)b$xGb-1P+<5`4bOw zsE_F?D;f-~{cbRjARpt{+R#pNGFvuD5Sa~L?`A-b(u@O8Pq*!zPmBJ-HXBpn4T85J z96%5g0Utd`6CLYwWS1jU15`#G&V|l?@2-)c0w}F8eN%jBEaZ|zOPPyJo~zsTOzKQN zM{Z5jY8gX!MWVCTm>P(sK6niZR3tkUrNdCc)>rJP$WDM$0%$6S;LDeXPDHm8Fmd|6 zFC9jTqnKMNQahs%4?BA`XEc&naUXB4ZlpzowB&&ixYD8-SK>nsn_g#mY2| z7c1xfgXw69g}#81Pa|mx7{=0lzuJ{sbh>qqWR)crr1v$vJyg}EWFgw^ZBh-Y4hjz& zB2Qp46`&Kbu}wNN6FxIgIvE&3vl<2gGtfx0lNpMxYt+phBXy4t)v6T0Kw^5mO~Q50`de$ z(J17}W!6xDuQWuJvJf=cnb2q?1G-eB5>IFyAM!!^rzAo=^xQ4g>KMT7d8e`P1jNy| z&e@^69aVQ{qw`=E_mx%m0r9>Yk087;Oi}8DGyIYr1-%Kb(^MzrC!a?p7t_9hNt?@z*)EDDHp4OWMg$a0JO7O4U8O4%2qsMN{*(+$c z^C!uG%FKJctaYW)>7+(-0Asn;PjmJ+UuAl#AX8W`9A6zV*@VviXsW3^pT69bvfVPi zz7srm=2boP2PtLcuyae7@|!Qrsd$t%U5WrZ(<2BFT}joi&9@%bA_&+kUs{Px1?(Du zy*!iJuT9{SdD-*>Fqqu*aBltD>UFzQDRT?du1x#ZfwjI8 zAK184`rWH&K84>uLg_1!dkTer0L$KG7=QbbN66vQ-c_3EqjTZoR|gta-^H*90#v>F zicZEpI@<|0@Vn--Q2RuPDay3rA?Qx%4Q2lxe^^R9zJ$to{8O3tB1(~Wvt0{iy-ZQ! 
zgm62^2GMA}=z$lXx-K7C5O+ua^HgV63?53S(4 z0v-Xw&Eah9%eBYzk4V#6pIA_jn)i?R2u&XmX5WOmX2i(6IZ>()DNyn_LHp}oU8n%Qr8-q`Fq;us=fo+>lMf2+(0pDMH8gq(k< z&Eg^$%{v&7L>;> zgrEeQKJG!Ekal9KnnRi{ltz)u$>1cGo9>6SAsB>8zC$8$jcL{%?%k`%TnU2xuAzIh ziRlO$g~&?zDI(7MxaEMWmb+YT++(_WLD}uus^ZxL^HU+~X#;Z$c#rQEJ+2Q!PQ03Y z#Su)U)_5L251(^!nGC0%+#ml}27W#`&^NIE+rh!po!?Zh@ZVJ~9#JjrHphj*2Ad@3 z<8OulawJ?#1B)ZvapkK4+j4K(8!xufhD^?~I0G>D0o z!TN4HV4rF*{6~Upm6OTFB?FjNI766P6C@U0m$j-}yyK%;mzP19A|&6yP!8 zrBpM~0fP4*TybuxA0aBQI{5NLP|(Bo9Y1U~TeKw2s=yLec;#H-|3G|OC@F+(u#HehY zEDKDPP0pyZ`&mpXJS%wl0n-4>#>n#zD)ggj64SQbM#$Q2rp4;T!||ol-$cf6F*No| zWl!ec-BnXR%u+x6c%zw!jzsi;RqTOt(2VmPO|Dm=5Aj0&d3AP zG*}ADNFvst;3 zb-!}5)^@hIeJJ^Oy#0fUZ%0%73^>`s<-j;_d}38qNodiGZ%*_nwdnlOT}BE9mLGjd zduw**@a>&7<&r9)d&`UBMlNm@`U;gv_%hXIjlr&_hr#9NLK4?{+<;4+EYLYlc`hs%bo^1;TdrJM7nbpjy>H*~pcZJ+l17REeLjcznYwIGs1Sq#R(u`(=e zfFn$6@R0beEESHC?-t-0>6T6EA~Un0YSst!FSOVYgX`x<7P>b)z%@zE$gcxb7P7&( z*d@Z)$42DGT8a=bxIeGCJ<+`s!m=K{o_Be&DcafVL}NJgDx*W#xN}4_rpCCi%NlV4 z5>dNxeZNLy{AJ~jaqD&f;RhBo%HBQH^btQecJp<3azdyBhOk)aMcGM;VZ*TZ+gV6^ z>a-h3#2;o`0Hkpr-f#O4qbJ(p;m{ka(!BzG1P^@))s_lBC*)#)SI?fxPyF7A8kg>1JK9*@R6$Xe8n67oD&w2U7dHNt&&wHujtZgFU6 zv4OR3KM1%_Z9!^z+-mFKM}E9Ud7~a&CffS0o{G6lA2*`b3lJN_+QHFFp$VxIshd}_ zUE#&$Me;#j>PzlkEZ&4R2D4Hxl--!WMrMB|P^w;@JTn|qv&$EURhTX)y4OU4%vn-i zgs<`hF1PbF^ZDPI{^|=a#Y9|qSZsLtNaIqY(u#>Ud6;1?u3%s;>IVeZ0=Z}?(b#J} zmQ~o%<=h-O!vC7-jRsRvn1G;m!U<4LGS*P+9I-So`*Sk69T8R4@ga2W0hJIa|PeWRaE zh{d`^N^8|)?iddYlo#@*!WDWe&+<0fQgYltQq;Rmuk}^Fs77=-X__GBKxzw*%KgHp zWQ{Kwvz!L2b9Yjob7YjelW)pHm~kL8HG~Cw1jX*x0_mnlHMWF3={`E}6duq@6CTlJ zaHY(pcE6+R0|>s=7iNV*UL$JXaAGx^T2P&k0o-Lct4W#OY?0KVl@0l^`SSuvK(dbQ4vGK8v1b{ z`2?F>8LpC_RDUma_eOFJLIt1CPJ@4gTtL;TK+}+=Mha@jVLV&1NO#WdK+ONCv4~!% zE?Gr?|T(eP|Xau@^b<010&Blpbr&_g~%Extha znJ~8cmGNSu`vc2+*u=$-MOdGMJSO7DrtfN4*OnK<9Tvnb-y7m?0(mR=GHf98n=?9t z#Sh>0jHXZ)Oz)pHOpAxFV56Z-noUUQn%0&)i_{SGL{7n)e8}HT#TJXV50z?vd~I>T zX+*hv#OIsyy(PDMsYMqw-8P7`!u}Y1xOjAxhVlS@1v(m6K(oJEI^qrOkK+sv84waP z!|>)WPmd$~;d2qRRoM^zxpnwk=XvMM%U9$zTJZ%fi~C2-2S={s-(8|Z<~`Zyw|zPz z2;Vo)3QW8v7S>0`K#y-*yGa_B4W#n2S+0s1`?NR}Y0*AX?!7)2rqW8*FGA!ckU=g zLISe|2<)~{FkP|5hI?Y=EHyO&(wVzhJa@OP<;Bb8yy|1a*Qy@ahoyP4Bv4aT1PprW zsvE01O0Pa8DTG0)U>SmM-EgtMJ&D}3Ui|RiW*Z-BbJmDTfDPk2yS+pb*Nc1GL-sRj zJ~fK9KMn*f(hWKe1|J%#eb}FVjTkcg6}-(g*B)Umd9KDQ1FrxLgfOHCvfn8u_SY_sX2OZZO9|(p3Cq zwu%UU$vs>QvS--z%GKia$oi=fNsE2SrvP#mxWRPp1pc0CQ(E@ub?C1%9EpiQ&6`+o z0GjTXRl=0}pMpzd=IhFUC#=W&SFC@k*1&*f2DUmtCo|LjK9KCdXg>(p_nL}+O6Cn{ zkalzsJZMspVsmpFfQ>nmDRFAOk+DtnQw49&fhOzkk26ybHH4Le%0niZEWj%0%K_Yo z)*I(I^#PJ@oUY@tIAIHS7d`L`@nOf*`LGg zN!I%7g#N#_v%eerRaSpKIQVmvKXoVkbN|8L?fj~$J)`-bWAo{6f41|xUw;S?e>d=} zs_`to{W)Hv{bt}FO7*`R`t=m)uZE7%|LW22!TF~d{d(H+eCYJ&=)?OX&j00{em&Co zYm|4NKK@$9{nnKHM-}&X-+pDAzeaiF$<=2&e>)EOyMcd>GAhY$2L4NreqGdmjq(WD rUp@Lg%KzljuS@Sa2mCoQsDH@_a?;RGDFh4*`RNt^M0dh8&!7Gu<_sG@ literal 0 HcmV?d00001 diff --git a/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml b/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml index e1f33b9..107a94e 100644 --- a/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml +++ b/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml @@ -78,8 +78,6 @@ services: - MYSQL_SERVICE_PASSWORD=Superwmm.23 depends_on: - mysql - volumes: - - 'rabbitmq_data:/bitnami/rabbitmq/mnesia' # redis-replica: # image: redis-image From 61481e7543fc0e98f6e9c80621159d24082c0e8f Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 30 Mar 2023 16:10:34 +0800 Subject: [PATCH 04/45] [agent-go] fix-bugs --- agent-go/executor/CommandExecutor.go | 6 ++---- 
agent-go/rabbitmq/OMsgConnector.go | 4 +++- agent-go/register/AgentInitialization.go | 4 ++++ agent-go/tmp/executor-om-multiline.json | 8 ++++++++ agent-go/tmp/executor-om-pipeline.json | 9 +++++++++ agent-go/tmp/executor-om-single.json | 2 +- 6 files changed, 27 insertions(+), 6 deletions(-) create mode 100644 agent-go/tmp/executor-om-multiline.json create mode 100644 agent-go/tmp/executor-om-pipeline.json diff --git a/agent-go/executor/CommandExecutor.go b/agent-go/executor/CommandExecutor.go index a972fee..26f1c61 100644 --- a/agent-go/executor/CommandExecutor.go +++ b/agent-go/executor/CommandExecutor.go @@ -74,7 +74,6 @@ func PipeLineCommandExecutor(pipeLineCommand [][]string) ([]string, error) { lastCmd := cmds[len(cmds)-1] var out bytes.Buffer - lastCmd.Stdout = &out lastCmd.Stderr = &out err := lastCmd.Run() @@ -95,13 +94,12 @@ func PipeLineCommandExecutor(pipeLineCommand [][]string) ([]string, error) { func MultiLineCommandExecutor(multiLineCommandExecutor [][]string) ([]string, error) { var res []string - for _, singleLineCommand := range multiLineCommandExecutor { - singleLogs, err := SingleLineCommandExecutor(singleLineCommand) - res := append(res, singleLogs...) + res = append(res, singleLogs...) if err != nil { + log.Error(fmt.Sprintf("Execution error ! command is %v, error is %v", singleLineCommand, err)) return res, err } diff --git a/agent-go/rabbitmq/OMsgConnector.go b/agent-go/rabbitmq/OMsgConnector.go index 2be7108..5d28d25 100644 --- a/agent-go/rabbitmq/OMsgConnector.go +++ b/agent-go/rabbitmq/OMsgConnector.go @@ -65,7 +65,9 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { } // 策略模式 处理消息 - om.Handle() + P.Submit(func() { + om.Handle() + }) } }) diff --git a/agent-go/register/AgentInitialization.go b/agent-go/register/AgentInitialization.go index 436537a..c762d0c 100644 --- a/agent-go/register/AgentInitialization.go +++ b/agent-go/register/AgentInitialization.go @@ -140,6 +140,10 @@ func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToSe // 不是自身的 注册回复信息 -- 拒绝 log.Warn(fmt.Sprintf("OctopusMessage INIT from server not this agent ! 
=> %v, ==>%s", initOctopusMsg, delivery.Body)) delivery.Nack(false, true) + + // 需要休眠等待不再获取相应的信息 + time.Sleep(5 * time.Second) + } } diff --git a/agent-go/tmp/executor-om-multiline.json b/agent-go/tmp/executor-om-multiline.json new file mode 100644 index 0000000..c6b9377 --- /dev/null +++ b/agent-go/tmp/executor-om-multiline.json @@ -0,0 +1,8 @@ +{ + "uuid": "2023-03-27 14:38:49", + "init_time": "2023-03-27T14:38:49.8162801+08:00", + "type": "EXECUTOR", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": [[\"ls\",\"-l\"],[\"pwd\"]],\n \"pipeLineCommand\": null,\n \"resultKey\": \"output\"\n}", + "result": "", + "ac_time": "0001-01-01T00:00:00Z" +} diff --git a/agent-go/tmp/executor-om-pipeline.json b/agent-go/tmp/executor-om-pipeline.json new file mode 100644 index 0000000..36044a2 --- /dev/null +++ b/agent-go/tmp/executor-om-pipeline.json @@ -0,0 +1,9 @@ +{ + "uuid": "2023-03-27 14:38:49", + "init_time": "2023-03-27T14:38:49.8162801+08:00", + "type": "EXECUTOR", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": null,\n \"pipeLineCommand\": [[\"echo\",\"yes china\"],[\"awk\",\"'{print$2}'\"]],\n \"resultKey\": \"output\"\n}\n", + "result": "", + "ac_time": "0001-01-01T00:00:00Z" +} + diff --git a/agent-go/tmp/executor-om-single.json b/agent-go/tmp/executor-om-single.json index f48b02e..fa14caa 100644 --- a/agent-go/tmp/executor-om-single.json +++ b/agent-go/tmp/executor-om-single.json @@ -2,7 +2,7 @@ "uuid": "2023-03-27 14:38:49", "init_time": "2023-03-27T14:38:49.8162801+08:00", "type": "EXECUTOR", - "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"command\",\n \"singleLineCommand\": [\n \"ls\",\n \"-la\"\n ],\n \"multiLineCommand\": null,\n \"pipeLineCommand\": null,\n \"resultKey\": \"output\"\n}\n", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"command\",\n \"singleLineCommand\": [\n \"ls\",\n \"-l\"\n ],\n \"multiLineCommand\": null,\n \"pipeLineCommand\": null,\n \"resultKey\": \"output\"\n}\n", "result": "", "ac_time": "0001-01-01T00:00:00Z" } From 3e9c95ed6d0ed804da1919af0218d3fd50dddb61 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 30 Mar 2023 16:17:13 +0800 Subject: [PATCH 05/45] [agent-go] fix-bugs --- agent-go/executor/CommandExecutor.go | 1 + agent-go/logger/logger.go | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/agent-go/executor/CommandExecutor.go b/agent-go/executor/CommandExecutor.go index 26f1c61..615d169 100644 --- a/agent-go/executor/CommandExecutor.go +++ b/agent-go/executor/CommandExecutor.go @@ -63,6 +63,7 @@ func PipeLineCommandExecutor(pipeLineCommand [][]string) ([]string, error) { prevCmd := cmds[i-1] out, err := prevCmd.StdoutPipe() if err != nil { + log.ErrorF("Pipeline command error happened! Command is => %v, Error is %v", partOfCommand, err) return nil, err } cmd.Stdin = out diff --git a/agent-go/logger/logger.go b/agent-go/logger/logger.go index 11177a8..8a0acc0 100644 --- a/agent-go/logger/logger.go +++ b/agent-go/logger/logger.go @@ -67,8 +67,9 @@ func (l *Logger) Warn(msg string, fields ...zap.Field) { } // Error logs an error message. -func (l *Logger) Error(msg string, fields ...zap.Field) { - l.Logger.Error(msg, fields...) 
+ +func (l *Logger) ErrorF(msg string, args ...interface{}) { + l.Logger.Error(fmt.Sprintf(msg, args...)) } // Fatal logs a fatal message and exits the program with a non-zero status code. From d5193a3be1a4c5e972c90917fd7e339fbbca80a1 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 30 Mar 2023 16:45:01 +0800 Subject: [PATCH 06/45] [agent-go] new cpu status --- agent-go/go.mod | 1 + agent-go/go.sum | 2 ++ agent-go/main.go | 9 +++++ agent-go/status/cpu.go | 47 ++++++++++++++++++++++++++ agent-go/tmp/executor-om-pipeline.json | 2 +- 5 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 agent-go/status/cpu.go diff --git a/agent-go/go.mod b/agent-go/go.mod index 4ffda9b..a5b4461 100644 --- a/agent-go/go.mod +++ b/agent-go/go.mod @@ -3,6 +3,7 @@ module agent-go go 1.18 require ( + github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 github.com/nacos-group/nacos-sdk-go/v2 v2.2.0 github.com/panjf2000/ants/v2 v2.7.2 github.com/spf13/viper v1.15.0 diff --git a/agent-go/go.sum b/agent-go/go.sum index 88dfd31..887ee29 100644 --- a/agent-go/go.sum +++ b/agent-go/go.sum @@ -55,6 +55,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK84APFuMvxqsk3tEIaKH/z4Rpu3g= +github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= diff --git a/agent-go/main.go b/agent-go/main.go index 8ed778d..3127e9b 100644 --- a/agent-go/main.go +++ b/agent-go/main.go @@ -4,6 +4,7 @@ import ( "agent-go/g" logger2 "agent-go/logger" "agent-go/register" + "agent-go/status" "flag" "fmt" ) @@ -26,4 +27,12 @@ func main() { // 执行初始化之策工作 register.AgentServerInfoCache = register.INIT() + // 测试调用CPU状态 + getCpuMap, err := status.GetCpuMap() + if err != nil { + log.ErrorF("error is %v", err) + } + + log.InfoF("cpu status is %v", getCpuMap) + } diff --git a/agent-go/status/cpu.go b/agent-go/status/cpu.go new file mode 100644 index 0000000..b3d46f3 --- /dev/null +++ b/agent-go/status/cpu.go @@ -0,0 +1,47 @@ +package status + +import ( + "fmt" + linuxproc "github.com/c9s/goprocinfo/linux" + "time" +) + +func GetCpuMap() (map[string]uint64, error) { + statA, err := linuxproc.ReadStat("/proc/stat") + statErrMsg := "failed to stat CPU data, received error: %s" + if err != nil { + return nil, fmt.Errorf(statErrMsg, err.Error()) + } + + time.Sleep(time.Second) + + statB, err := linuxproc.ReadStat("/proc/stat") + if err != nil { + return nil, fmt.Errorf(statErrMsg, err.Error()) + } + + resultMap := make(map[string]uint64) + resultMap["all_active_percent"] = cpuStatToPercent(statA.CPUStatAll, statB.CPUStatAll) + for idx, statB := range statB.CPUStats { + statA := statA.CPUStats[idx] + resultMap[statB.Id+"_active_percent"] = cpuStatToPercent(statA, statB) + } + + return resultMap, nil +} + +func cpuStatToPercent(statA, statB linuxproc.CPUStat) uint64 { + aIdle := statA.Idle + statA.IOWait + bIdle := statB.Idle + 
statB.IOWait + + aNonIdle := statA.User + statA.Nice + statA.System + statA.IRQ + statA.SoftIRQ + statA.Steal + bNonIdle := statB.User + statB.Nice + statB.System + statB.IRQ + statB.SoftIRQ + statB.Steal + + aTotal := aIdle + aNonIdle + bTotal := bIdle + bNonIdle + + totalDiff := bTotal - aTotal + idleDiff := bIdle - aIdle + + return uint64((float64(totalDiff-idleDiff) / float64(totalDiff)) * 100) +} diff --git a/agent-go/tmp/executor-om-pipeline.json b/agent-go/tmp/executor-om-pipeline.json index 36044a2..2bf979e 100644 --- a/agent-go/tmp/executor-om-pipeline.json +++ b/agent-go/tmp/executor-om-pipeline.json @@ -2,7 +2,7 @@ "uuid": "2023-03-27 14:38:49", "init_time": "2023-03-27T14:38:49.8162801+08:00", "type": "EXECUTOR", - "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": null,\n \"pipeLineCommand\": [[\"echo\",\"yes china\"],[\"awk\",\"'{print$2}'\"]],\n \"resultKey\": \"output\"\n}\n", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": null,\n \"pipeLineCommand\": [[\"ls\",\"-la\"],[\"grep\",\"-c\", \"dev\"]],\n \"resultKey\": \"output\"\n}\n", "result": "", "ac_time": "0001-01-01T00:00:00Z" } From bb724603ccc01a29770d2206e67e43eef8566ffc Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 30 Mar 2023 16:47:30 +0800 Subject: [PATCH 07/45] [agent-go] new cpu status --- agent-go/main.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/agent-go/main.go b/agent-go/main.go index 3127e9b..30ecb80 100644 --- a/agent-go/main.go +++ b/agent-go/main.go @@ -13,6 +13,14 @@ var log = logger2.Log func main() { + // 测试调用CPU状态 + getCpuMap, err := status.GetCpuMap() + if err != nil { + log.ErrorF("error is %v", err) + } + + log.InfoF("cpu status is %v", getCpuMap) + // 解析命令行参数 var version string flag.StringVar(&version, "version", "", "config file version") @@ -27,12 +35,4 @@ func main() { // 执行初始化之策工作 register.AgentServerInfoCache = register.INIT() - // 测试调用CPU状态 - getCpuMap, err := status.GetCpuMap() - if err != nil { - log.ErrorF("error is %v", err) - } - - log.InfoF("cpu status is %v", getCpuMap) - } From c79eaab3a32a6a75b1d3ad84bb55cab0529107dc Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 13 Apr 2023 15:19:52 +0800 Subject: [PATCH 08/45] [agent-go] update pipline command executor --- agent-go/executor/CommandExecutor.go | 47 +++++++------------------- agent-go/tmp/executor-om-pipeline.json | 2 +- 2 files changed, 14 insertions(+), 35 deletions(-) diff --git a/agent-go/executor/CommandExecutor.go b/agent-go/executor/CommandExecutor.go index 615d169..3c65b4d 100644 --- a/agent-go/executor/CommandExecutor.go +++ b/agent-go/executor/CommandExecutor.go @@ -6,6 +6,7 @@ import ( "bytes" "fmt" "os/exec" + "strings" ) type ExecutionMessage struct { @@ -54,42 +55,20 @@ func Execute(em *ExecutionMessage) ([]string, error) { func PipeLineCommandExecutor(pipeLineCommand [][]string) ([]string, error) { - var cmds []*exec.Cmd - - // 创建每个命令对象,并将前一个命令的标准输出连接到当前命令的标准输入 - for i, partOfCommand := range pipeLineCommand { - cmd := exec.Command(partOfCommand[0], partOfCommand[1:]...) - if i > 0 { - prevCmd := cmds[i-1] - out, err := prevCmd.StdoutPipe() - if err != nil { - log.ErrorF("Pipeline command error happened! 
Command is => %v, Error is %v", partOfCommand, err) - return nil, err - } - cmd.Stdin = out + var output []byte + var err error + for i, command := range pipeLineCommand { + cmd := exec.Command(command[0], command[1:]...) + cmd.Stdin = bytes.NewReader(output) + output, err = cmd.Output() + if err != nil { + return strings.Split(string(output), "\n"), err + } + if i == len(pipeLineCommand)-1 { + return strings.Split(string(output), "\n"), nil } - cmds = append(cmds, cmd) } - - // 执行最后一个命令,并获取其输出 - lastCmd := cmds[len(cmds)-1] - - var out bytes.Buffer - lastCmd.Stdout = &out - lastCmd.Stderr = &out - err := lastCmd.Run() - - scanner := bufio.NewScanner(&out) - var result []string - for scanner.Scan() { - result = append(result, scanner.Text()) - } - - if err != nil { - return nil, err - } - - return result, nil + return []string{}, nil } func MultiLineCommandExecutor(multiLineCommandExecutor [][]string) ([]string, error) { diff --git a/agent-go/tmp/executor-om-pipeline.json b/agent-go/tmp/executor-om-pipeline.json index 2bf979e..55c3756 100644 --- a/agent-go/tmp/executor-om-pipeline.json +++ b/agent-go/tmp/executor-om-pipeline.json @@ -2,7 +2,7 @@ "uuid": "2023-03-27 14:38:49", "init_time": "2023-03-27T14:38:49.8162801+08:00", "type": "EXECUTOR", - "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": null,\n \"pipeLineCommand\": [[\"ls\",\"-la\"],[\"grep\",\"-c\", \"dev\"]],\n \"resultKey\": \"output\"\n}\n", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": null,\n \"pipeLineCommand\": [[\"ls\",\"-la\"],[\"grep\", \"dev\"]],\n \"resultKey\": \"output\"\n}\n", "result": "", "ac_time": "0001-01-01T00:00:00Z" } From c2a22b066f0660f07f31c010ad8104a57293ba7d Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 13 Apr 2023 17:24:39 +0800 Subject: [PATCH 09/45] [agent-go] [status] add status module --- agent-go/go.mod | 11 ++- agent-go/go.sum | 31 ++++++-- agent-go/rabbitmq/OctopusMessage.go | 3 + agent-go/status/CPU.go | 49 ++++++++++++ agent-go/status/CPU_test.go | 22 ++++++ agent-go/status/Memory.go | 44 +++++++++++ agent-go/status/Memory_test.go | 20 +++++ agent-go/status/Network.go | 103 +++++++++++++++++++++++++ agent-go/status/Network_test.go | 22 ++++++ agent-go/status/Status.go | 7 ++ agent-go/status/cpu.go | 47 ----------- agent-go/tmp/executor-om-pipeline.json | 2 +- 12 files changed, 306 insertions(+), 55 deletions(-) create mode 100644 agent-go/status/CPU.go create mode 100644 agent-go/status/CPU_test.go create mode 100644 agent-go/status/Memory.go create mode 100644 agent-go/status/Memory_test.go create mode 100644 agent-go/status/Network.go create mode 100644 agent-go/status/Network_test.go create mode 100644 agent-go/status/Status.go delete mode 100644 agent-go/status/cpu.go diff --git a/agent-go/go.mod b/agent-go/go.mod index a5b4461..8c19d58 100644 --- a/agent-go/go.mod +++ b/agent-go/go.mod @@ -3,9 +3,9 @@ module agent-go go 1.18 require ( - github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 github.com/nacos-group/nacos-sdk-go/v2 v2.2.0 github.com/panjf2000/ants/v2 v2.7.2 + github.com/shirou/gopsutil/v3 v3.23.3 github.com/spf13/viper v1.15.0 github.com/streadway/amqp v1.0.0 go.uber.org/zap v1.24.0 @@ -18,11 +18,13 @@ require ( github.com/buger/jsonparser v1.1.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect 
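+	// most of the new // indirect entries below (go-ole, lufia/plan9stats, power-devops/perfstat, tklauser/*, yusufpapurcu/wmi, shoenig/go-m1cpu) are platform probes pulled in by gopsutil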
github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -30,20 +32,25 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.12.2 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect + github.com/shoenig/go-m1cpu v0.1.4 // indirect github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.4.2 // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/net v0.4.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.3.0 // indirect + golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.5.0 // indirect golang.org/x/time v0.1.0 // indirect google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect diff --git a/agent-go/go.sum b/agent-go/go.sum index 887ee29..8d9c8f3 100644 --- a/agent-go/go.sum +++ b/agent-go/go.sum @@ -55,8 +55,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK84APFuMvxqsk3tEIaKH/z4Rpu3g= -github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -95,6 +93,8 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= @@ -142,6 +142,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -191,6 +192,8 @@ github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -219,6 +222,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -244,6 +249,12 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/shirou/gopsutil/v3 v3.23.3 h1:Syt5vVZXUDXPEXpIBt5ziWsJ4LdSAAxF4l/xZeQgSEE= +github.com/shirou/gopsutil/v3 v3.23.3/go.mod h1:lSBNN6t3+D6W5e5nXTxc8KIMMVxAcS+6IJlffjRRlMU= +github.com/shoenig/go-m1cpu v0.1.4 h1:SZPIgRM2sEF9NJy50mRHu9PKGwxyyTTJIWvCtgVbozs= +github.com/shoenig/go-m1cpu v0.1.4/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZym39dbAQ= +github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= +github.com/shoenig/test v0.6.3/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -270,15 +281,22 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -412,6 +430,7 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -435,6 +454,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -447,8 +467,9 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go index 3b57302..2c684ad 100644 --- a/agent-go/rabbitmq/OctopusMessage.go +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -105,6 +105,9 @@ func executorOMHandler(octopusMessage *OctopusMessage) { } func statusOMHandler(octopusMessage *OctopusMessage) { + log.Info("接收到查询Agent状态的请求,假装已经处理") + + //statusMessageString := octopusMessage.Content.(string) } diff --git a/agent-go/status/CPU.go b/agent-go/status/CPU.go new file mode 100644 index 0000000..7cc60ec --- /dev/null +++ b/agent-go/status/CPU.go @@ -0,0 +1,49 @@ +package status + +import ( + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/load" +) + +type CPUStatus struct { + NumCores int + CPUInfo []cpu.InfoStat + CPUPercent float64 + CPULoads *load.AvgStat + SystemLoads *load.AvgStat +} + +func GetCPUStatus() (*CPUStatus, error) { + numCores, err := cpu.Counts(true) + if err != nil { + return nil, err + } + + cpuInfo, err := cpu.Info() + if err != nil { + return nil, err + } + + cpuPercent, err := cpu.Percent(0, false) + if err != nil { + return nil, err + } + + cpuLoads, err := load.Avg() + if err != nil { + return nil, err + } + + systemLoads, err := load.Avg() + if err != nil { + return nil, err + } + + return &CPUStatus{ + NumCores: numCores, + CPUInfo: cpuInfo, + CPUPercent: cpuPercent[0], + CPULoads: cpuLoads, + SystemLoads: systemLoads, + }, nil +} diff --git a/agent-go/status/CPU_test.go b/agent-go/status/CPU_test.go new file mode 100644 index 0000000..0183fb3 --- /dev/null +++ b/agent-go/status/CPU_test.go @@ -0,0 +1,22 @@ +package status + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestGetCPUStatus(t *testing.T) { + cpuStatus, err := GetCPUStatus() + if err != nil { + return + } + + marshalIndent, err := json.MarshalIndent(cpuStatus, "", " ") + if err != nil { + fmt.Printf("error") + } + + 
fmt.Println(string(marshalIndent)) + +} diff --git a/agent-go/status/Memory.go b/agent-go/status/Memory.go new file mode 100644 index 0000000..ef79c1b --- /dev/null +++ b/agent-go/status/Memory.go @@ -0,0 +1,44 @@ +package status + +import ( + "fmt" + "github.com/shirou/gopsutil/v3/mem" +) + +type MemoryStatus struct { + TotalMemory uint64 + UsedMemory uint64 + AvailableMemory uint64 + TotalVirtualMemory uint64 + UsedVirtualMemory uint64 +} + +func GetMemoryStatus() (MemoryStatus, error) { + var memoryStatus MemoryStatus + + virtualMemoryStat, err := mem.VirtualMemory() + if err != nil { + return memoryStatus, err + } + + memoryStatus.TotalMemory = virtualMemoryStat.Total + memoryStatus.UsedMemory = virtualMemoryStat.Used + memoryStatus.AvailableMemory = virtualMemoryStat.Available + memoryStatus.TotalVirtualMemory = virtualMemoryStat.Total + memoryStatus.UsedVirtualMemory = virtualMemoryStat.Used + + return memoryStatus, nil +} + +func FormatMemorySize(size uint64) string { + const unit = 1024 + if size < unit { + return fmt.Sprintf("%d B", size) + } + div, exp := int64(unit), 0 + for n := size / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(size)/float64(div), "KMGTPE"[exp]) +} diff --git a/agent-go/status/Memory_test.go b/agent-go/status/Memory_test.go new file mode 100644 index 0000000..2842071 --- /dev/null +++ b/agent-go/status/Memory_test.go @@ -0,0 +1,20 @@ +package status + +import ( + "fmt" + "testing" +) + +func TestGetMemoryStatus(t *testing.T) { + + memoryStatus, err := GetMemoryStatus() + if err != nil { + return + } + + fmt.Printf("Total Memory: %s\n", FormatMemorySize(memoryStatus.TotalMemory)) + fmt.Printf("Used Memory: %s\n", FormatMemorySize(memoryStatus.UsedMemory)) + fmt.Printf("Available Memory: %s\n", FormatMemorySize(memoryStatus.AvailableMemory)) + fmt.Printf("Total Virtual Memory: %s\n", FormatMemorySize(memoryStatus.TotalVirtualMemory)) + fmt.Printf("Used Virtual Memory: %s\n", FormatMemorySize(memoryStatus.UsedVirtualMemory)) +} diff --git a/agent-go/status/Network.go b/agent-go/status/Network.go new file mode 100644 index 0000000..d37e66b --- /dev/null +++ b/agent-go/status/Network.go @@ -0,0 +1,103 @@ +package status + +import ( + "fmt" + "github.com/shirou/gopsutil/v3/net" + "strings" + "time" +) + +type NetworkInfo struct { + Name string `json:"name,omitempty"` + InternalIP string `json:"internal_ip,omitempty"` + ExternalIP string `json:"external_ip,omitempty"` + Mac string `json:"mac,omitempty"` + Sent uint64 `json:"sent,omitempty"` + Recv uint64 `json:"recv,omitempty"` + SentRate string `json:"sent_rate,omitempty"` + RecvRate string `json:"recv_rate,omitempty"` +} + +func main() { + info, err := GetNetworkInfo() + if err != nil { + panic(err) + } + fmt.Printf("%+v\n", info) +} + +func GetNetworkInfo() (*NetworkInfo, error) { + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + var mainInterface net.InterfaceStat + for _, intf := range interfaces { + if strings.HasPrefix(intf.Name, "ens") || strings.HasPrefix(intf.Name, "eth") || strings.HasPrefix(intf.Name, "eno") { + mainInterface = intf + break + } + } + var internalIP string + log.DebugF("all main interface address are %v", mainInterface.Addrs) + for _, addr := range mainInterface.Addrs { + if strings.Contains(addr.Addr, ".") { + internalIP = addr.Addr + break + } + } + counters, err := net.IOCounters(true) + if err != nil { + return nil, err + } + var sent uint64 + var recv uint64 + for _, counter := range counters { + if 
counter.Name == mainInterface.Name { + sent = counter.BytesSent + recv = counter.BytesRecv + break + } + } + + // 休眠3秒 + + time.Sleep(3 * time.Second) + + var sentAfter uint64 + var recvAfter uint64 + for _, counter := range counters { + if counter.Name == mainInterface.Name { + sentAfter = counter.BytesSent + recvAfter = counter.BytesRecv + break + } + } + sendRate := fmt.Sprintf(formatBytes((sentAfter - sent) / 3)) + recvRate := fmt.Sprintf(formatBytes((recvAfter - recv) / 3)) + + info := &NetworkInfo{ + Name: mainInterface.Name, + InternalIP: internalIP, + ExternalIP: "", + Mac: mainInterface.HardwareAddr, + Sent: sent, + Recv: recv, + SentRate: sendRate, + RecvRate: recvRate, + } + return info, nil +} + +func formatBytes(bytes uint64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} diff --git a/agent-go/status/Network_test.go b/agent-go/status/Network_test.go new file mode 100644 index 0000000..0222710 --- /dev/null +++ b/agent-go/status/Network_test.go @@ -0,0 +1,22 @@ +package status + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestGetNetworkInfo(t *testing.T) { + + networkInfo, err := GetNetworkInfo() + if err != nil { + return + } + + marshalIndent, err := json.MarshalIndent(networkInfo, "", " ") + if err != nil { + fmt.Printf("error") + } + + fmt.Println(string(marshalIndent)) +} diff --git a/agent-go/status/Status.go b/agent-go/status/Status.go new file mode 100644 index 0000000..34e9809 --- /dev/null +++ b/agent-go/status/Status.go @@ -0,0 +1,7 @@ +package status + +import ( + logger2 "agent-go/logger" +) + +var log = logger2.Log diff --git a/agent-go/status/cpu.go b/agent-go/status/cpu.go deleted file mode 100644 index b3d46f3..0000000 --- a/agent-go/status/cpu.go +++ /dev/null @@ -1,47 +0,0 @@ -package status - -import ( - "fmt" - linuxproc "github.com/c9s/goprocinfo/linux" - "time" -) - -func GetCpuMap() (map[string]uint64, error) { - statA, err := linuxproc.ReadStat("/proc/stat") - statErrMsg := "failed to stat CPU data, received error: %s" - if err != nil { - return nil, fmt.Errorf(statErrMsg, err.Error()) - } - - time.Sleep(time.Second) - - statB, err := linuxproc.ReadStat("/proc/stat") - if err != nil { - return nil, fmt.Errorf(statErrMsg, err.Error()) - } - - resultMap := make(map[string]uint64) - resultMap["all_active_percent"] = cpuStatToPercent(statA.CPUStatAll, statB.CPUStatAll) - for idx, statB := range statB.CPUStats { - statA := statA.CPUStats[idx] - resultMap[statB.Id+"_active_percent"] = cpuStatToPercent(statA, statB) - } - - return resultMap, nil -} - -func cpuStatToPercent(statA, statB linuxproc.CPUStat) uint64 { - aIdle := statA.Idle + statA.IOWait - bIdle := statB.Idle + statB.IOWait - - aNonIdle := statA.User + statA.Nice + statA.System + statA.IRQ + statA.SoftIRQ + statA.Steal - bNonIdle := statB.User + statB.Nice + statB.System + statB.IRQ + statB.SoftIRQ + statB.Steal - - aTotal := aIdle + aNonIdle - bTotal := bIdle + bNonIdle - - totalDiff := bTotal - aTotal - idleDiff := bIdle - aIdle - - return uint64((float64(totalDiff-idleDiff) / float64(totalDiff)) * 100) -} diff --git a/agent-go/tmp/executor-om-pipeline.json b/agent-go/tmp/executor-om-pipeline.json index 55c3756..3f907d6 100644 --- a/agent-go/tmp/executor-om-pipeline.json +++ b/agent-go/tmp/executor-om-pipeline.json @@ -2,7 +2,7 @@ "uuid": "2023-03-27 14:38:49", 
"init_time": "2023-03-27T14:38:49.8162801+08:00", "type": "EXECUTOR", - "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": null,\n \"pipeLineCommand\": [[\"ls\",\"-la\"],[\"grep\", \"dev\"]],\n \"resultKey\": \"output\"\n}\n", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": null,\n \"pipeLineCommand\": [[\"ls\",\"-la\"],[\"grep\", \"dev\"],[\"awk\",\"{print $9}\"]],\n \"resultKey\": \"output\"\n}\n", "result": "", "ac_time": "0001-01-01T00:00:00Z" } From 0a310145edc820c29a8ee4f2546b13308efa32a7 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 13 Apr 2023 17:33:32 +0800 Subject: [PATCH 10/45] [agent-go] [status] add status module --- agent-go/status/Network.go | 66 ++++++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/agent-go/status/Network.go b/agent-go/status/Network.go index d37e66b..e4b5542 100644 --- a/agent-go/status/Network.go +++ b/agent-go/status/Network.go @@ -3,19 +3,22 @@ package status import ( "fmt" "github.com/shirou/gopsutil/v3/net" + net2 "net" "strings" "time" ) type NetworkInfo struct { - Name string `json:"name,omitempty"` - InternalIP string `json:"internal_ip,omitempty"` - ExternalIP string `json:"external_ip,omitempty"` - Mac string `json:"mac,omitempty"` - Sent uint64 `json:"sent,omitempty"` - Recv uint64 `json:"recv,omitempty"` - SentRate string `json:"sent_rate,omitempty"` - RecvRate string `json:"recv_rate,omitempty"` + Name string `json:"name,omitempty"` + InternalIPv4 []string `json:"internal_ip_v4,omitempty"` + InternalIPv6 []string `json:"internal_ip_v6,omitempty"` + ExternalIPv4 []string `json:"external_ip_v4,omitempty"` + ExternalIPv6 []string `json:"external_ip_v6,omitempty"` + Mac string `json:"mac,omitempty"` + Sent uint64 `json:"sent,omitempty"` + Recv uint64 `json:"recv,omitempty"` + SentRate string `json:"sent_rate,omitempty"` + RecvRate string `json:"recv_rate,omitempty"` } func main() { @@ -38,14 +41,13 @@ func GetNetworkInfo() (*NetworkInfo, error) { break } } - var internalIP string + var allAddrs []string log.DebugF("all main interface address are %v", mainInterface.Addrs) for _, addr := range mainInterface.Addrs { - if strings.Contains(addr.Addr, ".") { - internalIP = addr.Addr - break - } + allAddrs = append(allAddrs, addr.Addr) } + ipv4List, ipv6List := GetIPAddresses(allAddrs) + counters, err := net.IOCounters(true) if err != nil { return nil, err @@ -62,7 +64,7 @@ func GetNetworkInfo() (*NetworkInfo, error) { // 休眠3秒 - time.Sleep(3 * time.Second) + time.Sleep(10 * time.Second) var sentAfter uint64 var recvAfter uint64 @@ -73,18 +75,20 @@ func GetNetworkInfo() (*NetworkInfo, error) { break } } - sendRate := fmt.Sprintf(formatBytes((sentAfter - sent) / 3)) - recvRate := fmt.Sprintf(formatBytes((recvAfter - recv) / 3)) + sendRate := fmt.Sprintf(formatBytes((sentAfter-sent)/10) + "/s") + recvRate := fmt.Sprintf(formatBytes((recvAfter-recv)/10) + "/s") info := &NetworkInfo{ - Name: mainInterface.Name, - InternalIP: internalIP, - ExternalIP: "", - Mac: mainInterface.HardwareAddr, - Sent: sent, - Recv: recv, - SentRate: sendRate, - RecvRate: recvRate, + Name: mainInterface.Name, + InternalIPv4: ipv4List, + InternalIPv6: ipv6List, + ExternalIPv4: nil, + ExternalIPv6: nil, + Mac: mainInterface.HardwareAddr, + Sent: sent, + Recv: recv, + SentRate: sendRate, + RecvRate: 
recvRate, } return info, nil } @@ -101,3 +105,17 @@ func formatBytes(bytes uint64) string { } return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) } + +func GetIPAddresses(addresses []string) ([]string, []string) { + var ipv4 []string + var ipv6 []string + for _, addr := range addresses { + ip := net2.ParseIP(addr) + if ip.To4() != nil { + ipv4 = append(ipv4, addr) + } else if ip.To16() != nil { + ipv6 = append(ipv6, addr) + } + } + return ipv4, ipv6 +} From 3dbba79203d6155335b02cd193be354d77d6bfcc Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 13 Apr 2023 17:36:30 +0800 Subject: [PATCH 11/45] [agent-go] [status] add status module --- agent-go/status/Network.go | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/agent-go/status/Network.go b/agent-go/status/Network.go index e4b5542..f038016 100644 --- a/agent-go/status/Network.go +++ b/agent-go/status/Network.go @@ -21,14 +21,6 @@ type NetworkInfo struct { RecvRate string `json:"recv_rate,omitempty"` } -func main() { - info, err := GetNetworkInfo() - if err != nil { - panic(err) - } - fmt.Printf("%+v\n", info) -} - func GetNetworkInfo() (*NetworkInfo, error) { interfaces, err := net.Interfaces() if err != nil { @@ -46,7 +38,8 @@ func GetNetworkInfo() (*NetworkInfo, error) { for _, addr := range mainInterface.Addrs { allAddrs = append(allAddrs, addr.Addr) } - ipv4List, ipv6List := GetIPAddresses(allAddrs) + ipv4List, ipv6List := GetInternelIpAddrs(allAddrs) + log.DebugF("ipv4 list are => %v, ipv4 list are => %v", ipv4List, ipv6List) counters, err := net.IOCounters(true) if err != nil { @@ -106,7 +99,7 @@ func formatBytes(bytes uint64) string { return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) } -func GetIPAddresses(addresses []string) ([]string, []string) { +func GetInternelIpAddrs(addresses []string) ([]string, []string) { var ipv4 []string var ipv6 []string for _, addr := range addresses { From 9909593545c4c26a366966da93504fd7fbafe7fd Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 13 Apr 2023 17:41:12 +0800 Subject: [PATCH 12/45] [agent-go] [status] network interface analyze to cidr --- agent-go/status/Network.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/agent-go/status/Network.go b/agent-go/status/Network.go index f038016..16dccbe 100644 --- a/agent-go/status/Network.go +++ b/agent-go/status/Network.go @@ -39,7 +39,7 @@ func GetNetworkInfo() (*NetworkInfo, error) { allAddrs = append(allAddrs, addr.Addr) } ipv4List, ipv6List := GetInternelIpAddrs(allAddrs) - log.DebugF("ipv4 list are => %v, ipv4 list are => %v", ipv4List, ipv6List) + log.DebugF("ipv4 list are => %v, ipv6 list are => %v", ipv4List, ipv6List) counters, err := net.IOCounters(true) if err != nil { @@ -103,7 +103,11 @@ func GetInternelIpAddrs(addresses []string) ([]string, []string) { var ipv4 []string var ipv6 []string for _, addr := range addresses { - ip := net2.ParseIP(addr) + // it parse (0.0.0.0) not cidr + ip, _, err := net2.ParseCIDR(addr) + if err != nil { + continue + } if ip.To4() != nil { ipv4 = append(ipv4, addr) } else if ip.To16() != nil { From e50ba14c1349635bcd2765c96bfbbfd04d570a34 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Fri, 14 Apr 2023 10:04:58 +0800 Subject: [PATCH 13/45] [agent-go] [status] basically accomplished the status module --- agent-go/status/Disk.go | 47 +++++++++++++++++++++++++++++++++ agent-go/status/Disk_test.go | 14 ++++++++++ agent-go/status/Memory.go | 4 +-- agent-go/status/Network.go | 18 ++++++++----- 
agent-go/status/Network_test.go | 2 +- agent-go/status/Status.go | 30 +++++++++++++++++++++ 6 files changed, 105 insertions(+), 10 deletions(-) create mode 100644 agent-go/status/Disk.go create mode 100644 agent-go/status/Disk_test.go diff --git a/agent-go/status/Disk.go b/agent-go/status/Disk.go new file mode 100644 index 0000000..bb70794 --- /dev/null +++ b/agent-go/status/Disk.go @@ -0,0 +1,47 @@ +package status + +import ( + "fmt" + "github.com/shirou/gopsutil/v3/disk" + "runtime" + "time" +) + +type DiskStatus struct { + Total uint64 + Used uint64 + LogicalDisk []disk.PartitionStat +} + +func GetDiskStatus() *DiskStatus { + var ds *DiskStatus + + // Get disk usage + du, _ := disk.Usage("/") + ds.Total = du.Total + ds.Used = du.Used + + // Get logical disk info for Linux systems + if runtime.GOOS == "linux" { + ld, _ := disk.Partitions(true) + ds.LogicalDisk = ld + } + + return ds +} + +func CalculateDiskIO() { + + // Get initial disk IO counters + counters1, _ := disk.IOCounters() + time.Sleep(time.Second) + // Get disk IO counters after 1 second + counters2, _ := disk.IOCounters() + + for device, counter1 := range counters1 { + counter2 := counters2[device] + readSpeed := float64(counter2.ReadBytes-counter1.ReadBytes) / 1024 + writeSpeed := float64(counter2.WriteBytes-counter1.WriteBytes) / 1024 + fmt.Printf("%v: read %vKB/s, write %vKB/s\n", device, readSpeed, writeSpeed) + } +} diff --git a/agent-go/status/Disk_test.go b/agent-go/status/Disk_test.go new file mode 100644 index 0000000..71d4543 --- /dev/null +++ b/agent-go/status/Disk_test.go @@ -0,0 +1,14 @@ +package status + +import ( + "fmt" + "testing" +) + +func TestGetDiskStatus(t *testing.T) { + + ds := GetDiskStatus() + fmt.Printf("Total: %v, Used: %v\n", ds.Total, ds.Used) + fmt.Printf("Logical Disks: %v\n", ds.LogicalDisk) + +} diff --git a/agent-go/status/Memory.go b/agent-go/status/Memory.go index ef79c1b..ad2c268 100644 --- a/agent-go/status/Memory.go +++ b/agent-go/status/Memory.go @@ -13,8 +13,8 @@ type MemoryStatus struct { UsedVirtualMemory uint64 } -func GetMemoryStatus() (MemoryStatus, error) { - var memoryStatus MemoryStatus +func GetMemoryStatus() (*MemoryStatus, error) { + var memoryStatus *MemoryStatus virtualMemoryStat, err := mem.VirtualMemory() if err != nil { diff --git a/agent-go/status/Network.go b/agent-go/status/Network.go index 16dccbe..69186d4 100644 --- a/agent-go/status/Network.go +++ b/agent-go/status/Network.go @@ -8,7 +8,7 @@ import ( "time" ) -type NetworkInfo struct { +type NetworkStatus struct { Name string `json:"name,omitempty"` InternalIPv4 []string `json:"internal_ip_v4,omitempty"` InternalIPv6 []string `json:"internal_ip_v6,omitempty"` @@ -21,7 +21,7 @@ type NetworkInfo struct { RecvRate string `json:"recv_rate,omitempty"` } -func GetNetworkInfo() (*NetworkInfo, error) { +func GetNetworkStatus() (*NetworkStatus, error) { interfaces, err := net.Interfaces() if err != nil { return nil, err @@ -57,21 +57,25 @@ func GetNetworkInfo() (*NetworkInfo, error) { // 休眠3秒 - time.Sleep(10 * time.Second) + time.Sleep(3 * time.Second) var sentAfter uint64 var recvAfter uint64 - for _, counter := range counters { + countersAfter, err := net.IOCounters(true) + if err != nil { + return nil, err + } + for _, counter := range countersAfter { if counter.Name == mainInterface.Name { sentAfter = counter.BytesSent recvAfter = counter.BytesRecv break } } - sendRate := fmt.Sprintf(formatBytes((sentAfter-sent)/10) + "/s") - recvRate := fmt.Sprintf(formatBytes((recvAfter-recv)/10) + "/s") + sendRate := 
fmt.Sprintf(formatBytes((sentAfter-sent)/3) + "/s") + recvRate := fmt.Sprintf(formatBytes((recvAfter-recv)/3) + "/s") - info := &NetworkInfo{ + info := &NetworkStatus{ Name: mainInterface.Name, InternalIPv4: ipv4List, InternalIPv6: ipv6List, diff --git a/agent-go/status/Network_test.go b/agent-go/status/Network_test.go index 0222710..b3d3ffd 100644 --- a/agent-go/status/Network_test.go +++ b/agent-go/status/Network_test.go @@ -8,7 +8,7 @@ import ( func TestGetNetworkInfo(t *testing.T) { - networkInfo, err := GetNetworkInfo() + networkInfo, err := GetNetworkStatus() if err != nil { return } diff --git a/agent-go/status/Status.go b/agent-go/status/Status.go index 34e9809..5e067a6 100644 --- a/agent-go/status/Status.go +++ b/agent-go/status/Status.go @@ -5,3 +5,33 @@ import ( ) var log = logger2.Log + +type AgentStatus struct { + CPUStatus *CPUStatus + MemoryStatus *MemoryStatus + NetworkStatus *NetworkStatus + DiskStatus *DiskStatus +} + +func Ping() string { + return "PONG" +} + +func ReportAppStatus() *AgentStatus { + + cpuStatus, cpuerr := GetCPUStatus() + memoryStatus, memerr := GetMemoryStatus() + networkStatus, neterr := GetNetworkStatus() + if cpuerr != nil || memerr != nil || neterr != nil { + log.ErrorF("获取Agent的状态出现错误! 请检查") + return nil + } + diskStatus := GetDiskStatus() + return &AgentStatus{ + CPUStatus: cpuStatus, + MemoryStatus: memoryStatus, + NetworkStatus: networkStatus, + DiskStatus: diskStatus, + } + +} From 7eee7cac634708622eb2e1a5e0ed10a3cba77083 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Fri, 14 Apr 2023 10:06:15 +0800 Subject: [PATCH 14/45] [agent-go] [status] basically accomplished the status module - 1 --- agent-go/status/Status_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 agent-go/status/Status_test.go diff --git a/agent-go/status/Status_test.go b/agent-go/status/Status_test.go new file mode 100644 index 0000000..b041447 --- /dev/null +++ b/agent-go/status/Status_test.go @@ -0,0 +1,19 @@ +package status + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestReportAppStatus(t *testing.T) { + + agentStatus := ReportAppStatus() + + marshal, err := json.Marshal(agentStatus) + if err != nil { + return + } + + fmt.Printf(string(marshal)) +} From 6b9487b9b2a42d1709d89d3b17dd21aa6d64e1ae Mon Sep 17 00:00:00 2001 From: zeaslity Date: Wed, 19 Apr 2023 11:25:55 +0800 Subject: [PATCH 15/45] [agent-go] [executor] real time executor --- agent-go/executor/RealTimeExecutor.go | 40 ++++++++++++++++++++++ agent-go/executor/RealTimeExecutor_test.go | 13 +++++++ agent-go/status/Status.go | 14 ++++++++ agent-go/status/Status_test.go | 7 ++++ 4 files changed, 74 insertions(+) create mode 100644 agent-go/executor/RealTimeExecutor.go create mode 100644 agent-go/executor/RealTimeExecutor_test.go diff --git a/agent-go/executor/RealTimeExecutor.go b/agent-go/executor/RealTimeExecutor.go new file mode 100644 index 0000000..b3c033b --- /dev/null +++ b/agent-go/executor/RealTimeExecutor.go @@ -0,0 +1,40 @@ +package executor + +import ( + "bufio" + "fmt" + "io" + "os/exec" +) + +func ReadTimeOutput(singleLineCommand []string) { + + cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...) 
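+	// StdoutPipe/StderrPipe must be wired up before cmd.Start, and each
+	// pipe is then drained in its own goroutine so a long-running command's
+	// output is printed line by line in real time instead of after exit.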
+ stdout, err := cmd.StdoutPipe() + if err != nil { + panic(err) + } + stderr, err := cmd.StderrPipe() + if err != nil { + panic(err) + } + + if err := cmd.Start(); err != nil { + panic(err) + } + + go copyOutput(stdout) + go copyOutput(stderr) + + if err := cmd.Wait(); err != nil { + panic(err) + } + +} + +func copyOutput(r io.Reader) { + scanner := bufio.NewScanner(r) + for scanner.Scan() { + fmt.Println(scanner.Text()) + } +} diff --git a/agent-go/executor/RealTimeExecutor_test.go b/agent-go/executor/RealTimeExecutor_test.go new file mode 100644 index 0000000..ae0ea65 --- /dev/null +++ b/agent-go/executor/RealTimeExecutor_test.go @@ -0,0 +1,13 @@ +package executor + +import "testing" + +func TestReadTimeOutput(t *testing.T) { + strings := []string{ + "/bin/bash", + "/root/simple.sh", + } + + ReadTimeOutput(strings) + +} diff --git a/agent-go/status/Status.go b/agent-go/status/Status.go index 5e067a6..5e4e666 100644 --- a/agent-go/status/Status.go +++ b/agent-go/status/Status.go @@ -2,6 +2,8 @@ package status import ( logger2 "agent-go/logger" + "fmt" + "time" ) var log = logger2.Log @@ -13,6 +15,18 @@ type AgentStatus struct { DiskStatus *DiskStatus } +func ConvertToFormat(eventData float64) string { + duration := time.Duration(eventData) * time.Second + + fmt.Println(duration) + + hours := int(duration.Hours()) + minutes := int(duration.Minutes()) % 60 + seconds := int(duration.Seconds()) % 60 + milliseconds := duration.Milliseconds() % 1000 + return fmt.Sprintf("%02d:%02d:%02d,%03d", hours, minutes, seconds, milliseconds) +} + func Ping() string { return "PONG" } diff --git a/agent-go/status/Status_test.go b/agent-go/status/Status_test.go index b041447..2ba647d 100644 --- a/agent-go/status/Status_test.go +++ b/agent-go/status/Status_test.go @@ -6,6 +6,13 @@ import ( "testing" ) +func TestConvertToFormat(t *testing.T) { + + convertToFormat := ConvertToFormat(99.92) + + fmt.Println(convertToFormat) +} + func TestReportAppStatus(t *testing.T) { agentStatus := ReportAppStatus() From c5143d2d591c44dc344f057184cfd178bff1bb4a Mon Sep 17 00:00:00 2001 From: zeaslity Date: Sun, 23 Apr 2023 14:15:12 +0800 Subject: [PATCH 16/45] [agent-go] [status] - start to unit --- agent-go/executor/RealTimeExecutor.go | 2 +- agent-go/status/Disk.go | 3 +- agent-go/status/Disk_test.go | 8 ++ agent-go/status/Memory.go | 2 +- agent-go/status/Memory_test.go | 8 ++ .../java/io/wdd/rpc/status/beans/CPUInfo.java | 81 +++++++++++++++++++ .../io/wdd/rpc/status/beans/NetworkInfo.java | 33 ++++++++ .../java/io/wdd/rpc/status/beans/Status.java | 5 ++ 8 files changed, 139 insertions(+), 3 deletions(-) create mode 100644 server/src/main/java/io/wdd/rpc/status/beans/CPUInfo.java create mode 100644 server/src/main/java/io/wdd/rpc/status/beans/NetworkInfo.java create mode 100644 server/src/main/java/io/wdd/rpc/status/beans/Status.java diff --git a/agent-go/executor/RealTimeExecutor.go b/agent-go/executor/RealTimeExecutor.go index b3c033b..ff16f8b 100644 --- a/agent-go/executor/RealTimeExecutor.go +++ b/agent-go/executor/RealTimeExecutor.go @@ -7,7 +7,7 @@ import ( "os/exec" ) -func ReadTimeOutput(singleLineCommand []string) { +func ReadTimeCommandExecutor(singleLineCommand []string) { cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...) 
stdout, err := cmd.StdoutPipe() diff --git a/agent-go/status/Disk.go b/agent-go/status/Disk.go index bb70794..09d93ef 100644 --- a/agent-go/status/Disk.go +++ b/agent-go/status/Disk.go @@ -14,7 +14,8 @@ type DiskStatus struct { } func GetDiskStatus() *DiskStatus { - var ds *DiskStatus + + ds := &DiskStatus{} // Get disk usage du, _ := disk.Usage("/") diff --git a/agent-go/status/Disk_test.go b/agent-go/status/Disk_test.go index 71d4543..dbf191a 100644 --- a/agent-go/status/Disk_test.go +++ b/agent-go/status/Disk_test.go @@ -1,6 +1,7 @@ package status import ( + "encoding/json" "fmt" "testing" ) @@ -11,4 +12,11 @@ func TestGetDiskStatus(t *testing.T) { fmt.Printf("Total: %v, Used: %v\n", ds.Total, ds.Used) fmt.Printf("Logical Disks: %v\n", ds.LogicalDisk) + marshalIndent, err := json.MarshalIndent(ds, "", " ") + if err != nil { + fmt.Printf("error") + } + + fmt.Println(string(marshalIndent)) + } diff --git a/agent-go/status/Memory.go b/agent-go/status/Memory.go index ad2c268..4f65d65 100644 --- a/agent-go/status/Memory.go +++ b/agent-go/status/Memory.go @@ -14,7 +14,7 @@ type MemoryStatus struct { } func GetMemoryStatus() (*MemoryStatus, error) { - var memoryStatus *MemoryStatus + memoryStatus := &MemoryStatus{} virtualMemoryStat, err := mem.VirtualMemory() if err != nil { diff --git a/agent-go/status/Memory_test.go b/agent-go/status/Memory_test.go index 2842071..baa2a61 100644 --- a/agent-go/status/Memory_test.go +++ b/agent-go/status/Memory_test.go @@ -1,6 +1,7 @@ package status import ( + "encoding/json" "fmt" "testing" ) @@ -17,4 +18,11 @@ func TestGetMemoryStatus(t *testing.T) { fmt.Printf("Available Memory: %s\n", FormatMemorySize(memoryStatus.AvailableMemory)) fmt.Printf("Total Virtual Memory: %s\n", FormatMemorySize(memoryStatus.TotalVirtualMemory)) fmt.Printf("Used Virtual Memory: %s\n", FormatMemorySize(memoryStatus.UsedVirtualMemory)) + + marshalIndent, err := json.MarshalIndent(memoryStatus, "", " ") + if err != nil { + fmt.Printf("error") + } + + fmt.Println(string(marshalIndent)) } diff --git a/server/src/main/java/io/wdd/rpc/status/beans/CPUInfo.java b/server/src/main/java/io/wdd/rpc/status/beans/CPUInfo.java new file mode 100644 index 0000000..018bd04 --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/CPUInfo.java @@ -0,0 +1,81 @@ +package io.wdd.rpc.status.beans; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; +import lombok.experimental.SuperBuilder; + +import java.util.List; + +@Data +@AllArgsConstructor +@NoArgsConstructor +@SuperBuilder(toBuilder = true) +public class CPUInfo { + + + @JsonProperty("NumCores") + private Integer numCores; + @JsonProperty("CPUInfo") + private List cPUInfo; + @JsonProperty("CPUPercent") + private Double cPUPercent; + @JsonProperty("CPULoads") + private CPULoadsDTO cPULoads; + @JsonProperty("SystemLoads") + private SystemLoadsDTO systemLoads; + + @NoArgsConstructor + @Data + public static class CPULoadsDTO { + @JsonProperty("load1") + private Integer load1; + @JsonProperty("load5") + private Integer load5; + @JsonProperty("load15") + private Integer load15; + } + + @NoArgsConstructor + @Data + public static class SystemLoadsDTO { + @JsonProperty("load1") + private Integer load1; + @JsonProperty("load5") + private Integer load5; + @JsonProperty("load15") + private Integer load15; + } + + @NoArgsConstructor + @Data + public static class CPUInfoDTO { + @JsonProperty("cpu") + private Integer cpu; + @JsonProperty("vendorId") + private 
String vendorId; + @JsonProperty("family") + private String family; + @JsonProperty("model") + private String model; + @JsonProperty("stepping") + private Integer stepping; + @JsonProperty("physicalId") + private String physicalId; + @JsonProperty("coreId") + private String coreId; + @JsonProperty("cores") + private Integer cores; + @JsonProperty("modelName") + private String modelName; + @JsonProperty("mhz") + private Integer mhz; + @JsonProperty("cacheSize") + private Integer cacheSize; + @JsonProperty("flags") + private List flags; + @JsonProperty("microcode") + private String microcode; + } +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/NetworkInfo.java b/server/src/main/java/io/wdd/rpc/status/beans/NetworkInfo.java new file mode 100644 index 0000000..9bee6cd --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/NetworkInfo.java @@ -0,0 +1,33 @@ +package io.wdd.rpc.status.beans; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; +import lombok.experimental.SuperBuilder; + +import java.util.List; + +@NoArgsConstructor +@Data +@AllArgsConstructor +@SuperBuilder(toBuilder = true) +public class NetworkInfo { + + @JsonProperty("name") + private String name; + @JsonProperty("internal_ip_v4") + private List internalIpV4; + @JsonProperty("internal_ip_v6") + private List internalIpV6; + @JsonProperty("mac") + private String mac; + @JsonProperty("sent") + private Long sent; + @JsonProperty("recv") + private Integer recv; + @JsonProperty("sent_rate") + private String sentRate; + @JsonProperty("recv_rate") + private String recvRate; +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/Status.java b/server/src/main/java/io/wdd/rpc/status/beans/Status.java new file mode 100644 index 0000000..fd2c5d5 --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/Status.java @@ -0,0 +1,5 @@ +package io.wdd.rpc.status.beans; + +public class AgentStatus { + +} From 9a676b50874373ff6636817a88a96da7d555d6a4 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Sun, 23 Apr 2023 16:37:07 +0800 Subject: [PATCH 17/45] [agent-go] [status] - test for connect --- agent-go/rabbitmq/OctopusMessage.go | 24 ++++++++++++-- agent-go/status/Status.go | 13 ++++++++ agent-go/tmp/status-agentStatus.json | 0 agent-go/tmp/status-ping.json | 8 +++++ .../io/wdd/rpc/status/beans/AgentStatus.java | 23 +++++++++++++ .../io/wdd/rpc/status/beans/DiskInfo.java | 32 +++++++++++++++++++ .../io/wdd/rpc/status/beans/MemoryInfo.java | 21 ++++++++++++ .../java/io/wdd/rpc/status/beans/Status.java | 5 --- 8 files changed, 119 insertions(+), 7 deletions(-) create mode 100644 agent-go/tmp/status-agentStatus.json create mode 100644 agent-go/tmp/status-ping.json create mode 100644 server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java create mode 100644 server/src/main/java/io/wdd/rpc/status/beans/DiskInfo.java create mode 100644 server/src/main/java/io/wdd/rpc/status/beans/MemoryInfo.java delete mode 100644 server/src/main/java/io/wdd/rpc/status/beans/Status.java diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go index 2c684ad..869bf8a 100644 --- a/agent-go/rabbitmq/OctopusMessage.go +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -3,9 +3,11 @@ package rabbitmq import ( "agent-go/executor" "agent-go/g" + "agent-go/status" "agent-go/utils" "encoding/json" "fmt" + "strings" "time" ) @@ -105,10 +107,28 @@ func executorOMHandler(octopusMessage *OctopusMessage) { } func 
statusOMHandler(octopusMessage *OctopusMessage) {
-	log.Info("Received an agent status query; pretending it was handled")
-	//statusMessageString := octopusMessage.Content.(string)
+	statusMsgString := octopusMessage.Content.(string)
+
+	var statusMessage *status.StatusMessage
+	err := json.Unmarshal([]byte(statusMsgString), &statusMessage)
+	if err != nil {
+		log.Error(fmt.Sprintf("failed to unmarshal status message! msg is => %s", statusMsgString))
+		return
+	}
+	var statusRes string
+	if strings.HasPrefix(statusMessage.Type, "p") {
+		// ping info
+		statusRes = status.Ping()
+	} else {
+		// status info
+		agentStatusString, _ := json.Marshal(status.ReportAppStatus())
+		statusRes = string(agentStatusString)
+	}
+
+	// reply with the result
+
+	log.InfoF("Received an agent status query, result => %s", statusRes)
 }
 
 func blackHoleOMHandler(octopusMessage *OctopusMessage) {

diff --git a/agent-go/status/Status.go b/agent-go/status/Status.go
index 5e4e666..63d65db 100644
--- a/agent-go/status/Status.go
+++ b/agent-go/status/Status.go
@@ -8,6 +8,19 @@ import (
 
 var log = logger2.Log
 
+type StatusMessage struct {
+	/**
+	 * which kind of status should be returned
+	 * metric => short-time metric message
+	 * all => all agent status messages
+	 * healthy => health check
+	 * */
+	Type              string `json:"type,omitempty"`
+	AgentTopicName    string `json:"agent_topic_name,omitempty"`
+	MetricRepeatCount int    `json:"metric_repeat_count,omitempty"`
+	MetricRepeatPinch int    `json:"metric_repeat_pinch,omitempty"` // exported so encoding/json can actually populate it
+}
+
 type AgentStatus struct {

diff --git a/agent-go/tmp/status-agentStatus.json b/agent-go/tmp/status-agentStatus.json
new file mode 100644
index 0000000..e69de29

diff --git a/agent-go/tmp/status-ping.json b/agent-go/tmp/status-ping.json
new file mode 100644
index 0000000..3046aa4
--- /dev/null
+++ b/agent-go/tmp/status-ping.json
@@ -0,0 +1,8 @@
+{
+  "uuid": "2023-03-27 14:38:49",
+  "init_time": "2023-03-27T14:38:49.8162801+08:00",
+  "type": "PING",
+  "content": "ping",
+  "result": "",
+  "ac_time": "0001-01-01T00:00:00Z"
+}

diff --git a/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java b/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java
new file mode 100644
index 0000000..4ab2767
--- /dev/null
+++ b/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java
@@ -0,0 +1,23 @@
+package io.wdd.rpc.status.beans;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@NoArgsConstructor
+@Data
+public class AgentStatus {
+
+    @JsonProperty("CPUStatus")
+    private CPUInfo cPUStatus;
+
+    @JsonProperty("MemoryStatus")
+    private MemoryInfo memoryStatus;
+
+    @JsonProperty("NetworkStatus")
+    private NetworkInfo networkStatus;
+
+    @JsonProperty("DiskStatus")
+    private DiskInfo diskStatus;
+
+}

diff --git a/server/src/main/java/io/wdd/rpc/status/beans/DiskInfo.java b/server/src/main/java/io/wdd/rpc/status/beans/DiskInfo.java
new file mode 100644
index 0000000..7b8c7de
--- /dev/null
+++ b/server/src/main/java/io/wdd/rpc/status/beans/DiskInfo.java
@@ -0,0 +1,32 @@
+package io.wdd.rpc.status.beans;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+import java.util.List;
+
+@NoArgsConstructor
+@Data
+public class DiskInfo {
+
+    @JsonProperty("Total")
+    private Long total;
+    @JsonProperty("Used")
+    private Long used;
+    @JsonProperty("LogicalDisk")
+    private List<LogicalDiskDTO> logicalDisk;
+
+    @NoArgsConstructor
+    @Data
+    public static class LogicalDiskDTO {
+        @JsonProperty("device")
+        private String device;
+
@JsonProperty("mountpoint") + private String mountpoint; + @JsonProperty("fstype") + private String fstype; + @JsonProperty("opts") + private List opts; + } +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/MemoryInfo.java b/server/src/main/java/io/wdd/rpc/status/beans/MemoryInfo.java new file mode 100644 index 0000000..15971af --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/MemoryInfo.java @@ -0,0 +1,21 @@ +package io.wdd.rpc.status.beans; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.Data; +import lombok.NoArgsConstructor; + +@NoArgsConstructor +@Data +public class MemoryInfo { + + @JsonProperty("TotalMemory") + private Long totalMemory; + @JsonProperty("UsedMemory") + private Long usedMemory; + @JsonProperty("AvailableMemory") + private Long availableMemory; + @JsonProperty("TotalVirtualMemory") + private Long totalVirtualMemory; + @JsonProperty("UsedVirtualMemory") + private Long usedVirtualMemory; +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/Status.java b/server/src/main/java/io/wdd/rpc/status/beans/Status.java deleted file mode 100644 index fd2c5d5..0000000 --- a/server/src/main/java/io/wdd/rpc/status/beans/Status.java +++ /dev/null @@ -1,5 +0,0 @@ -package io.wdd.rpc.status.beans; - -public class AgentStatus { - -} From 42b7b6e511b3c6d559dbbd8acc5534deac8459f1 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Tue, 25 Apr 2023 11:31:15 +0800 Subject: [PATCH 18/45] [agent-go] [status] - update code --- agent-go/rabbitmq/OctopusMessage.go | 3 ++- agent-go/rabbitmq/RabbitMsgQueue.go | 4 ++-- agent-go/tmp/status-agentStatus.json | 8 ++++++++ agent-go/tmp/status-ping.json | 4 ++-- server/src/main/resources/bootstrap.yml | 12 ++++++------ 5 files changed, 20 insertions(+), 11 deletions(-) diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go index 869bf8a..3553445 100644 --- a/agent-go/rabbitmq/OctopusMessage.go +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -109,6 +109,7 @@ func executorOMHandler(octopusMessage *OctopusMessage) { func statusOMHandler(octopusMessage *OctopusMessage) { statusMsgString := octopusMessage.Content.(string) + var statusMessage *status.StatusMessage err := json.Unmarshal([]byte(statusMsgString), &statusMessage) if err != nil { @@ -132,5 +133,5 @@ func statusOMHandler(octopusMessage *OctopusMessage) { } func blackHoleOMHandler(octopusMessage *OctopusMessage) { - log.Error(fmt.Sprintf("octopusMessage type wrong! msg is => %v", octopusMessage)) + log.Error(fmt.Sprintf("[BLACK HOLE] octopusMessage type wrong! 
msg is => %v", octopusMessage)) } diff --git a/agent-go/rabbitmq/RabbitMsgQueue.go b/agent-go/rabbitmq/RabbitMsgQueue.go index d89111e..0193f84 100644 --- a/agent-go/rabbitmq/RabbitMsgQueue.go +++ b/agent-go/rabbitmq/RabbitMsgQueue.go @@ -90,7 +90,7 @@ func (r *RabbitQueue) Connect() { if err = ch.ExchangeDeclare( r.RabbitProp.ExchangeName, // name of the exchange r.RabbitProp.ExchangeType, // type of the exchange - false, // durable + true, // durable false, // delete when complete false, // internal false, // noWait @@ -101,7 +101,7 @@ func (r *RabbitQueue) Connect() { _, err = ch.QueueDeclare( r.RabbitProp.QueueName, // name of the queue - false, // durable + true, // durable false, // delete when unused false, // exclusive false, // noWait diff --git a/agent-go/tmp/status-agentStatus.json b/agent-go/tmp/status-agentStatus.json index e69de29..514957e 100644 --- a/agent-go/tmp/status-agentStatus.json +++ b/agent-go/tmp/status-agentStatus.json @@ -0,0 +1,8 @@ +{ + "uuid": "2023-03-27 14:38:49", + "init_time": "2023-03-27T14:38:49.8162801+08:00", + "type": "STATUS", + "content": "{\n\"type\": \"status\",\n\"agent_topic_name\": \"exampleAgentTopicName\",\n\"metric_repeat_count\": 0,\n\"metric_repeat_pinch\": 0\n}", + "result": "", + "ac_time": "0001-01-01T00:00:00Z" +} diff --git a/agent-go/tmp/status-ping.json b/agent-go/tmp/status-ping.json index 3046aa4..c821d60 100644 --- a/agent-go/tmp/status-ping.json +++ b/agent-go/tmp/status-ping.json @@ -1,8 +1,8 @@ { "uuid": "2023-03-27 14:38:49", "init_time": "2023-03-27T14:38:49.8162801+08:00", - "type": "PING", - "content": "ping", + "type": "STATUS", + "content": "{\n\"type\": \"ping\",\n\"agent_topic_name\": \"exampleAgentTopicName\",\n\"metric_repeat_count\": 0,\n\"metric_repeat_pinch\": 0\n}", "result": "", "ac_time": "0001-01-01T00:00:00Z" } diff --git a/server/src/main/resources/bootstrap.yml b/server/src/main/resources/bootstrap.yml index 47a0a39..c98f222 100644 --- a/server/src/main/resources/bootstrap.yml +++ b/server/src/main/resources/bootstrap.yml @@ -2,22 +2,22 @@ spring: application: name: octopus-server profiles: - active: k3s + active: local cloud: nacos: config: - group: k3s + group: local config-retry-time: 3000 file-extension: yaml max-retry: 3 - # server-addr: 43.154.83.213:21060 - server-addr: 150.230.198.103:21060 + server-addr: 42.192.52.227:21060 + # server-addr: 150.230.198.103:21060 # server-addr: https://nacos.107421.xyz:443 timeout: 5000 config-long-poll-timeout: 5000 extension-configs: - - group: k3s - data-id: common-k3s.yaml + - group: local + data-id: common-local.yaml server: port: 9999 \ No newline at end of file From 2ea1d7cd4d1425582710cc2d6e0217ccbbd5fb48 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Tue, 13 Jun 2023 11:16:50 +0800 Subject: [PATCH 19/45] [agent-go] [status] - update code --- .run/Agent-Dev-LapPro-Ubuntu.run.xml | 41 ------------- .run/Agent-dev-1.run.xml | 36 ------------ .run/Agent-dev-oracle-s5.run.xml | 37 ------------ .run/Server-k3s.run.xml | 13 ----- agent-go/executor/RealTimeExecutor_test.go | 2 +- agent-go/main.go | 9 --- .../wdd/source/octopus/simple-middleware.yaml | 58 +++++++++++++++++++ 7 files changed, 59 insertions(+), 137 deletions(-) delete mode 100644 .run/Agent-Dev-LapPro-Ubuntu.run.xml delete mode 100644 .run/Agent-dev-1.run.xml delete mode 100644 .run/Agent-dev-oracle-s5.run.xml delete mode 100644 .run/Server-k3s.run.xml create mode 100644 source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml diff --git a/.run/Agent-Dev-LapPro-Ubuntu.run.xml 
b/.run/Agent-Dev-LapPro-Ubuntu.run.xml deleted file mode 100644 index cc4b0c2..0000000 --- a/.run/Agent-Dev-LapPro-Ubuntu.run.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - \ No newline at end of file diff --git a/.run/Agent-dev-1.run.xml b/.run/Agent-dev-1.run.xml deleted file mode 100644 index cb65223..0000000 --- a/.run/Agent-dev-1.run.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - - \ No newline at end of file diff --git a/.run/Agent-dev-oracle-s5.run.xml b/.run/Agent-dev-oracle-s5.run.xml deleted file mode 100644 index 98bef8c..0000000 --- a/.run/Agent-dev-oracle-s5.run.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - \ No newline at end of file diff --git a/.run/Server-k3s.run.xml b/.run/Server-k3s.run.xml deleted file mode 100644 index e47d4e0..0000000 --- a/.run/Server-k3s.run.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/agent-go/executor/RealTimeExecutor_test.go b/agent-go/executor/RealTimeExecutor_test.go index ae0ea65..715fbf6 100644 --- a/agent-go/executor/RealTimeExecutor_test.go +++ b/agent-go/executor/RealTimeExecutor_test.go @@ -8,6 +8,6 @@ func TestReadTimeOutput(t *testing.T) { "/root/simple.sh", } - ReadTimeOutput(strings) + ReadTimeCommandExecutor(strings) } diff --git a/agent-go/main.go b/agent-go/main.go index 30ecb80..8ed778d 100644 --- a/agent-go/main.go +++ b/agent-go/main.go @@ -4,7 +4,6 @@ import ( "agent-go/g" logger2 "agent-go/logger" "agent-go/register" - "agent-go/status" "flag" "fmt" ) @@ -13,14 +12,6 @@ var log = logger2.Log func main() { - // 测试调用CPU状态 - getCpuMap, err := status.GetCpuMap() - if err != nil { - log.ErrorF("error is %v", err) - } - - log.InfoF("cpu status is %v", getCpuMap) - // 解析命令行参数 var version string flag.StringVar(&version, "version", "", "config file version") diff --git a/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml b/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml new file mode 100644 index 0000000..8934227 --- /dev/null +++ b/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml @@ -0,0 +1,58 @@ +version: '2.1' + +services: + mysql: + # https://hub.docker.com/r/bitnami/mysql + image: bitnami/mysql:8.0.32-debian-11-r12 + ports: + - '21306:3306' + volumes: + #- '/octopus-server/mysql/data:/bitnami/mysql/data' + - 'mysql_data:/bitnami/mysql/data' + #- '/octopus-server/mysql/nacos-2.2.0-initdb.sql:/docker-entrypoint-initdb.d/nacos-2.2.0-initdb.sql' + # - '/octopus-server/mysql/wdd-server.sql:/docker-entrypoint-initdb.d/wdd-server.sql' + networks: + - app-tier + environment: + - MYSQL_ROOT_USER=boge + - MYSQL_ROOT_PASSWORD=boge8tingH + - MYSQL_CHARACTER_SET=utf8mb4 + - MYSQL_COLLATE:utf8_general_ci + - MYSQL_AUTHENTICATION_PLUGIN=mysql_native_password + healthcheck: + test: [ 'CMD', '/opt/bitnami/scripts/mysql/healthcheck.sh' ] + interval: 15s + timeout: 5s + retries: 6 + rabbitmq: + # https://hub.docker.com/r/bitnami/rabbitmq + image: docker.io/bitnami/rabbitmq:3.11.10-debian-11-r0 + ports: + - '4369' + - '5551' + - '5552' + - '20672:5672' + - '25672' + - '20678:15672' + environment: + - RABBITMQ_VHOST=/ + - RABBITMQ_VHOSTS=/dev + - RABBITMQ_USERNAME=boge + - RABBITMQ_PASSWORD=boge8tingH + - RABBITMQ_SECURE_PASSWORD=no + - RABBITMQ_LOAD_DEFINITIONS=no + - RABBITMQ_NODE_PORT_NUMBER=5672 + - RABBITMQ_NODE_SSL_PORT_NUMBER=5671 + - RABBITMQ_MANAGEMENT_PORT_NUMBER=15672 + volumes: + - 'rabbitmq_data:/bitnami/rabbitmq/mnesia' + +volumes: + mysql_data: + driver: local + rabbitmq_data: + driver: local + +networks: + app-tier: + driver: bridge \ No newline at end of 
file From e30dafc26f4b3fa77df8c99494f7f5a33196caa9 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Tue, 13 Jun 2023 14:45:57 +0800 Subject: [PATCH 20/45] [agent-go] [status] - update code --- agent-go/tmp/rapid-api.sh | 1 + 1 file changed, 1 insertion(+) create mode 100644 agent-go/tmp/rapid-api.sh diff --git a/agent-go/tmp/rapid-api.sh b/agent-go/tmp/rapid-api.sh new file mode 100644 index 0000000..dd30938 --- /dev/null +++ b/agent-go/tmp/rapid-api.sh @@ -0,0 +1 @@ +92a968c0d5msh36a70a2da667c96p1bcc99jsnae97d91732f3 \ No newline at end of file From 6d569b2d3d8390dc56c72d32c3365b216dc79d7b Mon Sep 17 00:00:00 2001 From: zeaslity Date: Tue, 13 Jun 2023 14:46:07 +0800 Subject: [PATCH 21/45] [agent-go] [status] - update code --- agent-go/tmp/rapid-api.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-go/tmp/rapid-api.sh b/agent-go/tmp/rapid-api.sh index dd30938..ce9452c 100644 --- a/agent-go/tmp/rapid-api.sh +++ b/agent-go/tmp/rapid-api.sh @@ -1 +1 @@ -92a968c0d5msh36a70a2da667c96p1bcc99jsnae97d91732f3 \ No newline at end of file +92a968c0d5msh36a70a2da667c96p1bcc99jsnae97d91732f3 From 282e70d94273916328f5c62e671e1de2d46db5cb Mon Sep 17 00:00:00 2001 From: zeaslity Date: Wed, 14 Jun 2023 11:21:31 +0800 Subject: [PATCH 22/45] [Octopus] modify project to SpringBoot version --- .github/workflows/build-push-docker.yml | 2 +- agent-go/executor/function/BaseFunction.go | 136 +++++++++++++++ agent-go/g/global.go | 1 + agent-go/rabbitmq/OctopusMessage.go | 10 ++ common/pom.xml | 4 + .../java/io/wdd/common/CommonApplication.java | 20 +-- pom.xml | 138 ++++----------- server/pom.xml | 72 +++++++- .../handler/GlobalExceptionHandler.java | 4 +- .../wdd/common/handler/MyMessageSource.java | 0 .../common/handler/MyRuntimeException.java | 2 +- .../main/java/io/wdd/common}/response/R.java | 2 +- .../io/wdd/common}/response/ResultStat.java | 2 +- .../java/io/wdd/common/utils/DataUnit.java | 0 .../java/io/wdd/common/utils/FormatUtils.java | 0 .../io/wdd/common/utils/FunctionReader.java | 0 .../io/wdd/common/utils/MessageUtils.java | 2 +- .../utils/OctopusObjectMapperConfig.java | 0 .../utils/OctopusRabbitTemplateConfig.java | 1 - .../java/io/wdd/common/utils/TimeUtils.java | 0 .../io/wdd/func/controller/OSSController.java | 2 +- .../wdd/func/controller/XrayController.java | 2 +- .../wdd/rpc}/agent/AgentOperationMessage.java | 2 +- .../io/wdd/rpc}/agent/AgentOperationType.java | 2 +- .../rpc/agent/OctopusAgentServiceImpl.java | 6 +- .../wdd/rpc/controller/AgentController.java | 2 +- .../rpc/controller/ExecutionController.java | 2 +- .../rpc/controller/SchedulerController.java | 2 +- .../wdd/rpc/controller/StatusController.java | 2 +- .../io/wdd/rpc/execute}/ExecutionMessage.java | 2 +- .../service/AsyncExecutionService.java | 2 +- .../service/AsyncExecutionServiceImpl.java | 6 +- .../service/ExecutionResultDaemonHandler.java | 1 - .../service/SyncExecutionServiceImpl.java | 4 +- .../io/wdd/rpc/init/AcceptAgentInitInfo.java | 6 +- .../wdd/rpc/init/ServerCacheAgentStatus.java | 5 +- .../io/wdd/rpc/message}/OctopusMessage.java | 2 +- .../wdd/rpc/message}/OctopusMessageType.java | 2 +- .../message/handler/AsyncWaitOMResult.java | 2 +- .../rpc/message/handler/OMReplayContend.java | 4 +- .../handler/OMessageHandlerServer.java | 2 +- .../message/sender/OMessageToAgentSender.java | 4 +- .../rpc/scheduler/config/ExecutionJob.java | 1 - .../config/QuartzSchedulerUtils.java | 2 +- .../status/AgentRuntimeMetricStatus.java | 4 +- .../status/AgentStatusStreamReader.java | 2 +- 
.../service/status/CollectAgentStatus.java | 6 +- .../service/status/MonitorAllAgentStatus.java | 6 +- .../rpc}/status/AgentHealthyStatusEnum.java | 3 +- .../java/io/wdd/rpc}/status/AgentStatus.java | 2 +- .../io/wdd/rpc}/status/AgentSystemInfo.java | 2 +- .../io/wdd/rpc}/status/AppStatusEnum.java | 2 +- .../io/wdd/rpc}/status/AppStatusInfo.java | 2 +- .../main/java/io/wdd/rpc}/status/CpuInfo.java | 2 +- .../java/io/wdd/rpc}/status/CpuTicks.java | 3 +- .../java/io/wdd/rpc}/status/DiskInfo.java | 2 +- .../java/io/wdd/rpc}/status/MemoryInfo.java | 2 +- .../java/io/wdd/rpc}/status/MetricStatus.java | 2 +- .../java/io/wdd/rpc}/status/NetworkInfo.java | 2 +- .../wdd/rpc}/status/OctopusStatusMessage.java | 2 +- .../wdd/server/controller/AppController.java | 2 +- .../server/controller/DomainController.java | 2 +- .../server/controller/ServerController.java | 2 +- server/src/main/resources/application.yml | 161 ++++++++++++++++++ 64 files changed, 485 insertions(+), 189 deletions(-) create mode 100644 agent-go/executor/function/BaseFunction.go rename {common => server}/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java (99%) rename {common => server}/src/main/java/io/wdd/common/handler/MyMessageSource.java (100%) rename {common => server}/src/main/java/io/wdd/common/handler/MyRuntimeException.java (95%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/common}/response/R.java (95%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/common}/response/ResultStat.java (92%) rename {common => server}/src/main/java/io/wdd/common/utils/DataUnit.java (100%) rename {common => server}/src/main/java/io/wdd/common/utils/FormatUtils.java (100%) rename {common => server}/src/main/java/io/wdd/common/utils/FunctionReader.java (100%) rename {common => server}/src/main/java/io/wdd/common/utils/MessageUtils.java (93%) rename {common => server}/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java (100%) rename {common => server}/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java (94%) rename {common => server}/src/main/java/io/wdd/common/utils/TimeUtils.java (100%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/agent/AgentOperationMessage.java (95%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/agent/AgentOperationType.java (86%) rename {common/src/main/java/io/wdd/common/beans/executor => server/src/main/java/io/wdd/rpc/execute}/ExecutionMessage.java (98%) rename {common/src/main/java/io/wdd/common/beans/rabbitmq => server/src/main/java/io/wdd/rpc/message}/OctopusMessage.java (95%) rename {common/src/main/java/io/wdd/common/beans/rabbitmq => server/src/main/java/io/wdd/rpc/message}/OctopusMessageType.java (86%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/AgentHealthyStatusEnum.java (89%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/AgentStatus.java (95%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/AgentSystemInfo.java (97%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/AppStatusEnum.java (92%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/AppStatusInfo.java (90%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/CpuInfo.java (99%) rename 
{common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/CpuTicks.java (97%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/DiskInfo.java (98%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/MemoryInfo.java (97%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/MetricStatus.java (86%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/NetworkInfo.java (99%) rename {common/src/main/java/io/wdd/common/beans => server/src/main/java/io/wdd/rpc}/status/OctopusStatusMessage.java (96%) create mode 100644 server/src/main/resources/application.yml diff --git a/.github/workflows/build-push-docker.yml b/.github/workflows/build-push-docker.yml index 134c2ad..46cfcdc 100644 --- a/.github/workflows/build-push-docker.yml +++ b/.github/workflows/build-push-docker.yml @@ -28,7 +28,7 @@ jobs: with: java-version: '11' distribution: 'temurin' - server-id: github # Value of the distributionManagement/repository/id field of the pom.xml + server-id: github # Value of the distributionManagement/repository/id field of the pom.xml-back settings-path: ${{ github.workspace }} # location for the settings.xml file cache: maven diff --git a/agent-go/executor/function/BaseFunction.go b/agent-go/executor/function/BaseFunction.go new file mode 100644 index 0000000..348ff45 --- /dev/null +++ b/agent-go/executor/function/BaseFunction.go @@ -0,0 +1,136 @@ +package function + +import "strings" + +type BaseFunc interface { + Exec(baseFuncName string, funcArgs ...string) string +} + +type AgentOsOperator struct { + execCommandPrefix string `json:"exec_command_prefix",comment:"apt-get or yum or zapper"` + + canAccessInternet bool `json:"can_access_internet",comment:"是否可以访问公网"` +} + +// Exec 执行基础功能函数 +func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string { + + result := "" + + switch baseFuncName { + + case "shutdownFirewall": + result = op.shutdownFirewall() + break + case "modifyHostname": + result = op.modifyHostname(funcArgs) + break + case "disableSwap": + result = op.disableSwap() + break + case "installDocker": + result = op.installDocker(funcArgs) + break + case "installDockerCompose": + result = op.installDockerCompose() + break + case "modifyDockerConfig": + result = op.modifyDockerConfig() + break + case "installHelm": + result = op.installHelm() + break + case "installHarbor": + result = op.installHarbor(funcArgs) + break + case "chronyToPublicNTP": + result = op.chronyToPublicNTP() + break + case "chronyToMaster": + result = op.chronyToMaster(funcArgs) + break + case "installZSH": + result = op.installZSH() + break + case "modifySshPort": + result = op.modifySshPort(funcArgs) + break + case "openBBR": + result = op.openBBR() + break + default: + result = op.ok(funcArgs) + + } + + return result +} + +func (op *AgentOsOperator) shutdownFirewall() string { + + return "" +} + +func (op *AgentOsOperator) modifyHostname(args []string) string { + + return "" +} + +func (op *AgentOsOperator) disableSwap() string { + + return "" +} + +func (op *AgentOsOperator) installDocker(args []string) string { + + return "" +} + +func (op *AgentOsOperator) installDockerCompose() string { + + return "" +} + +func (op *AgentOsOperator) installHelm() string { + + return "" +} + +func (op *AgentOsOperator) modifyDockerConfig() string { + + return "" +} + +func (op *AgentOsOperator) installHarbor(args []string) string { 
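+ // NOTE: a placeholder stub, like the other operators in this file; each
+ // base function is expected to return its result string once the real
+ // shell-level steps (here, the Harbor install) are wired in.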
+ + return "" +} + +func (op *AgentOsOperator) chronyToPublicNTP() string { + + return "" +} + +func (op *AgentOsOperator) chronyToMaster(args []string) string { + + return "" +} + +func (op *AgentOsOperator) installZSH() string { + + return "" +} + +func (op *AgentOsOperator) modifySshPort(args []string) string { + + return "" +} + +func (op *AgentOsOperator) openBBR() string { + + return "" +} + +func (op *AgentOsOperator) ok(args []string) string { + return "base function is ok , args are => " + strings.Join(args, " ") +} diff --git a/agent-go/g/global.go b/agent-go/g/global.go index c5f7247..80c65eb 100644 --- a/agent-go/g/global.go +++ b/agent-go/g/global.go @@ -18,6 +18,7 @@ const ( ExecOmType = "EXECUTOR" StatusOmType = "STATUS" InitOmType = "INIT" + AgentOmType = "AGENT" ) var pool, _ = ants.NewPool(100, ants.WithNonblocking(false), ants.WithLogger(logger2.Log), ants.WithMaxBlockingTasks(30), ants.WithDisablePurge(true)) diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go index 3553445..1d74ab2 100644 --- a/agent-go/rabbitmq/OctopusMessage.go +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -83,6 +83,11 @@ func doHandleOctopusMessage(octopusMessage *OctopusMessage) { P.Submit(func() { statusOMHandler(octopusMessage) }) + case g.AgentOmType: + P.Submit(func() { + agentOMHandler(octopusMessage) + }, + ) default: P.Submit(func() { blackHoleOMHandler(octopusMessage) @@ -91,6 +96,11 @@ func doHandleOctopusMessage(octopusMessage *OctopusMessage) { } +// agentOMHandler 处理Agent的核心操作指令 +func agentOMHandler(octopusMessage *OctopusMessage) { + +} + func executorOMHandler(octopusMessage *OctopusMessage) { executionMsgString := octopusMessage.Content.(string) diff --git a/common/pom.xml b/common/pom.xml index e660761..54f8af6 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -27,6 +27,10 @@ oshi-core-java11 6.4.0 + + com.fasterxml.jackson.core + jackson-annotations + diff --git a/common/src/main/java/io/wdd/common/CommonApplication.java b/common/src/main/java/io/wdd/common/CommonApplication.java index 532d05b..f86cfb8 100644 --- a/common/src/main/java/io/wdd/common/CommonApplication.java +++ b/common/src/main/java/io/wdd/common/CommonApplication.java @@ -1,13 +1,13 @@ package io.wdd.common; -import org.springframework.boot.SpringApplication; -import org.springframework.boot.autoconfigure.SpringBootApplication; +//import org.springframework.boot.SpringApplication; +//import org.springframework.boot.autoconfigure.SpringBootApplication; -@SpringBootApplication -public class CommonApplication { - - public static void main(String[] args) { - SpringApplication.run(CommonApplication.class, args); - } - -} +//@SpringBootApplication +//public class CommonApplication { +// +// public static void main(String[] args) { +// SpringApplication.run(CommonApplication.class, args); +// } +// +//} diff --git a/pom.xml b/pom.xml index 4fc0dc5..6009b66 100644 --- a/pom.xml +++ b/pom.xml @@ -20,10 +20,10 @@ ProjectOctopus - agent server + @@ -36,101 +36,6 @@ - - - - - org.springframework.cloud - spring-cloud-dependencies - ${spring-cloud.version} - pom - import - - - - com.alibaba.cloud - spring-cloud-alibaba-dependencies - ${alibaba-cloud.version} - pom - import - - - org.springframework.cloud - spring-cloud-starter-bootstrap - 3.1.5 - - - com.alibaba.cloud - spring-cloud-starter-alibaba-nacos-config - ${alibaba-cloud.version} - - - - - - org.springframework.boot - spring-boot-starter-web - - - - - - - org.apache.commons - commons-lang3 - 3.12.0 - - - - - com.google.guava - guava - 
31.1-jre - - - - - org.springframework.boot - spring-boot-starter-amqp - - - - - org.springframework.boot - spring-boot-starter-data-redis - - - - - org.apache.commons - commons-pool2 - - - - - commons-beanutils - commons-beanutils - 1.9.4 - - - - org.projectlombok - lombok - true - - + + + + + + + + + + diff --git a/server/pom.xml b/server/pom.xml index 76a7426..dcdef3c 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -21,9 +21,37 @@ - io.wdd - common - ${project.parent.version} + org.springframework.boot + spring-boot-starter-web + + + + + org.springframework.boot + spring-boot-starter-amqp + + + + + org.springframework.boot + spring-boot-starter-data-redis + + + + + org.apache.commons + commons-pool2 + + + + + commons-beanutils + commons-beanutils + 1.9.4 @@ -67,12 +95,32 @@ 3.5.2 - - + + + org.apache.commons + commons-lang3 + 3.12.0 + + + + com.github.oshi + oshi-core-java11 + 6.4.0 + + + + + com.google.guava + guava + 31.1-jre + + + + + org.projectlombok + lombok + true + @@ -87,6 +135,12 @@ runtime --> + + org.springframework.boot + spring-boot-starter-test + test + + diff --git a/common/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java b/server/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java similarity index 99% rename from common/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java rename to server/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java index 08909d0..ee0e58c 100644 --- a/common/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java +++ b/server/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java @@ -1,8 +1,8 @@ package io.wdd.common.handler; import com.google.common.collect.Maps; -import io.wdd.common.beans.response.R; -import io.wdd.common.beans.response.ResultStat; +import io.wdd.common.response.R; +import io.wdd.common.response.ResultStat; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.springframework.amqp.rabbit.support.ListenerExecutionFailedException; diff --git a/common/src/main/java/io/wdd/common/handler/MyMessageSource.java b/server/src/main/java/io/wdd/common/handler/MyMessageSource.java similarity index 100% rename from common/src/main/java/io/wdd/common/handler/MyMessageSource.java rename to server/src/main/java/io/wdd/common/handler/MyMessageSource.java diff --git a/common/src/main/java/io/wdd/common/handler/MyRuntimeException.java b/server/src/main/java/io/wdd/common/handler/MyRuntimeException.java similarity index 95% rename from common/src/main/java/io/wdd/common/handler/MyRuntimeException.java rename to server/src/main/java/io/wdd/common/handler/MyRuntimeException.java index 21f0df6..7ae4135 100644 --- a/common/src/main/java/io/wdd/common/handler/MyRuntimeException.java +++ b/server/src/main/java/io/wdd/common/handler/MyRuntimeException.java @@ -1,6 +1,6 @@ package io.wdd.common.handler; -import io.wdd.common.beans.response.ResultStat; +import io.wdd.common.response.ResultStat; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/response/R.java b/server/src/main/java/io/wdd/common/response/R.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/response/R.java rename to server/src/main/java/io/wdd/common/response/R.java index 25d6bf2..b16421e 100644 --- a/common/src/main/java/io/wdd/common/beans/response/R.java +++ b/server/src/main/java/io/wdd/common/response/R.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.response; 
+package io.wdd.common.response; import lombok.Data; diff --git a/common/src/main/java/io/wdd/common/beans/response/ResultStat.java b/server/src/main/java/io/wdd/common/response/ResultStat.java similarity index 92% rename from common/src/main/java/io/wdd/common/beans/response/ResultStat.java rename to server/src/main/java/io/wdd/common/response/ResultStat.java index d28fe6e..1e5def6 100644 --- a/common/src/main/java/io/wdd/common/beans/response/ResultStat.java +++ b/server/src/main/java/io/wdd/common/response/ResultStat.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.response; +package io.wdd.common.response; public enum ResultStat { diff --git a/common/src/main/java/io/wdd/common/utils/DataUnit.java b/server/src/main/java/io/wdd/common/utils/DataUnit.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/DataUnit.java rename to server/src/main/java/io/wdd/common/utils/DataUnit.java diff --git a/common/src/main/java/io/wdd/common/utils/FormatUtils.java b/server/src/main/java/io/wdd/common/utils/FormatUtils.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/FormatUtils.java rename to server/src/main/java/io/wdd/common/utils/FormatUtils.java diff --git a/common/src/main/java/io/wdd/common/utils/FunctionReader.java b/server/src/main/java/io/wdd/common/utils/FunctionReader.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/FunctionReader.java rename to server/src/main/java/io/wdd/common/utils/FunctionReader.java diff --git a/common/src/main/java/io/wdd/common/utils/MessageUtils.java b/server/src/main/java/io/wdd/common/utils/MessageUtils.java similarity index 93% rename from common/src/main/java/io/wdd/common/utils/MessageUtils.java rename to server/src/main/java/io/wdd/common/utils/MessageUtils.java index faf2a79..b53ad13 100644 --- a/common/src/main/java/io/wdd/common/utils/MessageUtils.java +++ b/server/src/main/java/io/wdd/common/utils/MessageUtils.java @@ -1,8 +1,8 @@ package io.wdd.common.utils; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.rabbitmq.OctopusMessage; import io.wdd.common.handler.MyRuntimeException; +import io.wdd.rpc.message.OctopusMessage; import org.springframework.amqp.core.Message; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; diff --git a/common/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java b/server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java rename to server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java diff --git a/common/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java b/server/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java similarity index 94% rename from common/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java rename to server/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java index 4463848..14ef8d5 100644 --- a/common/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java +++ b/server/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java @@ -8,7 +8,6 @@ import org.springframework.amqp.rabbit.core.RabbitTemplate; import org.springframework.amqp.support.converter.Jackson2JsonMessageConverter; import org.springframework.amqp.support.converter.MessageConverter; import org.springframework.context.annotation.Bean; -import 
org.springframework.context.annotation.Configuration; import javax.annotation.Resource; import java.text.SimpleDateFormat; diff --git a/common/src/main/java/io/wdd/common/utils/TimeUtils.java b/server/src/main/java/io/wdd/common/utils/TimeUtils.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/TimeUtils.java rename to server/src/main/java/io/wdd/common/utils/TimeUtils.java diff --git a/server/src/main/java/io/wdd/func/controller/OSSController.java b/server/src/main/java/io/wdd/func/controller/OSSController.java index a164feb..9cb6cc8 100644 --- a/server/src/main/java/io/wdd/func/controller/OSSController.java +++ b/server/src/main/java/io/wdd/func/controller/OSSController.java @@ -7,7 +7,7 @@ import com.amazonaws.services.s3.model.S3Object; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.func.oss.config.OctopusObjectSummary; import io.wdd.func.oss.config.OssConfig; import io.wdd.func.oss.service.OSSCoreService; diff --git a/server/src/main/java/io/wdd/func/controller/XrayController.java b/server/src/main/java/io/wdd/func/controller/XrayController.java index b4b11e5..dfff82a 100644 --- a/server/src/main/java/io/wdd/func/controller/XrayController.java +++ b/server/src/main/java/io/wdd/func/controller/XrayController.java @@ -4,7 +4,7 @@ package io.wdd.func.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.func.xray.beans.node.ProxyNode; import io.wdd.func.xray.beans.node.XrayConfigInfo; import io.wdd.func.xray.service.XrayCallAgent; diff --git a/common/src/main/java/io/wdd/common/beans/agent/AgentOperationMessage.java b/server/src/main/java/io/wdd/rpc/agent/AgentOperationMessage.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/agent/AgentOperationMessage.java rename to server/src/main/java/io/wdd/rpc/agent/AgentOperationMessage.java index 787dc54..ebdfb2f 100644 --- a/common/src/main/java/io/wdd/common/beans/agent/AgentOperationMessage.java +++ b/server/src/main/java/io/wdd/rpc/agent/AgentOperationMessage.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.agent; +package io.wdd.rpc.agent; import com.fasterxml.jackson.annotation.JsonFormat; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/agent/AgentOperationType.java b/server/src/main/java/io/wdd/rpc/agent/AgentOperationType.java similarity index 86% rename from common/src/main/java/io/wdd/common/beans/agent/AgentOperationType.java rename to server/src/main/java/io/wdd/rpc/agent/AgentOperationType.java index 3919fad..799c9a3 100644 --- a/common/src/main/java/io/wdd/common/beans/agent/AgentOperationType.java +++ b/server/src/main/java/io/wdd/rpc/agent/AgentOperationType.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.agent; +package io.wdd.rpc.agent; public enum AgentOperationType { diff --git a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java index 408d47a..59c0f25 100644 --- a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java @@ -3,11 +3,9 @@ package io.wdd.rpc.agent; import com.fasterxml.jackson.core.JsonProcessingException; import 
com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.agent.AgentOperationMessage; -import io.wdd.common.beans.agent.AgentOperationType; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import io.wdd.rpc.message.handler.AsyncWaitOMResult; import io.wdd.rpc.message.handler.OMReplayContend; import io.wdd.rpc.message.sender.OMessageToAgentSender; diff --git a/server/src/main/java/io/wdd/rpc/controller/AgentController.java b/server/src/main/java/io/wdd/rpc/controller/AgentController.java index ed1c9c4..ddcea7d 100644 --- a/server/src/main/java/io/wdd/rpc/controller/AgentController.java +++ b/server/src/main/java/io/wdd/rpc/controller/AgentController.java @@ -2,7 +2,7 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.rpc.agent.OctopusAgentService; import io.wdd.server.beans.vo.ServerInfoVO; import org.springframework.web.bind.annotation.GetMapping; diff --git a/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java b/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java index 7d2ba39..71e00c0 100644 --- a/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java +++ b/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java @@ -3,7 +3,7 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.rpc.execute.result.BuildStreamReader; import io.wdd.rpc.execute.service.AsyncExecutionService; import io.wdd.rpc.execute.service.SyncExecutionService; diff --git a/server/src/main/java/io/wdd/rpc/controller/SchedulerController.java b/server/src/main/java/io/wdd/rpc/controller/SchedulerController.java index c06d1d1..6c50cd8 100644 --- a/server/src/main/java/io/wdd/rpc/controller/SchedulerController.java +++ b/server/src/main/java/io/wdd/rpc/controller/SchedulerController.java @@ -4,7 +4,7 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.rpc.scheduler.beans.ScriptSchedulerVO; import io.wdd.rpc.scheduler.service.QuartzSchedulerService; import org.quartz.JobDetail; diff --git a/server/src/main/java/io/wdd/rpc/controller/StatusController.java b/server/src/main/java/io/wdd/rpc/controller/StatusController.java index bcaf7b4..d4190dc 100644 --- a/server/src/main/java/io/wdd/rpc/controller/StatusController.java +++ b/server/src/main/java/io/wdd/rpc/controller/StatusController.java @@ -3,7 +3,7 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.rpc.init.ServerCacheAgentStatus; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PostMapping; diff --git a/common/src/main/java/io/wdd/common/beans/executor/ExecutionMessage.java b/server/src/main/java/io/wdd/rpc/execute/ExecutionMessage.java 
similarity index 98% rename from common/src/main/java/io/wdd/common/beans/executor/ExecutionMessage.java rename to server/src/main/java/io/wdd/rpc/execute/ExecutionMessage.java index 74848f4..7b0e3e4 100644 --- a/common/src/main/java/io/wdd/common/beans/executor/ExecutionMessage.java +++ b/server/src/main/java/io/wdd/rpc/execute/ExecutionMessage.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.executor; +package io.wdd.rpc.execute; import com.fasterxml.jackson.annotation.JsonProperty; import io.wdd.common.utils.TimeUtils; diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java index dee0fa2..5c831cc 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java @@ -1,6 +1,6 @@ package io.wdd.rpc.execute.service; -import io.wdd.common.beans.rabbitmq.OctopusMessage; +import io.wdd.rpc.message.OctopusMessage; import java.util.HashMap; import java.util.List; diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java index 3d8bafc..f2cc84a 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java @@ -2,11 +2,11 @@ package io.wdd.rpc.execute.service; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.executor.ExecutionMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.execute.ExecutionMessage; import io.wdd.rpc.execute.config.ExecutionLog; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import io.wdd.rpc.message.sender.OMessageToAgentSender; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; diff --git a/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java b/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java index 1295831..b0cba48 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java @@ -9,7 +9,6 @@ import io.wdd.server.service.ExecutionLogService; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections.CollectionUtils; import org.springframework.context.annotation.Lazy; -import org.springframework.stereotype.Service; import javax.annotation.PostConstruct; import javax.annotation.Resource; diff --git a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java index 06cb143..0ef3060 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java @@ -1,7 +1,7 @@ package io.wdd.rpc.execute.service; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import io.wdd.rpc.message.handler.AsyncWaitOMResult; import 
io.wdd.rpc.message.handler.OMReplayContend; import lombok.extern.slf4j.Slf4j; diff --git a/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java b/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java index 26fabf3..8bdbe49 100644 --- a/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java +++ b/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java @@ -3,11 +3,11 @@ package io.wdd.rpc.init; import com.fasterxml.jackson.databind.ObjectMapper; import com.rabbitmq.client.Channel; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; -import io.wdd.common.beans.status.AgentStatus; import io.wdd.common.handler.MyRuntimeException; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import io.wdd.rpc.message.sender.OMessageToAgentSender; +import io.wdd.rpc.status.AgentStatus; import io.wdd.server.beans.vo.ServerInfoVO; import io.wdd.server.utils.DaemonDatabaseOperator; import lombok.SneakyThrows; diff --git a/server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java b/server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java index 23496c7..fac1296 100644 --- a/server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java @@ -1,8 +1,8 @@ package io.wdd.rpc.init; -import io.wdd.common.beans.status.AgentHealthyStatusEnum; import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.status.AgentHealthyStatusEnum; import io.wdd.server.beans.vo.ServerInfoVO; import io.wdd.server.coreService.CoreServerService; import lombok.extern.slf4j.Slf4j; @@ -15,7 +15,8 @@ import javax.annotation.Resource; import java.util.*; import java.util.stream.Collectors; -import static io.wdd.common.beans.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; +import static io.wdd.rpc.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; + /** * Server启动或者运行的时候,需要初 缓存一系列的信息 diff --git a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java b/server/src/main/java/io/wdd/rpc/message/OctopusMessage.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java rename to server/src/main/java/io/wdd/rpc/message/OctopusMessage.java index 9dba47d..2027258 100644 --- a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java +++ b/server/src/main/java/io/wdd/rpc/message/OctopusMessage.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.rabbitmq; +package io.wdd.rpc.message; import com.fasterxml.jackson.annotation.JsonFormat; diff --git a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java b/server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java similarity index 86% rename from common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java rename to server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java index 7c69c68..ec7b52c 100644 --- a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java +++ b/server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.rabbitmq; +package io.wdd.rpc.message; public enum OctopusMessageType { diff --git a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java b/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java index 0f29808..7f88542 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java +++ 
b/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java @@ -1,6 +1,6 @@ package io.wdd.rpc.message.handler; -import io.wdd.common.beans.rabbitmq.OctopusMessage; +import io.wdd.rpc.message.OctopusMessage; import io.wdd.server.config.ServerCommonPool; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; diff --git a/server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java b/server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java index 0abd5dc..d7a8fe2 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java @@ -3,8 +3,8 @@ package io.wdd.rpc.message.handler; import com.fasterxml.jackson.annotation.JsonFormat; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; diff --git a/server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java b/server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java index 570ff21..6d8053e 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java @@ -2,8 +2,8 @@ package io.wdd.rpc.message.handler; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.rabbitmq.OctopusMessage; import io.wdd.common.handler.MyRuntimeException; +import io.wdd.rpc.message.OctopusMessage; import lombok.extern.slf4j.Slf4j; import org.springframework.amqp.core.Message; import org.springframework.amqp.rabbit.annotation.RabbitHandler; diff --git a/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java b/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java index 777d208..e4e5fbc 100644 --- a/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java +++ b/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java @@ -2,10 +2,10 @@ package io.wdd.rpc.message.sender; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; import io.wdd.common.handler.MyRuntimeException; import io.wdd.rpc.init.InitRabbitMQConfig; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.springframework.amqp.rabbit.core.RabbitTemplate; diff --git a/server/src/main/java/io/wdd/rpc/scheduler/config/ExecutionJob.java b/server/src/main/java/io/wdd/rpc/scheduler/config/ExecutionJob.java index e273ab4..ab3c9dc 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/config/ExecutionJob.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/config/ExecutionJob.java @@ -6,7 +6,6 @@ import io.wdd.server.utils.SpringUtils; import org.apache.commons.lang3.StringUtils; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; -import org.springframework.scheduling.annotation.Async; import org.springframework.scheduling.quartz.QuartzJobBean; import org.springframework.util.ReflectionUtils; diff --git a/server/src/main/java/io/wdd/rpc/scheduler/config/QuartzSchedulerUtils.java 
b/server/src/main/java/io/wdd/rpc/scheduler/config/QuartzSchedulerUtils.java index 2e6e8b7..eb89986 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/config/QuartzSchedulerUtils.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/config/QuartzSchedulerUtils.java @@ -1,7 +1,7 @@ package io.wdd.rpc.scheduler.config; -import io.wdd.common.beans.executor.ExecutionMessage; import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.execute.ExecutionMessage; import io.wdd.rpc.scheduler.beans.ScriptSchedulerDTO; import org.quartz.Scheduler; import org.quartz.SchedulerException; diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java index a461729..1ca87c6 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java @@ -1,7 +1,7 @@ package io.wdd.rpc.scheduler.service.status; -import io.wdd.common.beans.status.OctopusStatusMessage; +import io.wdd.rpc.status.OctopusStatusMessage; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; import org.springframework.util.CollectionUtils; @@ -10,8 +10,8 @@ import javax.annotation.Resource; import java.util.List; import java.util.stream.Collectors; -import static io.wdd.common.beans.status.OctopusStatusMessage.METRIC_STATUS_MESSAGE_TYPE; import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.status.OctopusStatusMessage.METRIC_STATUS_MESSAGE_TYPE; /** * 收集OctopusAgent的运行Metric信息 diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentStatusStreamReader.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentStatusStreamReader.java index fee04e6..f0edfa4 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentStatusStreamReader.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentStatusStreamReader.java @@ -4,7 +4,7 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.status.AgentStatus; +import io.wdd.rpc.status.AgentStatus; import lombok.Getter; import lombok.Setter; import lombok.extern.slf4j.Slf4j; diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java index 6e71a63..b0dd670 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java @@ -2,11 +2,11 @@ package io.wdd.rpc.scheduler.service.status; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; -import io.wdd.common.beans.status.OctopusStatusMessage; import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import io.wdd.rpc.message.sender.OMessageToAgentSender; +import io.wdd.rpc.status.OctopusStatusMessage; import org.springframework.stereotype.Service; import javax.annotation.Resource; diff --git 
a/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java index 8614d1b..69385f6 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java @@ -1,9 +1,9 @@ package io.wdd.rpc.scheduler.service.status; -import io.wdd.common.beans.status.OctopusStatusMessage; import io.wdd.common.utils.TimeUtils; import io.wdd.rpc.init.ServerCacheAgentStatus; import io.wdd.rpc.scheduler.service.BuildStatusScheduleTask; +import io.wdd.rpc.status.OctopusStatusMessage; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections.CollectionUtils; import org.springframework.context.annotation.Lazy; @@ -16,9 +16,9 @@ import java.util.List; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static io.wdd.common.beans.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; -import static io.wdd.common.beans.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE; import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; +import static io.wdd.rpc.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE; /** * 更新频率被类 BuildStatusScheduleTask.class控制 diff --git a/common/src/main/java/io/wdd/common/beans/status/AgentHealthyStatusEnum.java b/server/src/main/java/io/wdd/rpc/status/AgentHealthyStatusEnum.java similarity index 89% rename from common/src/main/java/io/wdd/common/beans/status/AgentHealthyStatusEnum.java rename to server/src/main/java/io/wdd/rpc/status/AgentHealthyStatusEnum.java index 0494909..ef794e7 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AgentHealthyStatusEnum.java +++ b/server/src/main/java/io/wdd/rpc/status/AgentHealthyStatusEnum.java @@ -1,7 +1,6 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.Getter; -import lombok.Setter; /** * AgentHealthy状态描述实体类 diff --git a/common/src/main/java/io/wdd/common/beans/status/AgentStatus.java b/server/src/main/java/io/wdd/rpc/status/AgentStatus.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/status/AgentStatus.java rename to server/src/main/java/io/wdd/rpc/status/AgentStatus.java index 489a48d..83a6a90 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/status/AgentStatus.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/status/AgentSystemInfo.java b/server/src/main/java/io/wdd/rpc/status/AgentSystemInfo.java similarity index 97% rename from common/src/main/java/io/wdd/common/beans/status/AgentSystemInfo.java rename to server/src/main/java/io/wdd/rpc/status/AgentSystemInfo.java index d9056e9..89f5abc 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AgentSystemInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/AgentSystemInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import io.wdd.common.utils.TimeUtils; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/status/AppStatusEnum.java b/server/src/main/java/io/wdd/rpc/status/AppStatusEnum.java similarity index 92% rename from 
common/src/main/java/io/wdd/common/beans/status/AppStatusEnum.java rename to server/src/main/java/io/wdd/rpc/status/AppStatusEnum.java index 2173f30..e627994 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AppStatusEnum.java +++ b/server/src/main/java/io/wdd/rpc/status/AppStatusEnum.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; public enum AppStatusEnum { diff --git a/common/src/main/java/io/wdd/common/beans/status/AppStatusInfo.java b/server/src/main/java/io/wdd/rpc/status/AppStatusInfo.java similarity index 90% rename from common/src/main/java/io/wdd/common/beans/status/AppStatusInfo.java rename to server/src/main/java/io/wdd/rpc/status/AppStatusInfo.java index b8f647f..fd3abb0 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AppStatusInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/AppStatusInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/common/src/main/java/io/wdd/common/beans/status/CpuInfo.java b/server/src/main/java/io/wdd/rpc/status/CpuInfo.java similarity index 99% rename from common/src/main/java/io/wdd/common/beans/status/CpuInfo.java rename to server/src/main/java/io/wdd/rpc/status/CpuInfo.java index 8be8979..43cb652 100644 --- a/common/src/main/java/io/wdd/common/beans/status/CpuInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/CpuInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/common/src/main/java/io/wdd/common/beans/status/CpuTicks.java b/server/src/main/java/io/wdd/rpc/status/CpuTicks.java similarity index 97% rename from common/src/main/java/io/wdd/common/beans/status/CpuTicks.java rename to server/src/main/java/io/wdd/rpc/status/CpuTicks.java index 6d4d587..2065ee7 100644 --- a/common/src/main/java/io/wdd/common/beans/status/CpuTicks.java +++ b/server/src/main/java/io/wdd/rpc/status/CpuTicks.java @@ -1,8 +1,7 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; import lombok.Data; -import lombok.Getter; import lombok.NoArgsConstructor; import oshi.hardware.CentralProcessor; import oshi.util.Util; diff --git a/common/src/main/java/io/wdd/common/beans/status/DiskInfo.java b/server/src/main/java/io/wdd/rpc/status/DiskInfo.java similarity index 98% rename from common/src/main/java/io/wdd/common/beans/status/DiskInfo.java rename to server/src/main/java/io/wdd/rpc/status/DiskInfo.java index 085e0a2..291617a 100644 --- a/common/src/main/java/io/wdd/common/beans/status/DiskInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/DiskInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import io.wdd.common.utils.FormatUtils; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/status/MemoryInfo.java b/server/src/main/java/io/wdd/rpc/status/MemoryInfo.java similarity index 97% rename from common/src/main/java/io/wdd/common/beans/status/MemoryInfo.java rename to server/src/main/java/io/wdd/rpc/status/MemoryInfo.java index 528e6ea..1afe27b 100644 --- a/common/src/main/java/io/wdd/common/beans/status/MemoryInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/MemoryInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import io.wdd.common.utils.FormatUtils; import lombok.AllArgsConstructor; diff --git 
a/common/src/main/java/io/wdd/common/beans/status/MetricStatus.java b/server/src/main/java/io/wdd/rpc/status/MetricStatus.java similarity index 86% rename from common/src/main/java/io/wdd/common/beans/status/MetricStatus.java rename to server/src/main/java/io/wdd/rpc/status/MetricStatus.java index 7b980fd..2f7bae5 100644 --- a/common/src/main/java/io/wdd/common/beans/status/MetricStatus.java +++ b/server/src/main/java/io/wdd/rpc/status/MetricStatus.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.Data; diff --git a/common/src/main/java/io/wdd/common/beans/status/NetworkInfo.java b/server/src/main/java/io/wdd/rpc/status/NetworkInfo.java similarity index 99% rename from common/src/main/java/io/wdd/common/beans/status/NetworkInfo.java rename to server/src/main/java/io/wdd/rpc/status/NetworkInfo.java index 71a93ff..d540e28 100644 --- a/common/src/main/java/io/wdd/common/beans/status/NetworkInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/NetworkInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import io.wdd.common.utils.FormatUtils; diff --git a/common/src/main/java/io/wdd/common/beans/status/OctopusStatusMessage.java b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java similarity index 96% rename from common/src/main/java/io/wdd/common/beans/status/OctopusStatusMessage.java rename to server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java index d58d77a..37f62a7 100644 --- a/common/src/main/java/io/wdd/common/beans/status/OctopusStatusMessage.java +++ b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/server/src/main/java/io/wdd/server/controller/AppController.java b/server/src/main/java/io/wdd/server/controller/AppController.java index a7f4c08..f9a5d3a 100644 --- a/server/src/main/java/io/wdd/server/controller/AppController.java +++ b/server/src/main/java/io/wdd/server/controller/AppController.java @@ -1,9 +1,9 @@ package io.wdd.server.controller; +import io.wdd.common.response.R; import io.wdd.server.beans.vo.AppInfoVO; import io.wdd.server.coreService.CoreAppService; -import io.wdd.common.beans.response.R; import org.springframework.validation.annotation.Validated; import org.springframework.web.bind.annotation.*; diff --git a/server/src/main/java/io/wdd/server/controller/DomainController.java b/server/src/main/java/io/wdd/server/controller/DomainController.java index cd41c3c..21c00d3 100644 --- a/server/src/main/java/io/wdd/server/controller/DomainController.java +++ b/server/src/main/java/io/wdd/server/controller/DomainController.java @@ -1,9 +1,9 @@ package io.wdd.server.controller; +import io.wdd.common.response.R; import io.wdd.server.beans.po.DomainInfoPO; import io.wdd.server.beans.vo.DomainInfoVO; import io.wdd.server.coreService.CoreDomainService; -import io.wdd.common.beans.response.R; import org.springframework.validation.annotation.Validated; import org.springframework.web.bind.annotation.*; diff --git a/server/src/main/java/io/wdd/server/controller/ServerController.java b/server/src/main/java/io/wdd/server/controller/ServerController.java index 2a03dfa..411f0eb 100644 --- a/server/src/main/java/io/wdd/server/controller/ServerController.java +++ b/server/src/main/java/io/wdd/server/controller/ServerController.java @@ -4,7 +4,7 @@ package io.wdd.server.controller; import io.swagger.annotations.Api; 
import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.server.beans.po.DomainInfoPO; import io.wdd.server.beans.po.ServerInfoPO; import io.wdd.server.beans.vo.AppInfoVO; diff --git a/server/src/main/resources/application.yml b/server/src/main/resources/application.yml new file mode 100644 index 0000000..ce2cee7 --- /dev/null +++ b/server/src/main/resources/application.yml @@ -0,0 +1,161 @@ +server: + port: 9999 + +spring: + main: + allow-circular-references: true + allow-bean-definition-overriding: true + rabbitmq: + host: 150.230.198.103 + port: 20672 + username: boge + password: boge14@Level5 + virtual-host: / + listener: + simple: + retry: + # ack failed will reentrant the Rabbit Listener + max-attempts: 2 + enabled: true + # retry interval unit ms + max-interval: 65000 + initial-interval: 65000 + redis: + host: 146.56.147.12 + port: 21370 + database: 0 + password: boge14@Level5 + # cluster: + # nodes: + # - 43.154.83.213:21370 + # - 43.154.83.213:21371 + # - 43.154.83.213:21372 + # - 43.154.83.213:21373 + # - 43.154.83.213:21374 + # - 43.154.83.213:21375 + # # 获取失败 最大重定向次数 + # max-redirects: 3 + # timeout: 50000 + #如果用以前的jedis,可以把下面的lettuce换成jedis即可 + lettuce: + pool: + # 连接池最大连接数默认值为8 + max-active: 16 + # 连接池最大阻塞时间(使用负值表示没有限制)默认值为-1 + max-wait: -1 + # 连接池中最大空闲连接数默认值为8 + max-idle: 10 + # 连接池中的最小空闲连接数,默认值为0 + min-idle: 10 + time-between-eviction-runs: 50000 + datasource: + driver-class-name: com.mysql.cj.jdbc.Driver + url: jdbc:mysql://140.238.63.37:21306/wdd_server?autoReconnect=true&useSSL=false&useUnicode=true&characterEncoding=UTF-8&serverTimezone=GMT%2B8 + username: root + password: boge14@Level5 + type: com.zaxxer.hikari.HikariDataSource + hikari: + minimum-idle: 3 + # 空闲连接存活最大时间,默认600000(10分钟) + idle-timeout: 180000 + # 连接池最大连接数,默认是10 + maximum-pool-size: 5 + # 此属性控制从池返回的连接的默认自动提交行为,默认值:true + auto-commit: true + connection-test-query: SELECT 1 + # 最大文件上传 + servlet: + multipart: + max-file-size: 500MB + max-request-size: 500MB + +mybatis-plus: + type-aliases-package: io.wdd.server.beans.po + global-config: + db-column-underline: true + db-config: + # modify ethe id strategy + id-type: assign_id + # logic delete field globally + logicDeleteField: isDelete + logic-not-delete-value: 0 + logic-delete-value: 1 + banner: false + configuration: + # 希望知道所有的sql是怎么执行的, 配置输出日志 + #log-impl: org.apache.ibatis.logging.stdout.StdOutImpl + log-impl: org.apache.ibatis.logging.nologging.NoLoggingImpl + # 数据库下划线--实体类也是下划线 需要为false + map-underscore-to-camel-case: true + # 一级缓存的 缓存级别默认为 session,如果要关闭一级缓存可以设置为 statement + local-cache-scope: session + # 是否开启二级缓存 + cache-enabled: false + # 默认地址为 classpath*:/mapper/**/*.xml +# mapper-locations: classpath*:/real-mappers/**/*.xml + +octopus: + message: + # agent boot up default common exchange + init_exchange: InitExchange + # server will send message to agent using this common queue + init_to_server: InitToServer + # agent boot up default common exchange routing key + init_to_server_key: InitToServerKey + # server will receive message from agent using this common queue + init_from_server: InitFromServer + # agent boot up default common exchange routing key + init_from_server_key: InitFromServerKey + # initialization register time out (unit ms) default is 5 min + init_ttl: "3000000" + # Octopus Exchange Name == server comunicate with agent + octopus_exchange: OctopusExchange + # Octopus Message To Server == all agent send info to server 
queue and topic + octopus_to_server: OctopusToServer + executor: + name: executor-functions + status: + name: octopus-agent + healthy: + type: cron + cron: 10 */1 * * * ? * + start-delay: 30 + metric: + pinch: 20 + +oss: + # 这里只是因为需要一个层级,不一定下面的都是oracle + oracle: + seoul1: + namespace: cnk8d6fazu16 + region: ap-seoul-1 + key: aed62d24d85e2da809ce02bf272420ba4ed74820 + secret: rQdEcn69K049+JkA1IGoQmC1k8zma8zfWvZvVS0h144= + capacity: 10737418240 + seoul2: + namespace: cncvl8ro2rbf + region: ap-seoul-1 + key: 9e413c6e66269bc65d7ec951d93ba9c6a9781f6e + secret: dkXD7PysjrhsTKfNIbKupUmtxdfOvYCyLXf0MXa4hnU= + capacity: 10737418240 + tokyo1: + namespace: nrjcs6lwr9vy + region: ap-tokyo-1 + key: 0584c323d6c8d24cc2fc8c2d716a4ea35bb99ae6 + secret: +xicO9obeqzC5a/WY1rXvl5pMWSWbVIpMt3Qv691NtU= + capacity: 10737418240 + phoenix1: + namespace: axqr6x6t48wm + region: us-phoenix-1 + key: e87a121f1548b244c7bd649a1f0ca35195d46cf2 + secret: uT+NIgJiKPjSaPT8EVUw3xbLSCv/CFMFuebVauznafk= + capacity: 10737418240 + london1: + namespace: lrmzslyt8jzs + region: uk-london-1 + key: 57671886f9f1bcc5ac7235b5a0e6123f5ca271b3 + secret: ukWae6TXjID2Wqxh+7mAPAf4busZPGzwAh/WDKZ5MOQ= + capacity: 10737418240 + + + From e6c71612aac59c0827c954ea9ba6e6396bb353a1 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Wed, 14 Jun 2023 11:41:01 +0800 Subject: [PATCH 23/45] [Octopus] modify project to SpringBoot version ok --- server/src/main/resources/application.yml | 16 ++++++++-------- .../{bootstrap.yml => bootstrap.yml-back} | 0 .../source/octopus/run-enviroment-compose.yaml | 4 ++-- .../wdd/source/octopus/simple-middleware.yaml | 17 +++++++++++++++++ 4 files changed, 27 insertions(+), 10 deletions(-) rename server/src/main/resources/{bootstrap.yml => bootstrap.yml-back} (100%) diff --git a/server/src/main/resources/application.yml b/server/src/main/resources/application.yml index ce2cee7..e1c8ba9 100644 --- a/server/src/main/resources/application.yml +++ b/server/src/main/resources/application.yml @@ -6,10 +6,10 @@ spring: allow-circular-references: true allow-bean-definition-overriding: true rabbitmq: - host: 150.230.198.103 + host: 42.192.52.227 port: 20672 username: boge - password: boge14@Level5 + password: boge8tingH virtual-host: / listener: simple: @@ -21,10 +21,10 @@ spring: max-interval: 65000 initial-interval: 65000 redis: - host: 146.56.147.12 + host: 42.192.52.227 port: 21370 database: 0 - password: boge14@Level5 + password: boge8tingH # cluster: # nodes: # - 43.154.83.213:21370 @@ -34,7 +34,7 @@ spring: # - 43.154.83.213:21374 # - 43.154.83.213:21375 # # 获取失败 最大重定向次数 - # max-redirects: 3 + # max-redirects: 3 # timeout: 50000 #如果用以前的jedis,可以把下面的lettuce换成jedis即可 lettuce: @@ -50,9 +50,9 @@ spring: time-between-eviction-runs: 50000 datasource: driver-class-name: com.mysql.cj.jdbc.Driver - url: jdbc:mysql://140.238.63.37:21306/wdd_server?autoReconnect=true&useSSL=false&useUnicode=true&characterEncoding=UTF-8&serverTimezone=GMT%2B8 - username: root - password: boge14@Level5 + url: jdbc:mysql://42.192.52.227:21306/octopus_server?autoReconnect=true&useSSL=false&useUnicode=true&characterEncoding=UTF-8&serverTimezone=GMT%2B8 + username: boge + password: boge8tingH type: com.zaxxer.hikari.HikariDataSource hikari: minimum-idle: 3 diff --git a/server/src/main/resources/bootstrap.yml b/server/src/main/resources/bootstrap.yml-back similarity index 100% rename from server/src/main/resources/bootstrap.yml rename to server/src/main/resources/bootstrap.yml-back diff --git 
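The octopus.message block above pins down the exchange, queue, and routing-key names that server and agents must agree on. A minimal sketch of how such a block could be bound to a typed bean with Spring Boot relaxed binding (init_exchange maps onto initExchange, and so on); the class and package names are illustrative and not part of this patch series:

package io.wdd.rpc.config; // hypothetical package, for illustration only

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

// Binds the "octopus.message" block of application.yml to a typed bean.
@Data
@Configuration
@ConfigurationProperties(prefix = "octopus.message")
public class OctopusMessageProperties {
    private String initExchange;       // InitExchange
    private String initToServer;       // InitToServer
    private String initToServerKey;    // InitToServerKey
    private String initFromServer;     // InitFromServer
    private String initFromServerKey;  // InitFromServerKey
    private long initTtl;              // "3000000" -- registration timeout in ms
    private String octopusExchange;    // OctopusExchange
    private String octopusToServer;    // OctopusToServer
}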
a/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml b/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml index 107a94e..2bb7487 100644 --- a/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml +++ b/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml @@ -24,13 +24,13 @@ services: interval: 15s timeout: 5s retries: 6 - redis-master: + redis: # https://hub.docker.com/r/bitnami/redis image: bitnami/redis:6.2.11-debian-11-r1 networks: - app-tier environment: - - REDIS_PASSWORD=Superwmm.23 + - REDIS_PASSWORD=boge8tingH # Redis 6.0 features a new multi-threading model - REDIS_IO_THREADS=4 - REDIS_IO_THREADS_DO_READS=yes diff --git a/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml b/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml index 8934227..8b422b4 100644 --- a/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml +++ b/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml @@ -46,6 +46,23 @@ services: - RABBITMQ_MANAGEMENT_PORT_NUMBER=15672 volumes: - 'rabbitmq_data:/bitnami/rabbitmq/mnesia' + redis: + # https://hub.docker.com/r/bitnami/redis + image: bitnami/redis:6.2.11-debian-11-r1 + networks: + - app-tier + environment: + - REDIS_PASSWORD=boge8tingH + # Redis 6.0 features a new multi-threading model + - REDIS_IO_THREADS=4 + - REDIS_IO_THREADS_DO_READS=yes + - REDIS_PORT_NUMBER=6379 + - REDIS_REPLICATION_MODE=master + #- REDIS_DISABLE_COMMANDS=FLUSHDB,FLUSHALL + ports: + - '21370:6379' + volumes: + - 'redis_data:/bitnami/redis/data' volumes: mysql_data: From ec3d5bba1e569174c7547812232883ef6df5b9ee Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 15 Jun 2023 10:37:45 +0800 Subject: [PATCH 24/45] [Exec] modify sync and async execution function --- .../wdd/func/xray/service/XrayCallAgent.java | 6 +- .../rpc/agent/OctopusAgentServiceImpl.java | 4 +- .../rpc/controller/ExecutionController.java | 189 +++---- .../wdd/rpc/controller/StatusController.java | 8 +- .../service/AsyncExecutionService.java | 65 +-- .../service/AsyncExecutionServiceImpl.java | 466 +++++++----------- .../execute/service/SyncExecutionService.java | 65 ++- .../service/SyncExecutionServiceImpl.java | 448 ++++++++++------- ...atus.java => AgentStatusCacheService.java} | 2 +- .../message/handler/AsyncWaitOMResult.java | 14 +- .../scheduler/job/AgentStatusMonitorJob.java | 6 +- .../script/AgentApplyScheduledScript.java | 6 +- .../status/AgentRuntimeMetricStatus.java | 2 +- ...Status.java => CheckAgentAliveStatus.java} | 17 +- .../io/wdd/rpc/status/beans/AgentStatus.java | 8 +- .../beans/{CPUInfo.java => CPUStatus.java} | 4 +- .../beans/{DiskInfo.java => DiskStatus.java} | 2 +- .../{MemoryInfo.java => MemoryStatus.java} | 2 +- .../{NetworkInfo.java => NetworkStatus.java} | 2 +- .../io/wdd/server/ServerApplicationTests.java | 6 +- 20 files changed, 668 insertions(+), 654 deletions(-) rename server/src/main/java/io/wdd/rpc/init/{ServerCacheAgentStatus.java => AgentStatusCacheService.java} (99%) rename server/src/main/java/io/wdd/rpc/scheduler/service/status/{MonitorAllAgentStatus.java => CheckAgentAliveStatus.java} (88%) rename server/src/main/java/io/wdd/rpc/status/beans/{CPUInfo.java => CPUStatus.java} (97%) rename server/src/main/java/io/wdd/rpc/status/beans/{DiskInfo.java => DiskStatus.java} (96%) rename server/src/main/java/io/wdd/rpc/status/beans/{MemoryInfo.java => MemoryStatus.java} (94%) rename server/src/main/java/io/wdd/rpc/status/beans/{NetworkInfo.java => NetworkStatus.java} 
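Both compose files now publish the bitnami Redis container on host port 21370 with REDIS_PASSWORD set. A quick connectivity check over Lettuce (the driver behind the server's spring.redis.lettuce pool) is one way to verify the container after `docker compose up`; the 127.0.0.1 host below is a placeholder, the password and port are the ones from the compose files:

import io.lettuce.core.RedisClient;
import io.lettuce.core.api.StatefulRedisConnection;

public class RedisSmokeTest {
    public static void main(String[] args) {
        // redis://[password@]host:port/database -- matches the compose mapping '21370:6379'
        RedisClient client = RedisClient.create("redis://boge8tingH@127.0.0.1:21370/0");
        try (StatefulRedisConnection<String, String> conn = client.connect()) {
            // PING answers PONG if the container is up and the password matches
            System.out.println(conn.sync().ping());
        } finally {
            client.shutdown();
        }
    }
}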
(96%) diff --git a/server/src/main/java/io/wdd/func/xray/service/XrayCallAgent.java b/server/src/main/java/io/wdd/func/xray/service/XrayCallAgent.java index 672c22f..90fece5 100644 --- a/server/src/main/java/io/wdd/func/xray/service/XrayCallAgent.java +++ b/server/src/main/java/io/wdd/func/xray/service/XrayCallAgent.java @@ -3,7 +3,7 @@ package io.wdd.func.xray.service; import io.wdd.common.utils.TimeUtils; import io.wdd.func.oss.config.OctopusObjectSummary; import io.wdd.func.xray.beans.node.ProxyNode; -import io.wdd.rpc.execute.service.AsyncExecutionService; +import io.wdd.rpc.execute.service.SyncExecutionService; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Service; @@ -83,7 +83,7 @@ public class XrayCallAgent { } @Resource - AsyncExecutionService executionService; + SyncExecutionService executionService; /** * 为代理链的每一个节点 构建Xray配置更新命令,然后发送至对应的Agent中 @@ -131,7 +131,7 @@ public class XrayCallAgent { ); // 向Agent发送命令,执行更新操作! - String resultKey = executionService.SendCommandToAgent( + String resultKey = executionService.SyncSendCommandToAgent( proxyNode.getAgentTopicName(), updateCommandType, null, diff --git a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java index 59c0f25..beed801 100644 --- a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java @@ -26,8 +26,8 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_SET; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_SET; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; import static io.wdd.rpc.message.handler.OMessageHandlerServer.AGENT_LATEST_VERSION; import static io.wdd.rpc.message.handler.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; diff --git a/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java b/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java index 71e00c0..7d29abf 100644 --- a/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java +++ b/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java @@ -15,11 +15,12 @@ import org.springframework.web.bind.annotation.RestController; import javax.annotation.Nullable; import javax.annotation.Resource; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_LIST; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; @RestController @RequestMapping("/octopus/server/executor") @@ -27,11 +28,11 @@ import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAM public class ExecutionController { @Resource - AsyncExecutionService asyncExecutionService; + SyncExecutionService syncExecutionService; @Resource BuildStreamReader buildStreamReader; @Resource - 
SyncExecutionService syncExecutionService; + AsyncExecutionService asyncExecutionService; @PostMapping("/command/one") @ApiOperation("[命令] [异步]- 单台主机") @@ -44,8 +45,8 @@ public class ExecutionController { @ApiParam(name = "isDurationTask", value = "是否是持久化任务") @RequestParam(value = "isDurationTask", defaultValue = "false", required = false) boolean isDurationTask ) { - String streamKey = asyncExecutionService - .SendCommandToAgent( + ArrayList streamKeyList = asyncExecutionService + .AsyncSendCommandToAgentComplete( topicName, type, commandList, @@ -55,12 +56,13 @@ public class ExecutionController { isDurationTask ); - return R.ok(streamKey); + + return R.ok(streamKeyList.toString()); } @PostMapping("/command/batch") @ApiOperation("[命令] [异步] - 批量主机") - public R> patchCommandToAgentList( + public R>> patchCommandToAgentList( @RequestParam(value = "topicNameList") @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList, @RequestParam(value = "commandList", required = false) @@ -71,19 +73,20 @@ public class ExecutionController { @ApiParam(name = "isDurationTask", value = "是否是持久化任务") @RequestParam(value = "isDurationTask", defaultValue = "false", required = false) boolean isDurationTask ) { - return R.ok(asyncExecutionService.SendCommandToAgentComplete( + List> arrayListList = asyncExecutionService.AsyncSendCommandToAgentComplete( topicNameList, type, commandList, completeCommandList, isDurationTask - )); + ); + return R.ok(arrayListList); } @PostMapping("/command/all") @ApiOperation("[命令] [异步] - 所有的主机") - public R> patchCommandToAllAgent( + public R>> patchCommandToAllAgent( @RequestParam(value = "commandList", required = false) @ApiParam(name = "commandList", value = "命令行") @Nullable List commandList, @RequestParam(value = "completeCommandList", required = false) @@ -92,7 +95,7 @@ public class ExecutionController { @ApiParam(name = "isDurationTask", value = "是否是持久化任务") @RequestParam(value = "isDurationTask", defaultValue = "false", required = false) boolean isDurationTask ) { - return R.ok(asyncExecutionService.SendCommandToAgentComplete( + return R.ok(asyncExecutionService.AsyncSendCommandToAgentComplete( ALL_AGENT_TOPIC_NAME_LIST, type, commandList, @@ -103,7 +106,7 @@ public class ExecutionController { @PostMapping("/command/healthy") @ApiOperation("[命令] [异步] - 健康的主机") - public R> patchCommandToHealthyAgent( + public R>> patchCommandToHealthyAgent( @RequestParam(value = "commandList", required = false) @ApiParam(name = "commandList", value = "命令行") @Nullable List commandList, @RequestParam(value = "completeCommandList", required = false) @@ -112,7 +115,7 @@ public class ExecutionController { @ApiParam(name = "isDurationTask", value = "是否是持久化任务") @RequestParam(value = "isDurationTask", defaultValue = "false", required = false) boolean isDurationTask ) { - return R.ok(asyncExecutionService.SendCommandToAgentComplete( + return R.ok(asyncExecutionService.AsyncSendCommandToAgentComplete( ALL_HEALTHY_AGENT_TOPIC_NAME_LIST, type, commandList, @@ -133,18 +136,18 @@ public class ExecutionController { ) { return R.ok( - syncExecutionService.SyncSendCommandToAgent( + Collections.singletonList(syncExecutionService.SyncSendCommandToAgentComplete( topicName, type, commandList, completeCommandList - ) + )) ); } @PostMapping("/command/sync/batch") @ApiOperation("[命令] [同步] - 批量-等待命令结果") - public R>> SyncPatchCommandToAgentBatch( + public R> SyncPatchCommandToAgentBatch( @RequestParam(value = "topicNameList") @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList, 
@RequestParam(value = "commandList", required = false) @@ -168,7 +171,7 @@ public class ExecutionController { @PostMapping("/command/sync/all") @ApiOperation("[命令] [同步] - 全部-同步等待命令结果") - public R>> SyncPatchCommandToAgentAll( + public R> SyncPatchCommandToAgentAll( @RequestParam(value = "commandList", required = false) @ApiParam(name = "commandList", value = "命令行") @Nullable List commandList, @RequestParam(value = "completeCommandList", required = false) @@ -206,81 +209,81 @@ public class ExecutionController { // auth required - @PostMapping("/function/update") - @ApiOperation("升级") - public R> AgentUpdate( - @RequestParam(value = "topicNameList") - @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList - ) { - - return R.ok( - asyncExecutionService - .SendCommandToAgent( - topicNameList, - "AgentUpdate", - null, - false, - null, - true - )); - } - - @PostMapping("/function/reboot") - @ApiOperation("重启") - public R> AgentReboot( - @RequestParam(value = "topicNameList") - @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList - ) { - - return R.ok( - asyncExecutionService - .SendCommandToAgent( - topicNameList, - "AgentReboot", - null, - false, - null, - true - )); - } - - @PostMapping("/function/shutdown") - @ApiOperation("关闭") - public R> AgentShutdown( - @RequestParam(value = "topicNameList") - @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList - ) { - - return R.ok( - asyncExecutionService - .SendCommandToAgent( - topicNameList, - "AgentShutdown", - null, - false, - null, - true - )); - } - - @PostMapping("/function/bootUp") - @ApiOperation("重新部署") - public R> AgentBootUp( - @RequestParam(value = "topicNameList") - @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList - ) { - - return R.ok( - asyncExecutionService - .SendCommandToAgent( - topicNameList, - "AgentBootUp", - null, - false, - null, - true - )); - } +// @PostMapping("/function/update") +// @ApiOperation("升级") +// public R> AgentUpdate( +// @RequestParam(value = "topicNameList") +// @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList +// ) { +// +// return R.ok( +// syncExecutionService +// .SyncSendCommandToAgent( +// topicNameList, +// "AgentUpdate", +// null, +// false, +// null, +// true +// )); +// } +// +// @PostMapping("/function/reboot") +// @ApiOperation("重启") +// public R> AgentReboot( +// @RequestParam(value = "topicNameList") +// @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList +// ) { +// +// return R.ok( +// asyncExecutionService +// .SyncSendCommandToAgent( +// topicNameList, +// "AgentReboot", +// null, +// false, +// null, +// true +// )); +// } +// +// @PostMapping("/function/shutdown") +// @ApiOperation("关闭") +// public R> AgentShutdown( +// @RequestParam(value = "topicNameList") +// @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList +// ) { +// +// return R.ok( +// syncExecutionService +// .SyncSendCommandToAgent( +// topicNameList, +// "AgentShutdown", +// null, +// false, +// null, +// true +// )); +// } +// +// @PostMapping("/function/bootUp") +// @ApiOperation("重新部署") +// public R> AgentBootUp( +// @RequestParam(value = "topicNameList") +// @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList +// ) { +// +// return R.ok( +// asyncExecutionService +// .SyncSendCommandToAgent( +// topicNameList, +// "AgentBootUp", +// null, +// false, +// null, +// true +// )); +// } } diff --git a/server/src/main/java/io/wdd/rpc/controller/StatusController.java 
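All of the executor endpoints above take their arguments as request parameters rather than a JSON body. A hedged sketch of driving the single-host endpoint with RestTemplate, assuming the server listens on port 9999 as configured in application.yml; the agent topic name is a placeholder:

import org.springframework.web.client.RestTemplate;
import org.springframework.web.util.UriComponentsBuilder;

public class ExecutorClientSketch {
    public static void main(String[] args) {
        // Parameters travel as query params, mirroring the @RequestParam signature above.
        String uri = UriComponentsBuilder
                .fromHttpUrl("http://127.0.0.1:9999/octopus/server/executor/command/one")
                .queryParam("topicName", "some-agent-topic") // placeholder topic
                .queryParam("commandList", "uname -a")       // repeat the param for more commands
                .queryParam("isDurationTask", "false")
                .toUriString();

        // POST with an empty body; the response wraps the stream keys in the project's R envelope.
        String body = new RestTemplate().postForObject(uri, null, String.class);
        System.out.println(body);
    }
}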
b/server/src/main/java/io/wdd/rpc/controller/StatusController.java index d4190dc..cffe3df 100644 --- a/server/src/main/java/io/wdd/rpc/controller/StatusController.java +++ b/server/src/main/java/io/wdd/rpc/controller/StatusController.java @@ -4,7 +4,7 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.wdd.common.response.R; -import io.wdd.rpc.init.ServerCacheAgentStatus; +import io.wdd.rpc.init.AgentStatusCacheService; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestMapping; @@ -14,7 +14,7 @@ import javax.annotation.Resource; import java.util.List; import java.util.Map; -import static io.wdd.rpc.init.ServerCacheAgentStatus.*; +import static io.wdd.rpc.init.AgentStatusCacheService.*; @RestController @@ -23,7 +23,7 @@ import static io.wdd.rpc.init.ServerCacheAgentStatus.*; public class StatusController { @Resource - ServerCacheAgentStatus serverCacheAgentStatus; + AgentStatusCacheService agentStatusCacheService; @ApiOperation("[ Agent-状态 ] Map") @GetMapping("/agent/status") @@ -76,7 +76,7 @@ public class StatusController { public R>> ManualUpdateAgentStatus() { // 手动调用更新 - serverCacheAgentStatus.updateAgentStatusMapCache(); + agentStatusCacheService.updateAgentStatusMapCache(); return R.ok(STATUS_AGENT_LIST_MAP); } diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java index 5c831cc..48dbf4c 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java @@ -1,20 +1,23 @@ package io.wdd.rpc.execute.service; -import io.wdd.rpc.message.OctopusMessage; - +import java.util.ArrayList; import java.util.HashMap; import java.util.List; - +/** + * 同步命令执行的核心类 + * 需要等待命令执行完毕,完后返回相应的结果 + */ public interface AsyncExecutionService { - String SendCommandToAgent(String agentTopicName, String command); + /** + * ------------------------ Sync Command Executor ------------------------------ + */ + ArrayList AsyncSendCommandToAgent(String agentTopicName, List commandList); - String SendCommandToAgent(String agentTopicName, List commandList); + ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList); - String SendCommandToAgent(String agentTopicName, String type, List commandList); - - List SendCommandToAgent(List agentTopicNameList, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask); + List> AsyncSendCommandToAgent(List agentTopicNameList, String type, List commandList); /** * 调用 单行命令脚本的 最底层函数 @@ -27,7 +30,7 @@ public interface AsyncExecutionService { * @param durationTask * @return */ - String SendCommandToAgent( + ArrayList AsyncSendCommandToAgent( String agentTopicName, String type, List commandList, @@ -41,14 +44,21 @@ public interface AsyncExecutionService { * ------------------------------------------------- */ - String SendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete); + ArrayList AsyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> completeCommandList); + List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> completeCommandList, boolean isDurationTask); - List 
SendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> commandListComplete, boolean isDurationTask); + /** + * 通常为 页面定时脚本任务调用 + * + * @param agentTopicNameList 目标Agent的TopicName列表 + * @param type 任务类型 + * @param completeCommandList 完整的类型 + * @return 每个Agent只返回一个 ResultKey(Script脚本的结果全部拼接到一起),全部的resultKey + */ + List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList); - List SendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList); - /** * 通常为 页面定时脚本任务调用 * @@ -58,10 +68,10 @@ public interface AsyncExecutionService { * @param atnFutureKey 由于脚本任务为延迟调用,故需要提前生成未来的ResultKey * @return 每个Agent只返回一个 ResultKey(Script脚本的结果全部拼接到一起),全部的resultKey */ - List SendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey); + List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey); - String SendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey); + ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey); /** * 调用 完整脚本的 最底层函数 @@ -74,30 +84,7 @@ public interface AsyncExecutionService { * @param durationTask * @return resultKey 本次操作在Redis中记录的结果Key */ - String SendCommandToAgent( - String agentTopicName, - String type, - List commandList, - List> commandListComplete, - boolean needResultReplay, - String futureKey, - boolean durationTask - ); - - - /** - * 同步命令调用的方法 - * - * @param agentTopicName - * @param type - * @param commandList - * @param commandListComplete - * @param needResultReplay - * @param futureKey - * @param durationTask - * @return - */ - OctopusMessage AsyncCallSendCommandToAgent( + ArrayList AsyncSendCommandToAgentComplete( String agentTopicName, String type, List commandList, diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java index f2cc84a..a3686f6 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java @@ -1,108 +1,124 @@ package io.wdd.rpc.execute.service; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.execute.ExecutionMessage; -import io.wdd.rpc.execute.config.ExecutionLog; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; -import io.wdd.rpc.message.sender.OMessageToAgentSender; +import io.wdd.rpc.message.handler.AsyncWaitOMResult; +import io.wdd.rpc.message.handler.OMReplayContend; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.springframework.data.redis.core.RedisTemplate; import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.time.LocalDateTime; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_SET; - @Service @Slf4j public class AsyncExecutionServiceImpl implements AsyncExecutionService { - private static final String 
MANUAL_COMMAND_TYPE = "manual-command"; + private static final boolean COMMAND_EXEC_NEED_REPLAY = true; + + private static final OctopusMessageType CurrentAppOctopusMessageType = OctopusMessageType.EXECUTOR; @Resource - OMessageToAgentSender oMessageToAgentSender; + AsyncWaitOMResult asyncWaitOMResult; @Resource - ObjectMapper objectMapper; - @Resource - RedisTemplate redisTemplate; + SyncExecutionService asyncExecutionService; + + /** + * 一个命令执行的最长等待时间 + */ + int processMaxWaitSeconds = 10; @Override - public String SendCommandToAgent(String agentTopicName, String command) { - return this.SendCommandToAgent( - agentTopicName, - List.of(command) - ); - } + public ArrayList AsyncSendCommandToAgent(String agentTopicName, List commandList) { - @Override - public String SendCommandToAgent(String agentTopicName, List commandList) { - return this.SendCommandToAgent( + return this.AsyncSendCommandToAgentComplete( agentTopicName, - MANUAL_COMMAND_TYPE, - commandList - ); - } - - @Override - public String SendCommandToAgent(String agentTopicName, String type, List commandList) { - - return SendCommandToAgent( - agentTopicName, - type, - commandList, - false, null, - false - ); - - } - - @Override - public String SendCommandToAgent(String agentTopicName, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { - - return this.SendCommandToAgent( - agentTopicName, - type, commandList, null, - needResultReplay, - futureKey, - durationTask - ); - } - - @Override - public String SendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete) { - - return this.SendCommandToAgent( - agentTopicName, - type, - commandList, - commandListComplete, - false, + COMMAND_EXEC_NEED_REPLAY, null, false ); } @Override - public List SendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> commandListComplete, boolean isDurationTask) { + public ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList) { + + + return this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + null, + COMMAND_EXEC_NEED_REPLAY, + null, + false + ); + } + + @Override + public List> AsyncSendCommandToAgent(List agentTopicNameList, String type, List commandList) { + return agentTopicNameList .stream() .map( - agentTopicName -> this.SendCommandToAgent( + agentTopicName -> this.AsyncSendCommandToAgentComplete( agentTopicName, type, commandList, - commandListComplete, - false, + null, + COMMAND_EXEC_NEED_REPLAY, + null, + false + ) + ) + .collect(Collectors.toList()); + } + + @Override + public ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { + + return this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + null, + COMMAND_EXEC_NEED_REPLAY, + futureKey, + false + ); + } + + @Override + public ArrayList AsyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> completeCommandList) { + return this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + completeCommandList, + COMMAND_EXEC_NEED_REPLAY, + null, + false + ); + } + + @Override + public List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> completeCommandList, boolean isDurationTask) { + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.AsyncSendCommandToAgentComplete( + 
agentTopicName, + type, + commandList, + completeCommandList, + COMMAND_EXEC_NEED_REPLAY, null, isDurationTask ) @@ -111,31 +127,60 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { } @Override - public String SendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey) { + public List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList) { - return this.SendCommandToAgent( - agentTopicName, - type, - commandList, - commandListComplete, - false, - futureKey, - false - ); + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + null, + completeCommandList, + COMMAND_EXEC_NEED_REPLAY, + null, + false + ) + ) + .collect(Collectors.toList()); } @Override - public String SendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + public List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey) { + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + null, + completeCommandList, + COMMAND_EXEC_NEED_REPLAY, + atnFutureKey.get(agentTopicName), + false + ) + ) + .collect(Collectors.toList()); + } - String resultKey = futureKey; - // 判定是否是 FutureKey - if (null == futureKey) { - resultKey = ExecutionMessage.GetResultKey(agentTopicName); - } + @Override + public ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey) { + return this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + commandListComplete, + COMMAND_EXEC_NEED_REPLAY, + futureKey, + false + ); + } - // 调用最底层的方法 - this.AsyncCallSendCommandToAgent( + @Override + public ArrayList AsyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + + OctopusMessage octopusMessage = asyncExecutionService.AsyncCallSendCommandToAgent( agentTopicName, type, commandList, @@ -145,225 +190,66 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { durationTask ); - return resultKey; - } + LocalDateTime initTime = octopusMessage.getInit_time(); - @Override - public OctopusMessage AsyncCallSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + ArrayList result = new ArrayList<>(); - // 检查agentTopicName是否存在 - if (!ALL_AGENT_TOPIC_NAME_SET.contains(agentTopicName)) { - log.error( - "agentTopicName异常! 输入为 => {}", - agentTopicName + // 构造消息等待对象 + int commandCount = 1; + if (null != commandListComplete) { + commandCount = Math.max( + commandListComplete.size(), + 1 ); - return null; - //throw new MyRuntimeException("agentTopicName异常!" 
+ agentTopicName); } - // 归一化type - if (StringUtils.isEmpty(type)) { - type = MANUAL_COMMAND_TYPE; - } - String resultKey = futureKey; - // 判定是否是 FutureKey - if (null == futureKey) { - resultKey = ExecutionMessage.GetResultKey(agentTopicName); - } - - // 构造 Execution Command对应的消息体 - ExecutionMessage executionMessage = this - .generateExecutionMessage( - type, - commandList, - resultKey, - commandListComplete, - needResultReplay, - durationTask - ); - OctopusMessage octopusMessage = this.generateOctopusMessage( - agentTopicName, - executionMessage + OMReplayContend omReplayContend = OMReplayContend.build( + commandCount, + CurrentAppOctopusMessageType, + initTime ); + CountDownLatch countDownLatch = omReplayContend.getCountDownLatch(); - // send the message - oMessageToAgentSender.send(octopusMessage); - - // set up the stream read group - String group = redisTemplate - .opsForStream() - .createGroup( - resultKey, - resultKey - ); - - log.debug( - "set consumer group [{}] for the stream key with => [ {} ]", - group, - resultKey - ); - - // change the redis stream listener container - // createStreamReader.registerStreamReader(COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER, resultKey); - - // construct the persistent Bean - /*ExecutionLog executionLog = buildPersistentLogBeanFromOctopusMessage( - octopusMessage, - executionMessage - );*/ - // send resultKey to ExecutionResultDaemonHandler - // 当批量执行,产生大量的resultKey的时候,会出现线程爆炸,导致所有的全部失效 - /*WAIT_EXECUTION_RESULT_LIST.put( - resultKey, - executionLog - );*/ - - // help gc - executionMessage = null; - - return octopusMessage; - } - - private OctopusMessage generateOctopusMessage(String agentTopicName, ExecutionMessage executionMessage) { + // 开始等待结果 + asyncWaitOMResult.waitFor(omReplayContend); + // 监听结果 try { + boolean await = countDownLatch.await( + processMaxWaitSeconds, + TimeUnit.SECONDS + ); - return OctopusMessage - .builder() - .type(OctopusMessageType.EXECUTOR) - .init_time(TimeUtils.currentFormatTime()) - .uuid(agentTopicName) - .content( - objectMapper.writeValueAsString(executionMessage) + } catch (InterruptedException e) { + throw new RuntimeException(e); + } finally { + + // 等待所有的结果返回 + // 停止等待结果 + asyncWaitOMResult.stopWaiting(omReplayContend); + + // 解析结果 + omReplayContend + .getReplayOMList() + .stream() + .map( + om -> { + log.debug( + "replay message is => {}", + om + ); + + return (ArrayList) om.getResult(); + } ) - .build(); + .forEachOrdered( + singleResult -> result.addAll(singleResult) + ); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); } + // 返回 + return result; } - - private ExecutionLog buildPersistentLogBeanFromOctopusMessage(OctopusMessage octopusMessage, ExecutionMessage executionMessage) { - ExecutionLog executionLog = new ExecutionLog(); - executionLog.setAgentTopicName(octopusMessage.getUuid()); - executionLog.setResultKey((String) octopusMessage.getContent()); - executionLog.setCommandList(String.valueOf(executionMessage.getSingleLineCommand())); - executionLog.setType(executionMessage.getType()); - executionLog.setResultKey(executionMessage.getResultKey()); - return executionLog; - } - - - @Override - public List SendCommandToAgent(List agentagentTopicNameList, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { - - return agentagentTopicNameList - .stream() - .map( - agentTopicName -> this - .SendCommandToAgent - ( - agentTopicName, - type, - commandList, - null, - needResultReplay, - futureKey, - durationTask - ) - ) - 
.collect(Collectors.toList()); - } - - /** - * @param agentTopicNameList 目标Agent的TopicName列表 - * @param type 任务类型 - * @param completeCommandList 完整的类型 - * @return - */ - @Override - public List SendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList) { - - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SendCommandToAgentComplete( - agentTopicName, - type, - null, - completeCommandList - ) - ) - .collect(Collectors.toList()); - - } - - @Override - public List SendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey) { - - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SendCommandToAgent( - agentTopicName, - type, - null, - completeCommandList, - atnFutureKey.getOrDefault( - agentTopicName, - null - ) - ) - ) - .collect(Collectors.toList()); - } - - - @Deprecated - private OctopusMessage generateOctopusMessage(String agentTopicName, String resultKey, String type, List commandList, List> commandListComplete) { - - - ExecutionMessage executionMessage = this.generateExecutionMessage( - type, - commandList, - resultKey, - commandListComplete, - false, - false - ); - - String executionMessageString; - - try { - executionMessageString = objectMapper.writeValueAsString(executionMessage); - - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } - - return OctopusMessage - .builder() - .type(OctopusMessageType.EXECUTOR) - .init_time(LocalDateTime.now()) - .content(executionMessageString) - .uuid(agentTopicName) - .build(); - } - - private ExecutionMessage generateExecutionMessage(String type, List commandList, String resultKey, List> commandListComplete, boolean needResultReplay, boolean durationTask) { - - return ExecutionMessage - .builder() - .resultKey(resultKey) - .type(type) - .singleLineCommand(commandList) - .multiLineCommand(commandListComplete) - .needResultReplay(needResultReplay) - .durationTask(durationTask) - .build(); - } - - } diff --git a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionService.java b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionService.java index ed1548d..fc52137 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionService.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionService.java @@ -1,23 +1,20 @@ package io.wdd.rpc.execute.service; -import java.util.ArrayList; +import io.wdd.rpc.message.OctopusMessage; + import java.util.HashMap; import java.util.List; -/** - * 同步命令执行的核心类 - * 需要等待命令执行完毕,完后返回相应的结果 - */ + public interface SyncExecutionService { - /** - * ------------------------ Sync Command Executor ------------------------------ - */ - ArrayList SyncSendCommandToAgent(String agentTopicName, List commandList); + String SyncSendCommandToAgent(String agentTopicName, String command); - ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList); + String SyncSendCommandToAgent(String agentTopicName, List commandList); - List> SyncSendCommandToAgent(List agentTopicNameList, String type, List commandList); + String SyncSendCommandToAgent(String agentTopicName, String type, List commandList); + + List SyncSendCommandToAgent(List agentTopicNameList, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask); /** * 调用 单行命令脚本的 最底层函数 @@ -30,7 +27,7 @@ public interface SyncExecutionService { * @param durationTask * @return */ - ArrayList SyncSendCommandToAgent( + String 
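The reworked AsyncExecutionServiceImpl above parks the caller on a CountDownLatch until the agent's replies arrive or processMaxWaitSeconds elapses, then tears the waiter down in the finally block. A stripped-down sketch of that wait-for-reply pattern, independent of the project's OMReplayContend and AsyncWaitOMResult types (all names below are illustrative):

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// One pending request: a latch sized to the expected reply count plus a thread-safe result list.
class PendingReplies {
    final CountDownLatch latch;
    final List<String> replies = new CopyOnWriteArrayList<>();
    PendingReplies(int expected) { this.latch = new CountDownLatch(expected); }
}

public class ReplyCollector {
    private final Map<String, PendingReplies> waiting = new ConcurrentHashMap<>();

    // Caller side: register interest, block up to maxWaitSeconds, then clean up.
    public List<String> awaitReplies(String matchKey, int expected, int maxWaitSeconds)
            throws InterruptedException {
        PendingReplies pending = new PendingReplies(expected);
        waiting.put(matchKey, pending);
        try {
            pending.latch.await(maxWaitSeconds, TimeUnit.SECONDS); // returns false on timeout
        } finally {
            waiting.remove(matchKey); // stop waiting whether we timed out or not
        }
        return pending.replies;
    }

    // Listener side: called for every reply; an unknown key means the waiter already timed out.
    public void onReply(String matchKey, String result) {
        PendingReplies pending = waiting.get(matchKey);
        if (pending == null) {
            return; // late reply, waiter is gone
        }
        pending.replies.add(result);
        pending.latch.countDown();
    }
}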
SyncSendCommandToAgent( String agentTopicName, String type, List commandList, @@ -44,21 +41,14 @@ public interface SyncExecutionService { * ------------------------------------------------- */ - ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> completeCommandList); + String SyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete); - List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> completeCommandList, boolean isDurationTask); - /** - * 通常为 页面定时脚本任务调用 - * - * @param agentTopicNameList 目标Agent的TopicName列表 - * @param type 任务类型 - * @param completeCommandList 完整的类型 - * @return 每个Agent只返回一个 ResultKey(Script脚本的结果全部拼接到一起),全部的resultKey - */ - List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList); + List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> commandListComplete, boolean isDurationTask); + List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList); + /** * 通常为 页面定时脚本任务调用 * @@ -68,10 +58,10 @@ public interface SyncExecutionService { * @param atnFutureKey 由于脚本任务为延迟调用,故需要提前生成未来的ResultKey * @return 每个Agent只返回一个 ResultKey(Script脚本的结果全部拼接到一起),全部的resultKey */ - List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey); + List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey); - ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey); + String SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey); /** * 调用 完整脚本的 最底层函数 @@ -84,7 +74,30 @@ public interface SyncExecutionService { * @param durationTask * @return resultKey 本次操作在Redis中记录的结果Key */ - ArrayList SyncSendCommandToAgent( + String SyncSendCommandToAgent( + String agentTopicName, + String type, + List commandList, + List> commandListComplete, + boolean needResultReplay, + String futureKey, + boolean durationTask + ); + + + /** + * 同步命令调用的方法 + * + * @param agentTopicName + * @param type + * @param commandList + * @param commandListComplete + * @param needResultReplay + * @param futureKey + * @param durationTask + * @return + */ + OctopusMessage AsyncCallSendCommandToAgent( String agentTopicName, String type, List commandList, diff --git a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java index 0ef3060..8b18c60 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java @@ -1,115 +1,100 @@ package io.wdd.rpc.execute.service; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.execute.ExecutionMessage; +import io.wdd.rpc.execute.config.ExecutionLog; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; -import io.wdd.rpc.message.handler.AsyncWaitOMResult; -import io.wdd.rpc.message.handler.OMReplayContend; +import io.wdd.rpc.message.sender.OMessageToAgentSender; import lombok.extern.slf4j.Slf4j; +import 
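The atnFutureKey parameter above exists because scheduled scripts fire later, so their ResultKeys must be generated ahead of dispatch. A sketch of pre-building that topic-to-key map; the key format here is invented for illustration, since ExecutionMessage.GetResultKey is not shown in this patch:

import java.util.HashMap;
import java.util.List;
import java.util.UUID;

public class FutureKeySketch {
    // Pre-compute one future result key per agent topic so the key can be stored
    // alongside the scheduled job before the command is actually sent.
    static HashMap<String, String> buildFutureKeys(List<String> agentTopicNames) {
        HashMap<String, String> atnFutureKey = new HashMap<>();
        for (String topic : agentTopicNames) {
            // illustrative format only; the project derives keys via ExecutionMessage.GetResultKey
            atnFutureKey.put(topic, topic + ":" + UUID.randomUUID());
        }
        return atnFutureKey;
    }

    public static void main(String[] args) {
        System.out.println(buildFutureKeys(List.of("agent-a", "agent-b")));
    }
}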
org.apache.commons.lang3.StringUtils; +import org.springframework.data.redis.core.RedisTemplate; import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.time.LocalDateTime; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_SET; + + @Service @Slf4j public class SyncExecutionServiceImpl implements SyncExecutionService { - private static final boolean COMMAND_EXEC_NEED_REPLAY = true; - - private static final OctopusMessageType CurrentAppOctopusMessageType = OctopusMessageType.EXECUTOR; + private static final String MANUAL_COMMAND_TYPE = "manual-command"; @Resource - AsyncWaitOMResult asyncWaitOMResult; + OMessageToAgentSender oMessageToAgentSender; @Resource - AsyncExecutionService asyncExecutionService; - - /** - * 一个命令执行的最长等待时间 - */ - int processMaxWaitSeconds = 10; + ObjectMapper objectMapper; + @Resource + RedisTemplate redisTemplate; @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, List commandList) { - + public String SyncSendCommandToAgent(String agentTopicName, String command) { return this.SyncSendCommandToAgent( agentTopicName, - null, - commandList, - null, - COMMAND_EXEC_NEED_REPLAY, - null, - false + List.of(command) ); } @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList) { + public String SyncSendCommandToAgent(String agentTopicName, List commandList) { + return this.SyncSendCommandToAgent( + agentTopicName, + MANUAL_COMMAND_TYPE, + commandList + ); + } + @Override + public String SyncSendCommandToAgent(String agentTopicName, String type, List commandList) { + + return SyncSendCommandToAgent( + agentTopicName, + type, + commandList, + false, + null, + false + ); + + } + + @Override + public String SyncSendCommandToAgent(String agentTopicName, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { return this.SyncSendCommandToAgent( agentTopicName, type, commandList, null, - COMMAND_EXEC_NEED_REPLAY, - null, - false - ); - } - - @Override - public List> SyncSendCommandToAgent(List agentTopicNameList, String type, List commandList) { - - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SyncSendCommandToAgent( - agentTopicName, - type, - commandList, - null, - COMMAND_EXEC_NEED_REPLAY, - null, - false - ) - ) - .collect(Collectors.toList()); - } - - @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { - - return this.SyncSendCommandToAgent( - agentTopicName, - type, - commandList, - null, - COMMAND_EXEC_NEED_REPLAY, + needResultReplay, futureKey, - false + durationTask ); } @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> completeCommandList) { + public String SyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete) { + return this.SyncSendCommandToAgent( agentTopicName, type, commandList, - completeCommandList, - COMMAND_EXEC_NEED_REPLAY, + commandListComplete, + false, null, false ); } @Override - public List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> completeCommandList, boolean isDurationTask) 
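SyncExecutionServiceImpl keeps the fire-and-forget path: it serializes an ExecutionMessage into an OctopusMessage and hands it to OMessageToAgentSender. That sender is not part of this patch; with Spring AMQP it could plausibly look like the sketch below, where routing through OctopusExchange (octopus.message.octopus_exchange in application.yml) with the agent topic as routing key is an assumption, not something the diff confirms:

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.springframework.amqp.rabbit.core.RabbitTemplate;

// Minimal sender sketch -- NOT the project's OMessageToAgentSender.
public class OMessageSenderSketch {

    private final RabbitTemplate rabbitTemplate;
    private final ObjectMapper objectMapper = new ObjectMapper();

    public OMessageSenderSketch(RabbitTemplate rabbitTemplate) {
        this.rabbitTemplate = rabbitTemplate;
    }

    public void send(String agentTopicName, Object octopusMessage) throws JsonProcessingException {
        // Assumes each agent consumes a queue bound to OctopusExchange
        // with its own topic name as the routing key.
        rabbitTemplate.convertAndSend(
                "OctopusExchange",
                agentTopicName,
                objectMapper.writeValueAsString(octopusMessage)
        );
    }
}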
{ + public List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> commandListComplete, boolean isDurationTask) { return agentTopicNameList .stream() .map( @@ -117,8 +102,8 @@ public class SyncExecutionServiceImpl implements SyncExecutionService { agentTopicName, type, commandList, - completeCommandList, - COMMAND_EXEC_NEED_REPLAY, + commandListComplete, + false, null, isDurationTask ) @@ -127,60 +112,31 @@ public class SyncExecutionServiceImpl implements SyncExecutionService { } @Override - public List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList) { + public String SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey) { - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SyncSendCommandToAgent( - agentTopicName, - type, - null, - completeCommandList, - COMMAND_EXEC_NEED_REPLAY, - null, - false - ) - ) - .collect(Collectors.toList()); - - } - - @Override - public List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey) { - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SyncSendCommandToAgent( - agentTopicName, - type, - null, - completeCommandList, - COMMAND_EXEC_NEED_REPLAY, - atnFutureKey.get(agentTopicName), - false - ) - ) - .collect(Collectors.toList()); - } - - @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey) { return this.SyncSendCommandToAgent( agentTopicName, type, commandList, commandListComplete, - COMMAND_EXEC_NEED_REPLAY, + false, futureKey, false ); + } @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + public String SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { - OctopusMessage octopusMessage = asyncExecutionService.AsyncCallSendCommandToAgent( + String resultKey = futureKey; + // 判定是否是 FutureKey + if (null == futureKey) { + resultKey = ExecutionMessage.GetResultKey(agentTopicName); + } + + // 调用最底层的方法 + this.AsyncCallSendCommandToAgent( agentTopicName, type, commandList, @@ -190,65 +146,225 @@ public class SyncExecutionServiceImpl implements SyncExecutionService { durationTask ); - LocalDateTime initTime = octopusMessage.getInit_time(); - - ArrayList result = new ArrayList<>(); - - // 构造消息等待对象 - int commandCount = 1; - if (null != commandListComplete) { - commandCount = Math.max( - commandListComplete.size(), - 1 - ); - } - - OMReplayContend omReplayContend = OMReplayContend.build( - commandCount, - CurrentAppOctopusMessageType, - initTime - ); - CountDownLatch countDownLatch = omReplayContend.getCountDownLatch(); - - // 开始等待结果 - asyncWaitOMResult.waitFor(omReplayContend); - - // 监听结果 - try { - boolean await = countDownLatch.await( - processMaxWaitSeconds, - TimeUnit.SECONDS - ); - - } catch (InterruptedException e) { - throw new RuntimeException(e); - } finally { - - // 等待所有的结果返回 - // 停止等待结果 - asyncWaitOMResult.stopWaiting(omReplayContend); - - // 解析结果 - omReplayContend - .getReplayOMList() - .stream() - .map( - om -> { - log.debug( - "replay message is => {}", - om - ); - - return (ArrayList) om.getResult(); - } - ) - 
.forEachOrdered( - singleResult -> result.addAll(singleResult) - ); - - } - - // 返回 - return result; + return resultKey; } + + @Override + public OctopusMessage AsyncCallSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + + // 检查agentTopicName是否存在 + if (!ALL_AGENT_TOPIC_NAME_SET.contains(agentTopicName)) { + log.error( + "agentTopicName异常! 输入为 => {}", + agentTopicName + ); + return null; + //throw new MyRuntimeException("agentTopicName异常!" + agentTopicName); + } + + // 归一化type + if (StringUtils.isEmpty(type)) { + type = MANUAL_COMMAND_TYPE; + } + + String resultKey = futureKey; + // 判定是否是 FutureKey + if (null == futureKey) { + resultKey = ExecutionMessage.GetResultKey(agentTopicName); + } + + // 构造 Execution Command对应的消息体 + ExecutionMessage executionMessage = this + .generateExecutionMessage( + type, + commandList, + resultKey, + commandListComplete, + needResultReplay, + durationTask + ); + OctopusMessage octopusMessage = this.generateOctopusMessage( + agentTopicName, + executionMessage + ); + + // send the message + oMessageToAgentSender.send(octopusMessage); + + // set up the stream read group + String group = redisTemplate + .opsForStream() + .createGroup( + resultKey, + resultKey + ); + + log.debug( + "set consumer group [{}] for the stream key with => [ {} ]", + group, + resultKey + ); + + // change the redis stream listener container + // createStreamReader.registerStreamReader(COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER, resultKey); + + // construct the persistent Bean + /*ExecutionLog executionLog = buildPersistentLogBeanFromOctopusMessage( + octopusMessage, + executionMessage + );*/ + // send resultKey to ExecutionResultDaemonHandler + // 当批量执行,产生大量的resultKey的时候,会出现线程爆炸,导致所有的全部失效 + /*WAIT_EXECUTION_RESULT_LIST.put( + resultKey, + executionLog + );*/ + + // help gc + executionMessage = null; + + return octopusMessage; + } + + private OctopusMessage generateOctopusMessage(String agentTopicName, ExecutionMessage executionMessage) { + + try { + + return OctopusMessage + .builder() + .type(OctopusMessageType.EXECUTOR) + .init_time(TimeUtils.currentFormatTime()) + .uuid(agentTopicName) + .content( + objectMapper.writeValueAsString(executionMessage) + ) + .build(); + + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + + } + + private ExecutionLog buildPersistentLogBeanFromOctopusMessage(OctopusMessage octopusMessage, ExecutionMessage executionMessage) { + ExecutionLog executionLog = new ExecutionLog(); + executionLog.setAgentTopicName(octopusMessage.getUuid()); + executionLog.setResultKey((String) octopusMessage.getContent()); + executionLog.setCommandList(String.valueOf(executionMessage.getSingleLineCommand())); + executionLog.setType(executionMessage.getType()); + executionLog.setResultKey(executionMessage.getResultKey()); + return executionLog; + } + + + @Override + public List SyncSendCommandToAgent(List agentagentTopicNameList, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { + + return agentagentTopicNameList + .stream() + .map( + agentTopicName -> this + .SyncSendCommandToAgent + ( + agentTopicName, + type, + commandList, + null, + needResultReplay, + futureKey, + durationTask + ) + ) + .collect(Collectors.toList()); + } + + /** + * @param agentTopicNameList 目标Agent的TopicName列表 + * @param type 任务类型 + * @param completeCommandList 完整的类型 + * @return + */ + @Override + public List 
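AsyncCallSendCommandToAgent above creates a Redis Stream consumer group named after the resultKey, so the agent's output can later be consumed exactly once per group. A sketch of draining such a stream with Spring Data Redis; only the naming convention (createGroup(resultKey, resultKey)) comes from the patch, the read options are illustrative:

import java.time.Duration;
import java.util.List;
import org.springframework.data.redis.connection.stream.Consumer;
import org.springframework.data.redis.connection.stream.MapRecord;
import org.springframework.data.redis.connection.stream.ReadOffset;
import org.springframework.data.redis.connection.stream.StreamOffset;
import org.springframework.data.redis.connection.stream.StreamReadOptions;
import org.springframework.data.redis.core.StringRedisTemplate;

public class ResultStreamReaderSketch {

    // Read new entries for the consumer group created via createGroup(resultKey, resultKey).
    static void drain(StringRedisTemplate redisTemplate, String resultKey) {
        List<MapRecord<String, Object, Object>> records = redisTemplate.opsForStream().read(
                Consumer.from(resultKey, resultKey),                      // group and consumer share the key
                StreamReadOptions.empty().count(10).block(Duration.ofSeconds(2)),
                StreamOffset.create(resultKey, ReadOffset.lastConsumed()) // '>' -- only undelivered entries
        );
        if (records == null) {
            return; // nothing arrived within the block timeout
        }
        for (MapRecord<String, Object, Object> record : records) {
            System.out.println(record.getValue());
            // acknowledge so the entry leaves the group's pending entries list
            redisTemplate.opsForStream().acknowledge(resultKey, resultKey, record.getId());
        }
    }
}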
SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList) { + + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.SyncSendCommandToAgentComplete( + agentTopicName, + type, + null, + completeCommandList + ) + ) + .collect(Collectors.toList()); + + } + + @Override + public List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey) { + + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.SyncSendCommandToAgent( + agentTopicName, + type, + null, + completeCommandList, + atnFutureKey.getOrDefault( + agentTopicName, + null + ) + ) + ) + .collect(Collectors.toList()); + } + + + @Deprecated + private OctopusMessage generateOctopusMessage(String agentTopicName, String resultKey, String type, List commandList, List> commandListComplete) { + + + ExecutionMessage executionMessage = this.generateExecutionMessage( + type, + commandList, + resultKey, + commandListComplete, + false, + false + ); + + String executionMessageString; + + try { + executionMessageString = objectMapper.writeValueAsString(executionMessage); + + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + + return OctopusMessage + .builder() + .type(OctopusMessageType.EXECUTOR) + .init_time(LocalDateTime.now()) + .content(executionMessageString) + .uuid(agentTopicName) + .build(); + } + + private ExecutionMessage generateExecutionMessage(String type, List commandList, String resultKey, List> commandListComplete, boolean needResultReplay, boolean durationTask) { + + return ExecutionMessage + .builder() + .resultKey(resultKey) + .type(type) + .singleLineCommand(commandList) + .multiLineCommand(commandListComplete) + .needResultReplay(needResultReplay) + .durationTask(durationTask) + .build(); + } + + } diff --git a/server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java b/server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java similarity index 99% rename from server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java rename to server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java index fac1296..7cf3039 100644 --- a/server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java @@ -27,7 +27,7 @@ import static io.wdd.rpc.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; */ @Service @Slf4j -public class ServerCacheAgentStatus { +public class AgentStatusCacheService { /** * 存储所有的AgentTopicName的缓存 diff --git a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java b/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java index 7f88542..9ac3d53 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java @@ -27,12 +27,12 @@ public class AsyncWaitOMResult { * KEY -> replayMatchKey * VALUE -> OMReplayContend - 包含countDownLatch 和 result */ - private static final HashMap REPLAY_WAITING_TARGET = new HashMap<>(); + private static final HashMap OM_REPLAY_WAITING_TARGET_MAP = new HashMap<>(); public void waitFor(OMReplayContend omReplayContend) { // 向 REPLAY_CACHE_MAP中写入 Key - REPLAY_WAITING_TARGET.put( + OM_REPLAY_WAITING_TARGET_MAP.put( omReplayContend.getReplayMatchKey(), omReplayContend ); @@ -44,7 +44,7 @@ public class AsyncWaitOMResult { public void stopWaiting(OMReplayContend omReplayContend) { // 在调用线程的countDownLunch结束之后,关闭 清除 
REPLAY_CACHE_MAP 中的队列
-        REPLAY_WAITING_TARGET.remove(omReplayContend.getReplayMatchKey());
+        OM_REPLAY_WAITING_TARGET_MAP.remove(omReplayContend.getReplayMatchKey());
     }
@@ -86,16 +86,20 @@ public class AsyncWaitOMResult {
                 replayOMessage.getType(),
                 replayOMessage.getInit_time()
         );
-        if (!REPLAY_WAITING_TARGET.containsKey(matchKey)) {
+        if (!OM_REPLAY_WAITING_TARGET_MAP.containsKey(matchKey)) {
             // 没有这个Key,说明等待结果已经超时了,直接丢弃,然后继续循环
             // todo 错误的数据需要放置于某处
+            log.debug(
+                    "等待队列里没有该回复的结果key => {}",
+                    matchKey
+            );
             continue;
         }
         // Map中包含有Key,那么放置进去
-        OMReplayContend replayContend = REPLAY_WAITING_TARGET.get(matchKey);
+        OMReplayContend replayContend = OM_REPLAY_WAITING_TARGET_MAP.get(matchKey);
         replayContend
                 .getReplayOMList()
                 .add(replayOMessage);
diff --git a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java
index efa60c1..c20a183 100644
--- a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java
+++ b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java
@@ -1,7 +1,7 @@
 package io.wdd.rpc.scheduler.job;
 import io.wdd.rpc.scheduler.config.QuartzLogOperator;
-import io.wdd.rpc.scheduler.service.status.MonitorAllAgentStatus;
+import io.wdd.rpc.scheduler.service.status.CheckAgentAliveStatus;
 import org.quartz.JobExecutionContext;
 import org.quartz.JobExecutionException;
 import org.springframework.scheduling.quartz.QuartzJobBean;
@@ -11,7 +11,7 @@ public class AgentStatusMonitorJob extends QuartzJobBean {
     @Resource
-    MonitorAllAgentStatus monitorAllAgentStatus;
+    CheckAgentAliveStatus checkAgentAliveStatus;
     @Resource
     QuartzLogOperator quartzLogOperator;
@@ -23,7 +23,7 @@ public class AgentStatusMonitorJob extends QuartzJobBean {
         //JobDataMap jobDataMap = jobExecutionContext.getJobDetail().getJobDataMap();
         // actually execute the monitor service
-        monitorAllAgentStatus.go();
+        checkAgentAliveStatus.go();
         // log to somewhere
         quartzLogOperator.save();
diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/script/AgentApplyScheduledScript.java b/server/src/main/java/io/wdd/rpc/scheduler/service/script/AgentApplyScheduledScript.java
index 3329193..48682bb 100644
--- a/server/src/main/java/io/wdd/rpc/scheduler/service/script/AgentApplyScheduledScript.java
+++ b/server/src/main/java/io/wdd/rpc/scheduler/service/script/AgentApplyScheduledScript.java
@@ -1,7 +1,7 @@
 package io.wdd.rpc.scheduler.service.script;
-import io.wdd.rpc.execute.service.AsyncExecutionService;
+import io.wdd.rpc.execute.service.SyncExecutionService;
 import io.wdd.rpc.scheduler.beans.ScriptSchedulerDTO;
 import io.wdd.rpc.scheduler.config.QuartzSchedulerUtils;
 import lombok.extern.slf4j.Slf4j;
@@ -20,7 +20,7 @@ public class AgentApplyScheduledScript {
     @Resource
-    AsyncExecutionService asyncExecutionService;
+    SyncExecutionService asyncExecutionService;
     @Resource
     QuartzSchedulerUtils quartzSchedulerUtils;
@@ -46,7 +46,7 @@ public class AgentApplyScheduledScript {
         // 发送命令到Agent中
         List<String> resultKeyList = asyncExecutionService
-                .SendCommandToAgentComplete(
+                .SyncSendCommandToAgentComplete(
                         targetMachineList,
                         scriptType,
                         completeCommandList,
diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java
index 1ca87c6..40e13e0 100644
--- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java
+++ 
b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java @@ -10,7 +10,7 @@ import javax.annotation.Resource; import java.util.List; import java.util.stream.Collectors; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; import static io.wdd.rpc.status.OctopusStatusMessage.METRIC_STATUS_MESSAGE_TYPE; /** diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/CheckAgentAliveStatus.java similarity index 88% rename from server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java rename to server/src/main/java/io/wdd/rpc/scheduler/service/status/CheckAgentAliveStatus.java index 69385f6..ebe797e 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/CheckAgentAliveStatus.java @@ -1,7 +1,7 @@ package io.wdd.rpc.scheduler.service.status; import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.init.ServerCacheAgentStatus; +import io.wdd.rpc.init.AgentStatusCacheService; import io.wdd.rpc.scheduler.service.BuildStatusScheduleTask; import io.wdd.rpc.status.OctopusStatusMessage; import lombok.extern.slf4j.Slf4j; @@ -13,10 +13,11 @@ import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.util.HashMap; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; import static io.wdd.rpc.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; import static io.wdd.rpc.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE; @@ -37,7 +38,7 @@ import static io.wdd.rpc.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE @Service @Slf4j @Lazy -public class MonitorAllAgentStatus { +public class CheckAgentAliveStatus { private static final int MAX_WAIT_AGENT_REPORT_STATUS_TIME = 5; @Resource @@ -46,7 +47,7 @@ public class MonitorAllAgentStatus { CollectAgentStatus collectAgentStatus; @Resource - ServerCacheAgentStatus serverCacheAgentStatus; + AgentStatusCacheService agentStatusCacheService; @Resource BuildStatusScheduleTask buildStatusScheduleTask; @@ -57,7 +58,7 @@ public class MonitorAllAgentStatus { try { // 1. 获取所有注册的Agent 手动更新 - serverCacheAgentStatus.updateAllAgentTopicNameCache(); + agentStatusCacheService.updateAllAgentTopicNameCache(); if (CollectionUtils.isEmpty(ALL_AGENT_TOPIC_NAME_LIST)) { log.warn("[Scheduler] No Agent Registered ! End Up Status Monitor !"); return; @@ -67,6 +68,10 @@ public class MonitorAllAgentStatus { checkOrCreateRedisHealthyKey(); // 2.发送状态检查信息, agent需要update相应的HashMap的值 + // 2023年6月14日 2. 发送ping等待所有的Agent返回PONG, 然后进行redis的状态修改 + CountDownLatch aliveStatusCDL = new CountDownLatch(ALL_AGENT_TOPIC_NAME_LIST.size()); + + buildAndSendAgentHealthMessage(); // 3. 
休眠 MAX_WAIT_AGENT_REPORT_STATUS_TIME 秒 等待agent的状态上报 @@ -139,7 +144,7 @@ public class MonitorAllAgentStatus { String currentTimeString = TimeUtils.currentTimeString(); // 更新所有的缓存状态 - serverCacheAgentStatus.updateAgentStatusMapCache(); + agentStatusCacheService.updateAgentStatusMapCache(); // 执行Metric上报定时任务 buildStatusScheduleTask.buildAgentMetricScheduleTask(); diff --git a/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java b/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java index 4ab2767..c6bad73 100644 --- a/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java @@ -9,15 +9,15 @@ import lombok.NoArgsConstructor; public class AgentStatus { @JsonProperty("CPUStatus") - private CPUInfo cPUStatus; + private CPUStatus cpuStatus; @JsonProperty("MemoryStatus") - private MemoryInfo memoryStatus; + private MemoryStatus memoryStatus; @JsonProperty("NetworkStatus") - private NetworkInfo networkStatus; + private NetworkStatus networkStatus; @JsonProperty("DiskStatus") - private DiskInfo diskStatus; + private DiskStatus diskStatus; } diff --git a/server/src/main/java/io/wdd/rpc/status/beans/CPUInfo.java b/server/src/main/java/io/wdd/rpc/status/beans/CPUStatus.java similarity index 97% rename from server/src/main/java/io/wdd/rpc/status/beans/CPUInfo.java rename to server/src/main/java/io/wdd/rpc/status/beans/CPUStatus.java index 018bd04..add5821 100644 --- a/server/src/main/java/io/wdd/rpc/status/beans/CPUInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/beans/CPUStatus.java @@ -12,12 +12,12 @@ import java.util.List; @AllArgsConstructor @NoArgsConstructor @SuperBuilder(toBuilder = true) -public class CPUInfo { +public class CPUStatus { @JsonProperty("NumCores") private Integer numCores; - @JsonProperty("CPUInfo") + @JsonProperty("CPUStatus") private List cPUInfo; @JsonProperty("CPUPercent") private Double cPUPercent; diff --git a/server/src/main/java/io/wdd/rpc/status/beans/DiskInfo.java b/server/src/main/java/io/wdd/rpc/status/beans/DiskStatus.java similarity index 96% rename from server/src/main/java/io/wdd/rpc/status/beans/DiskInfo.java rename to server/src/main/java/io/wdd/rpc/status/beans/DiskStatus.java index 7b8c7de..2c728cf 100644 --- a/server/src/main/java/io/wdd/rpc/status/beans/DiskInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/beans/DiskStatus.java @@ -8,7 +8,7 @@ import java.util.List; @NoArgsConstructor @Data -public class DiskInfo { +public class DiskStatus { @JsonProperty("Total") private Long total; diff --git a/server/src/main/java/io/wdd/rpc/status/beans/MemoryInfo.java b/server/src/main/java/io/wdd/rpc/status/beans/MemoryStatus.java similarity index 94% rename from server/src/main/java/io/wdd/rpc/status/beans/MemoryInfo.java rename to server/src/main/java/io/wdd/rpc/status/beans/MemoryStatus.java index 15971af..c16e8ba 100644 --- a/server/src/main/java/io/wdd/rpc/status/beans/MemoryInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/beans/MemoryStatus.java @@ -6,7 +6,7 @@ import lombok.NoArgsConstructor; @NoArgsConstructor @Data -public class MemoryInfo { +public class MemoryStatus { @JsonProperty("TotalMemory") private Long totalMemory; diff --git a/server/src/main/java/io/wdd/rpc/status/beans/NetworkInfo.java b/server/src/main/java/io/wdd/rpc/status/beans/NetworkStatus.java similarity index 96% rename from server/src/main/java/io/wdd/rpc/status/beans/NetworkInfo.java rename to server/src/main/java/io/wdd/rpc/status/beans/NetworkStatus.java index 9bee6cd..c5a067d 100644 
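The Info-to-Status bean renames here (CPUStatus, MemoryStatus, NetworkStatus, DiskStatus) leave every @JsonProperty name untouched, so a payload an agent serialized before the rename still binds to the new classes. A minimal Jackson round-trip sketch: only the property names come from the hunks above, while the literal values are illustrative and the snippet assumes an enclosing method that declares throws JsonProcessingException.

    ObjectMapper mapper = new ObjectMapper();
    // The Java fields changed (cPUStatus -> cpuStatus), but the wire keys did not.
    String payload = "{\"CPUStatus\":{\"NumCores\":4,\"CPUPercent\":12.5},"
            + "\"MemoryStatus\":{\"TotalMemory\":8589934592}}";
    AgentStatus agentStatus = mapper.readValue(payload, AgentStatus.class);
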
--- a/server/src/main/java/io/wdd/rpc/status/beans/NetworkInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/beans/NetworkStatus.java @@ -12,7 +12,7 @@ import java.util.List; @Data @AllArgsConstructor @SuperBuilder(toBuilder = true) -public class NetworkInfo { +public class NetworkStatus { @JsonProperty("name") private String name; diff --git a/server/src/test/java/io/wdd/server/ServerApplicationTests.java b/server/src/test/java/io/wdd/server/ServerApplicationTests.java index 38cc180..bfdd25a 100644 --- a/server/src/test/java/io/wdd/server/ServerApplicationTests.java +++ b/server/src/test/java/io/wdd/server/ServerApplicationTests.java @@ -1,6 +1,6 @@ package io.wdd.server; -import io.wdd.rpc.execute.service.AsyncExecutionService; +import io.wdd.rpc.execute.service.SyncExecutionService; import org.junit.jupiter.api.Test; import org.springframework.boot.test.context.SpringBootTest; @@ -13,7 +13,7 @@ class ServerApplicationTests { @Resource - AsyncExecutionService asyncExecutionService; + SyncExecutionService asyncExecutionService; @Test void testCoreExecutionCompleteScript() { @@ -61,7 +61,7 @@ class ServerApplicationTests { ) ); - List resultList = asyncExecutionService.SendCommandToAgentComplete( + List resultList = asyncExecutionService.SyncSendCommandToAgentComplete( targetMachineList, "Scheduled Script", completeScript From 8574169150f4a9e11bbe39556a685e10e6b8a606 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 15 Jun 2023 10:51:55 +0800 Subject: [PATCH 25/45] [Exec] modify async om handler structure --- .../rpc/agent/OctopusAgentServiceImpl.java | 34 +++++++++---------- .../service/AsyncExecutionServiceImpl.java | 21 ++++++------ .../AsyncWaitOMResultService.java} | 24 ++++++------- .../OMAsyncReplayContend.java} | 10 +++--- .../{ => sync}/OMessageHandlerServer.java | 2 +- 5 files changed, 46 insertions(+), 45 deletions(-) rename server/src/main/java/io/wdd/rpc/message/handler/{AsyncWaitOMResult.java => async/AsyncWaitOMResultService.java} (77%) rename server/src/main/java/io/wdd/rpc/message/handler/{OMReplayContend.java => async/OMAsyncReplayContend.java} (86%) rename server/src/main/java/io/wdd/rpc/message/handler/{ => sync}/OMessageHandlerServer.java (98%) diff --git a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java index beed801..271231a 100644 --- a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java @@ -6,8 +6,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import io.wdd.common.utils.TimeUtils; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; -import io.wdd.rpc.message.handler.AsyncWaitOMResult; -import io.wdd.rpc.message.handler.OMReplayContend; +import io.wdd.rpc.message.handler.async.AsyncWaitOMResultService; +import io.wdd.rpc.message.handler.async.OMAsyncReplayContend; import io.wdd.rpc.message.sender.OMessageToAgentSender; import io.wdd.server.beans.vo.ServerInfoVO; import io.wdd.server.config.ServerCommonPool; @@ -28,8 +28,8 @@ import java.util.stream.Collectors; import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_SET; import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; -import static io.wdd.rpc.message.handler.OMessageHandlerServer.AGENT_LATEST_VERSION; -import static io.wdd.rpc.message.handler.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; +import static 
io.wdd.rpc.message.handler.sync.OMessageHandlerServer.AGENT_LATEST_VERSION; +import static io.wdd.rpc.message.handler.sync.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; @Service @Slf4j @@ -45,7 +45,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { RedisTemplate redisTemplate; @Resource - AsyncWaitOMResult asyncWaitOMResult; + AsyncWaitOMResultService asyncWaitOMResultService; @Override public Map getAllAgentVersion() { @@ -70,17 +70,17 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // 构造 异步结果监听内容 - OMReplayContend omReplayContend = OMReplayContend.build( + OMAsyncReplayContend OMAsyncReplayContend = OMAsyncReplayContend.build( ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.size(), CurrentAppOctopusMessageType, currentTime ); - CountDownLatch countDownLatch = omReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = OMAsyncReplayContend.getCountDownLatch(); // 调用后台接收处理所有的Replay信息 - asyncWaitOMResult.waitFor(omReplayContend); + asyncWaitOMResultService.waitFor(OMAsyncReplayContend); //此处存在重大bug,会导致CPU占用飙升 /*CompletableFuture getAllAgentVersionInfoFuture = waitCollectAllAgentVersionInfo( @@ -106,10 +106,10 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { } // 此处调用,即可中断 异步任务的收集工作 - asyncWaitOMResult.stopWaiting(omReplayContend); + asyncWaitOMResultService.stopWaiting(OMAsyncReplayContend); // 处理结果 - omReplayContend + OMAsyncReplayContend .getReplayOMList() .stream() .forEach( @@ -122,7 +122,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // help gc - omReplayContend = null; + OMAsyncReplayContend = null; } return result; @@ -156,16 +156,16 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // 构造结果 - OMReplayContend omReplayContend = OMReplayContend.build( + OMAsyncReplayContend OMAsyncReplayContend = OMAsyncReplayContend.build( ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.size(), CurrentAppOctopusMessageType, currentTime ); - CountDownLatch countDownLatch = omReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = OMAsyncReplayContend.getCountDownLatch(); // 调用后台接收处理所有的Replay信息 - asyncWaitOMResult.waitFor(omReplayContend); + asyncWaitOMResultService.waitFor(OMAsyncReplayContend); /* CompletableFuture getAllAgentCoreInfoFuture = waitCollectAllAgentCoreInfo( result, @@ -185,10 +185,10 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { // 超时,或者 全部信息已经收集 // 此处调用,即可中断 异步任务的收集工作 - asyncWaitOMResult.stopWaiting(omReplayContend); + asyncWaitOMResultService.stopWaiting(OMAsyncReplayContend); // 处理结果 - omReplayContend + OMAsyncReplayContend .getReplayOMList() .stream() .forEach( @@ -216,7 +216,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // help gc - omReplayContend = null; + OMAsyncReplayContend = null; } return result; diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java index a3686f6..8a567c3 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java @@ -2,8 +2,8 @@ package io.wdd.rpc.execute.service; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; -import io.wdd.rpc.message.handler.AsyncWaitOMResult; -import io.wdd.rpc.message.handler.OMReplayContend; +import io.wdd.rpc.message.handler.async.AsyncWaitOMResultService; +import 
io.wdd.rpc.message.handler.async.OMAsyncReplayContend; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; @@ -24,7 +24,7 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { private static final OctopusMessageType CurrentAppOctopusMessageType = OctopusMessageType.EXECUTOR; @Resource - AsyncWaitOMResult asyncWaitOMResult; + AsyncWaitOMResultService asyncWaitOMResultService; @Resource SyncExecutionService asyncExecutionService; @@ -192,6 +192,7 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { LocalDateTime initTime = octopusMessage.getInit_time(); + // OM 中的result保存 ArrayList result = new ArrayList<>(); // 构造消息等待对象 @@ -203,16 +204,16 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { ); } - - OMReplayContend omReplayContend = OMReplayContend.build( + // 构造回复信息的内容 + OMAsyncReplayContend OMAsyncReplayContend = OMAsyncReplayContend.build( commandCount, CurrentAppOctopusMessageType, initTime ); - CountDownLatch countDownLatch = omReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = OMAsyncReplayContend.getCountDownLatch(); // 开始等待结果 - asyncWaitOMResult.waitFor(omReplayContend); + asyncWaitOMResultService.waitFor(OMAsyncReplayContend); // 监听结果 try { @@ -227,10 +228,10 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { // 等待所有的结果返回 // 停止等待结果 - asyncWaitOMResult.stopWaiting(omReplayContend); + asyncWaitOMResultService.stopWaiting(OMAsyncReplayContend); // 解析结果 - omReplayContend + OMAsyncReplayContend .getReplayOMList() .stream() .map( @@ -249,7 +250,7 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { } - // 返回 + // 返回 执行的结果 return result; } } diff --git a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java b/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOMResultService.java similarity index 77% rename from server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java rename to server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOMResultService.java index 9ac3d53..171b847 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOMResultService.java @@ -1,4 +1,4 @@ -package io.wdd.rpc.message.handler; +package io.wdd.rpc.message.handler.async; import io.wdd.rpc.message.OctopusMessage; import io.wdd.server.config.ServerCommonPool; @@ -10,7 +10,7 @@ import java.util.HashMap; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; -import static io.wdd.rpc.message.handler.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; +import static io.wdd.rpc.message.handler.sync.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; /** * 从Agent收集返回信息的统一处理地点 @@ -20,31 +20,31 @@ import static io.wdd.rpc.message.handler.OMessageHandlerServer.OCTOPUS_MESSAGE_F */ @Service @Slf4j -public class AsyncWaitOMResult { +public class AsyncWaitOMResultService { /** * 为了避免线程不安全的问题,增加一层缓存,仅仅由当前类操作此部分 * KEY -> replayMatchKey - * VALUE -> OMReplayContend - 包含countDownLatch 和 result + * VALUE -> OMAsyncReplayContend - 包含countDownLatch 和 result */ - private static final HashMap OM_REPLAY_WAITING_TARGET_MAP = new HashMap<>(); + private static final HashMap OM_REPLAY_WAITING_TARGET_MAP = new HashMap<>(); - public void waitFor(OMReplayContend omReplayContend) { + public void waitFor(OMAsyncReplayContend OMAsyncReplayContend) { // 向 REPLAY_CACHE_MAP中写入 Key OM_REPLAY_WAITING_TARGET_MAP.put( 
- omReplayContend.getReplayMatchKey(), - omReplayContend + OMAsyncReplayContend.getReplayMatchKey(), + OMAsyncReplayContend ); // 在调用线程的countDownLunch结束之后,关闭 // 清除 REPLAY_CACHE_MAP 中的队列 } - public void stopWaiting(OMReplayContend omReplayContend) { + public void stopWaiting(OMAsyncReplayContend OMAsyncReplayContend) { // 在调用线程的countDownLunch结束之后,关闭 清除 REPLAY_CACHE_MAP 中的队列 - OM_REPLAY_WAITING_TARGET_MAP.remove(omReplayContend.getReplayMatchKey()); + OM_REPLAY_WAITING_TARGET_MAP.remove(OMAsyncReplayContend.getReplayMatchKey()); } @@ -82,7 +82,7 @@ public class AsyncWaitOMResult { OctopusMessage replayOMessage = OCTOPUS_MESSAGE_FROM_AGENT.poll(); // 构造 replayMatchKey - String matchKey = OMReplayContend.generateMatchKey( + String matchKey = OMAsyncReplayContend.generateMatchKey( replayOMessage.getType(), replayOMessage.getInit_time() ); @@ -99,7 +99,7 @@ public class AsyncWaitOMResult { } // Map中包含有Key,那么放置进去 - OMReplayContend replayContend = OM_REPLAY_WAITING_TARGET_MAP.get(matchKey); + OMAsyncReplayContend replayContend = OM_REPLAY_WAITING_TARGET_MAP.get(matchKey); replayContend .getReplayOMList() .add(replayOMessage); diff --git a/server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java b/server/src/main/java/io/wdd/rpc/message/handler/async/OMAsyncReplayContend.java similarity index 86% rename from server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java rename to server/src/main/java/io/wdd/rpc/message/handler/async/OMAsyncReplayContend.java index d7a8fe2..abf42e0 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/async/OMAsyncReplayContend.java @@ -1,4 +1,4 @@ -package io.wdd.rpc.message.handler; +package io.wdd.rpc.message.handler.async; import com.fasterxml.jackson.annotation.JsonFormat; import io.swagger.annotations.ApiModel; @@ -19,7 +19,7 @@ import java.util.concurrent.CountDownLatch; @NoArgsConstructor @SuperBuilder(toBuilder = true) @ApiModel("众多业务调用RPC,异步等待需要确定返回消息是谁的") -public class OMReplayContend { +public class OMAsyncReplayContend { @ApiModelProperty("rpc消息的类型") OctopusMessageType type; @@ -37,7 +37,7 @@ public class OMReplayContend { @ApiModelProperty("回复的结果列表, 临时保存") ArrayList replayOMList; - protected static String generateMatchKey(OMReplayContend replayIdentifier) { + protected static String generateMatchKey(OMAsyncReplayContend replayIdentifier) { String relayMatchKey = replayIdentifier .getType() @@ -65,9 +65,9 @@ public class OMReplayContend { * * @return */ - public static OMReplayContend build(int waitForReplayNum, OctopusMessageType currentOMType, LocalDateTime currentTime) { + public static OMAsyncReplayContend build(int waitForReplayNum, OctopusMessageType currentOMType, LocalDateTime currentTime) { - return new OMReplayContend( + return new OMAsyncReplayContend( currentOMType, currentTime, generateMatchKey( diff --git a/server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java b/server/src/main/java/io/wdd/rpc/message/handler/sync/OMessageHandlerServer.java similarity index 98% rename from server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java rename to server/src/main/java/io/wdd/rpc/message/handler/sync/OMessageHandlerServer.java index 6d8053e..35314c1 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/sync/OMessageHandlerServer.java @@ -1,4 +1,4 @@ -package io.wdd.rpc.message.handler; +package io.wdd.rpc.message.handler.sync; 
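Every caller touched by this restructuring drives the relocated async classes through the same sequence: build a contend object, register it, await the latch, then deregister and drain the replies. A condensed sketch of that protocol using only names from this patch; replyCount, maxWaitSeconds, and the log statement are placeholders, and contend is a local name choice.

    OMAsyncReplayContend contend = OMAsyncReplayContend.build(
            replyCount, OctopusMessageType.EXECUTOR, TimeUtils.currentFormatTime());
    asyncWaitOMResultService.waitFor(contend);
    try {
        // Block until every expected reply has arrived or the timeout elapses.
        contend.getCountDownLatch().await(maxWaitSeconds, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } finally {
        // Always deregister, otherwise stale match keys linger in the waiting map.
        asyncWaitOMResultService.stopWaiting(contend);
        contend.getReplayOMList().forEach(om -> log.debug("replay => {}", om));
    }
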
import com.fasterxml.jackson.databind.ObjectMapper; From 4b3f7be1dd93e0c01ebd2977ab975948145b21b5 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 15 Jun 2023 16:29:26 +0800 Subject: [PATCH 26/45] [ Status ] add the async way to get agent status --- .../rpc/agent/OctopusAgentServiceImpl.java | 30 ++-- .../wdd/rpc/controller/StatusController.java | 2 +- .../service/AsyncExecutionServiceImpl.java | 16 +- .../wdd/rpc/init/AgentStatusCacheService.java | 78 +++------- ...AsyncWaitOctopusMessageResultService.java} | 27 ++-- ... => OctopusMessageAsyncReplayContend.java} | 28 ++-- ...b.java => AgentAliveStatusMonitorJob.java} | 8 +- .../service/BuildStatusScheduleTask.java | 4 +- ...va => AgentAliveStatusMonitorService.java} | 74 ++++----- .../status/AgentRuntimeMetricStatus.java | 2 +- .../wdd/rpc/status/OctopusStatusMessage.java | 16 +- .../status/service/AsyncStatusService.java | 16 ++ .../service/AsyncStatusServiceImpl.java | 146 ++++++++++++++++++ 13 files changed, 292 insertions(+), 155 deletions(-) rename server/src/main/java/io/wdd/rpc/message/handler/async/{AsyncWaitOMResultService.java => AsyncWaitOctopusMessageResultService.java} (74%) rename server/src/main/java/io/wdd/rpc/message/handler/async/{OMAsyncReplayContend.java => OctopusMessageAsyncReplayContend.java} (73%) rename server/src/main/java/io/wdd/rpc/scheduler/job/{AgentStatusMonitorJob.java => AgentAliveStatusMonitorJob.java} (73%) rename server/src/main/java/io/wdd/rpc/scheduler/service/status/{CheckAgentAliveStatus.java => AgentAliveStatusMonitorService.java} (69%) create mode 100644 server/src/main/java/io/wdd/rpc/status/service/AsyncStatusService.java create mode 100644 server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java diff --git a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java index 271231a..e7b8485 100644 --- a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java @@ -6,8 +6,8 @@ import com.fasterxml.jackson.databind.ObjectMapper; import io.wdd.common.utils.TimeUtils; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; -import io.wdd.rpc.message.handler.async.AsyncWaitOMResultService; -import io.wdd.rpc.message.handler.async.OMAsyncReplayContend; +import io.wdd.rpc.message.handler.async.AsyncWaitOctopusMessageResultService; +import io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend; import io.wdd.rpc.message.sender.OMessageToAgentSender; import io.wdd.server.beans.vo.ServerInfoVO; import io.wdd.server.config.ServerCommonPool; @@ -45,7 +45,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { RedisTemplate redisTemplate; @Resource - AsyncWaitOMResultService asyncWaitOMResultService; + AsyncWaitOctopusMessageResultService asyncWaitOctopusMessageResultService; @Override public Map getAllAgentVersion() { @@ -70,17 +70,17 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // 构造 异步结果监听内容 - OMAsyncReplayContend OMAsyncReplayContend = OMAsyncReplayContend.build( + OctopusMessageAsyncReplayContend agentReplayContend = OctopusMessageAsyncReplayContend.build( ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.size(), CurrentAppOctopusMessageType, currentTime ); - CountDownLatch countDownLatch = OMAsyncReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = agentReplayContend.getCountDownLatch(); // 调用后台接收处理所有的Replay信息 - 
asyncWaitOMResultService.waitFor(OMAsyncReplayContend); + asyncWaitOctopusMessageResultService.waitFor(agentReplayContend); //此处存在重大bug,会导致CPU占用飙升 /*CompletableFuture getAllAgentVersionInfoFuture = waitCollectAllAgentVersionInfo( @@ -106,10 +106,10 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { } // 此处调用,即可中断 异步任务的收集工作 - asyncWaitOMResultService.stopWaiting(OMAsyncReplayContend); + asyncWaitOctopusMessageResultService.stopWaiting(agentReplayContend); // 处理结果 - OMAsyncReplayContend + agentReplayContend .getReplayOMList() .stream() .forEach( @@ -122,7 +122,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // help gc - OMAsyncReplayContend = null; + agentReplayContend = null; } return result; @@ -156,16 +156,16 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // 构造结果 - OMAsyncReplayContend OMAsyncReplayContend = OMAsyncReplayContend.build( + OctopusMessageAsyncReplayContend OctopusMessageAsyncReplayContend = io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend.build( ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.size(), CurrentAppOctopusMessageType, currentTime ); - CountDownLatch countDownLatch = OMAsyncReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = OctopusMessageAsyncReplayContend.getCountDownLatch(); // 调用后台接收处理所有的Replay信息 - asyncWaitOMResultService.waitFor(OMAsyncReplayContend); + asyncWaitOctopusMessageResultService.waitFor(OctopusMessageAsyncReplayContend); /* CompletableFuture getAllAgentCoreInfoFuture = waitCollectAllAgentCoreInfo( result, @@ -185,10 +185,10 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { // 超时,或者 全部信息已经收集 // 此处调用,即可中断 异步任务的收集工作 - asyncWaitOMResultService.stopWaiting(OMAsyncReplayContend); + asyncWaitOctopusMessageResultService.stopWaiting(OctopusMessageAsyncReplayContend); // 处理结果 - OMAsyncReplayContend + OctopusMessageAsyncReplayContend .getReplayOMList() .stream() .forEach( @@ -216,7 +216,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // help gc - OMAsyncReplayContend = null; + OctopusMessageAsyncReplayContend = null; } return result; diff --git a/server/src/main/java/io/wdd/rpc/controller/StatusController.java b/server/src/main/java/io/wdd/rpc/controller/StatusController.java index cffe3df..410e8bd 100644 --- a/server/src/main/java/io/wdd/rpc/controller/StatusController.java +++ b/server/src/main/java/io/wdd/rpc/controller/StatusController.java @@ -76,7 +76,7 @@ public class StatusController { public R>> ManualUpdateAgentStatus() { // 手动调用更新 - agentStatusCacheService.updateAgentStatusMapCache(); + agentStatusCacheService.updateAgentStatusMapCache(agentAliveStatusMap); return R.ok(STATUS_AGENT_LIST_MAP); } diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java index 8a567c3..17c8018 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java @@ -2,8 +2,8 @@ package io.wdd.rpc.execute.service; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; -import io.wdd.rpc.message.handler.async.AsyncWaitOMResultService; -import io.wdd.rpc.message.handler.async.OMAsyncReplayContend; +import io.wdd.rpc.message.handler.async.AsyncWaitOctopusMessageResultService; +import io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend; import 
lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; @@ -24,7 +24,7 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { private static final OctopusMessageType CurrentAppOctopusMessageType = OctopusMessageType.EXECUTOR; @Resource - AsyncWaitOMResultService asyncWaitOMResultService; + AsyncWaitOctopusMessageResultService asyncWaitOctopusMessageResultService; @Resource SyncExecutionService asyncExecutionService; @@ -205,15 +205,15 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { } // 构造回复信息的内容 - OMAsyncReplayContend OMAsyncReplayContend = OMAsyncReplayContend.build( + OctopusMessageAsyncReplayContend OctopusMessageAsyncReplayContend = OctopusMessageAsyncReplayContend.build( commandCount, CurrentAppOctopusMessageType, initTime ); - CountDownLatch countDownLatch = OMAsyncReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = OctopusMessageAsyncReplayContend.getCountDownLatch(); // 开始等待结果 - asyncWaitOMResultService.waitFor(OMAsyncReplayContend); + asyncWaitOctopusMessageResultService.waitFor(OctopusMessageAsyncReplayContend); // 监听结果 try { @@ -228,10 +228,10 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { // 等待所有的结果返回 // 停止等待结果 - asyncWaitOMResultService.stopWaiting(OMAsyncReplayContend); + asyncWaitOctopusMessageResultService.stopWaiting(OctopusMessageAsyncReplayContend); // 解析结果 - OMAsyncReplayContend + OctopusMessageAsyncReplayContend .getReplayOMList() .stream() .map( diff --git a/server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java b/server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java index 7cf3039..c4e5b09 100644 --- a/server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java +++ b/server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java @@ -7,7 +7,6 @@ import io.wdd.server.beans.vo.ServerInfoVO; import io.wdd.server.coreService.CoreServerService; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections.CollectionUtils; -import org.springframework.data.redis.core.RedisTemplate; import org.springframework.stereotype.Service; import javax.annotation.PostConstruct; @@ -15,7 +14,6 @@ import javax.annotation.Resource; import java.util.*; import java.util.stream.Collectors; -import static io.wdd.rpc.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; /** @@ -50,9 +48,9 @@ public class AgentStatusCacheService { /** * 存储所有Agent状态的Map *

- * 内容为 agentTopicName-健康状态 + * 内容为 agentTopicName- True代表健康 False代表不健康 */ - public static final Map ALL_AGENT_STATUS_MAP = new HashMap<>(); + public static final Map ALL_AGENT_STATUS_MAP = new HashMap<>(); /** * 保存所有健康运行的Agent Topic Name @@ -67,8 +65,6 @@ public class AgentStatusCacheService { @Resource CoreServerService coreServerService; - @Resource - RedisTemplate redisTemplate; @PostConstruct public void GenerateAllCache() { @@ -77,7 +73,7 @@ public class AgentStatusCacheService { updateAllAgentTopicNameCache(); // Agent状态信息的两个Map - updateAgentStatusMapCache(); + // updateAgentStatusMapCache(agentAliveStatusMap); } @@ -118,7 +114,7 @@ public class AgentStatusCacheService { * 由定时任务或者初始化服务触发 * 2023-02-21 前端接口,手动更新 */ - public void updateAgentStatusMapCache() { + public void updateAgentStatusMapCache(Map agentAliveStatusMap) { // 检查,排除没有节点的情况 if (CollectionUtils.isEmpty(ALL_AGENT_TOPIC_NAME_LIST)) { @@ -126,47 +122,17 @@ public class AgentStatusCacheService { return; } - // 从redis中获取所有节点的当前状态 - List statusList = redisTemplate - .opsForHash() - .multiGet( - ALL_AGENT_STATUS_REDIS_KEY, - ALL_AGENT_TOPIC_NAME_LIST - ); - // 初始话 还没有状态的情况,直接return - if (CollectionUtils.isEmpty(statusList)) { - log.warn("agent status from redis is empty !"); - return; - } - - // 增加更新时间 2023年2月21日 - String timeString = TimeUtils.currentTimeString(); - - // 结构保存为agentStatusMap ==> agent-topic-name : STATUS(healthy, failed, unknown) - HashMap agentStatusMap = new HashMap<>(32); - for (int i = 0; i < ALL_AGENT_TOPIC_NAME_LIST.size(); i++) { - agentStatusMap.put( - ALL_AGENT_TOPIC_NAME_LIST.get(i), - uniformHealthyStatus(String.valueOf(statusList.get(i))) - ); - } - - - // 2023-01-16 + // 2023年6月15日 更新状态缓存 ALL_AGENT_STATUS_MAP.clear(); - ALL_AGENT_STATUS_MAP.putAll(agentStatusMap); - ALL_AGENT_STATUS_MAP.put( - STATUS_UPDATE_TIME_KEY, - timeString - ); + ALL_AGENT_STATUS_MAP.putAll(agentAliveStatusMap); // 2023-01-16 // 更新 状态-Agent容器 内容为 // HEALTHY -> ["agentTopicName-1", "agentTopicName-2"] // FAILED -> ["agentTopicName-1", "agentTopicName-2"] - Map> statusAgentListMap = agentStatusMap + Map> statusAgentListMap = agentAliveStatusMap .entrySet() .stream() .collect( @@ -178,7 +144,7 @@ public class AgentStatusCacheService { .stream() .collect( Collectors.toMap( - entry -> entry.getKey(), + entry -> entry.getKey() ? 
"HEALTHY" : "FAILED", entry -> entry .getValue() .stream() @@ -192,7 +158,9 @@ public class AgentStatusCacheService { // 2023-2-3 bug fix STATUS_AGENT_LIST_MAP.clear(); STATUS_AGENT_LIST_MAP.putAll(statusAgentListMap); + // 2023年2月21日,更新时间 + String timeString = TimeUtils.currentFormatTimeString(); STATUS_AGENT_LIST_MAP.put( STATUS_UPDATE_TIME_KEY, Collections.singletonList(timeString) @@ -200,22 +168,26 @@ public class AgentStatusCacheService { log.debug("Agent存活状态 状态-Agent名称-Map 已经更新了"); - - // Trigger调用Agent Metric 任务 - ArrayList allHealthyAgentTopicNames = new ArrayList<>(32); - for (int i = 0; i < statusList.size(); i++) { - if (null !=statusList.get(i) && statusList - .get(i) - .equals("1")) { - allHealthyAgentTopicNames.add(ALL_AGENT_TOPIC_NAME_LIST.get(i)); - } - } // 缓存相应的存活Agent + List allHealthyAgentTopicNames = agentAliveStatusMap + .entrySet() + .stream() + .filter( + entry -> entry + .getKey() + .equals(Boolean.TRUE) + ) + .map( + Map.Entry::getKey + ) + .collect(Collectors.toList()); + ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.clear(); ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.addAll(allHealthyAgentTopicNames); + // help gc - agentStatusMap = null; + agentAliveStatusMap = null; statusAgentListMap = null; allHealthyAgentTopicNames = null; } diff --git a/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOMResultService.java b/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java similarity index 74% rename from server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOMResultService.java rename to server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java index 171b847..5fdd822 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOMResultService.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java @@ -20,31 +20,37 @@ import static io.wdd.rpc.message.handler.sync.OMessageHandlerServer.OCTOPUS_MESS */ @Service @Slf4j -public class AsyncWaitOMResultService { +public class AsyncWaitOctopusMessageResultService { /** * 为了避免线程不安全的问题,增加一层缓存,仅仅由当前类操作此部分 * KEY -> replayMatchKey - * VALUE -> OMAsyncReplayContend - 包含countDownLatch 和 result + * VALUE -> OctopusMessageAsyncReplayContend - 包含countDownLatch 和 result */ - private static final HashMap OM_REPLAY_WAITING_TARGET_MAP = new HashMap<>(); + private static final HashMap OM_REPLAY_WAITING_TARGET_MAP = new HashMap<>(); - public void waitFor(OMAsyncReplayContend OMAsyncReplayContend) { + public void waitFor(OctopusMessageAsyncReplayContend OctopusMessageAsyncReplayContend) { // 向 REPLAY_CACHE_MAP中写入 Key OM_REPLAY_WAITING_TARGET_MAP.put( - OMAsyncReplayContend.getReplayMatchKey(), - OMAsyncReplayContend + OctopusMessageAsyncReplayContend.getReplayMatchKey(), + OctopusMessageAsyncReplayContend ); // 在调用线程的countDownLunch结束之后,关闭 // 清除 REPLAY_CACHE_MAP 中的队列 } - public void stopWaiting(OMAsyncReplayContend OMAsyncReplayContend) { + public void stopWaiting(OctopusMessageAsyncReplayContend OctopusMessageAsyncReplayContend) { // 在调用线程的countDownLunch结束之后,关闭 清除 REPLAY_CACHE_MAP 中的队列 - OM_REPLAY_WAITING_TARGET_MAP.remove(OMAsyncReplayContend.getReplayMatchKey()); + OctopusMessageAsyncReplayContend contend = OM_REPLAY_WAITING_TARGET_MAP.get(OctopusMessageAsyncReplayContend.getReplayMatchKey()); + + // 移除该内容 + OM_REPLAY_WAITING_TARGET_MAP.remove(OctopusMessageAsyncReplayContend.getReplayMatchKey()); + + // help gc + contend = null; } @@ -82,7 +88,7 @@ public class AsyncWaitOMResultService { 
OctopusMessage replayOMessage = OCTOPUS_MESSAGE_FROM_AGENT.poll(); // 构造 replayMatchKey - String matchKey = OMAsyncReplayContend.generateMatchKey( + String matchKey = OctopusMessageAsyncReplayContend.generateMatchKey( replayOMessage.getType(), replayOMessage.getInit_time() ); @@ -99,11 +105,12 @@ public class AsyncWaitOMResultService { } // Map中包含有Key,那么放置进去 - OMAsyncReplayContend replayContend = OM_REPLAY_WAITING_TARGET_MAP.get(matchKey); + OctopusMessageAsyncReplayContend replayContend = OM_REPLAY_WAITING_TARGET_MAP.get(matchKey); replayContend .getReplayOMList() .add(replayOMessage); + // 需要操作countDown replayContend .getCountDownLatch() diff --git a/server/src/main/java/io/wdd/rpc/message/handler/async/OMAsyncReplayContend.java b/server/src/main/java/io/wdd/rpc/message/handler/async/OctopusMessageAsyncReplayContend.java similarity index 73% rename from server/src/main/java/io/wdd/rpc/message/handler/async/OMAsyncReplayContend.java rename to server/src/main/java/io/wdd/rpc/message/handler/async/OctopusMessageAsyncReplayContend.java index abf42e0..5864190 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/async/OMAsyncReplayContend.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/async/OctopusMessageAsyncReplayContend.java @@ -19,7 +19,7 @@ import java.util.concurrent.CountDownLatch; @NoArgsConstructor @SuperBuilder(toBuilder = true) @ApiModel("众多业务调用RPC,异步等待需要确定返回消息是谁的") -public class OMAsyncReplayContend { +public class OctopusMessageAsyncReplayContend { @ApiModelProperty("rpc消息的类型") OctopusMessageType type; @@ -37,17 +37,6 @@ public class OMAsyncReplayContend { @ApiModelProperty("回复的结果列表, 临时保存") ArrayList replayOMList; - protected static String generateMatchKey(OMAsyncReplayContend replayIdentifier) { - - String relayMatchKey = replayIdentifier - .getType() - .toString() + replayIdentifier - .getInitTime() - .toString(); - - return relayMatchKey; - } - /** * @param messageType * @param messageInitTime 必须使用 TimeUtils.currentFormatTime(); @@ -61,21 +50,26 @@ public class OMAsyncReplayContend { } /** - * 方便使用的一个构造方法 + * Execution模块使用的模板 * * @return */ - public static OMAsyncReplayContend build(int waitForReplayNum, OctopusMessageType currentOMType, LocalDateTime currentTime) { + public static OctopusMessageAsyncReplayContend build(int waitForReplayNum, OctopusMessageType currentOMType, LocalDateTime currentTime) { - return new OMAsyncReplayContend( + CountDownLatch latch = null; + if (waitForReplayNum != 0) { + latch = new CountDownLatch(waitForReplayNum); + } + + return new OctopusMessageAsyncReplayContend( currentOMType, currentTime, generateMatchKey( currentOMType, currentTime ), - new CountDownLatch(waitForReplayNum), - new ArrayList<>() + latch, + new ArrayList(16) ); } diff --git a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentAliveStatusMonitorJob.java similarity index 73% rename from server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java rename to server/src/main/java/io/wdd/rpc/scheduler/job/AgentAliveStatusMonitorJob.java index c20a183..a72a37b 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentAliveStatusMonitorJob.java @@ -1,17 +1,17 @@ package io.wdd.rpc.scheduler.job; import io.wdd.rpc.scheduler.config.QuartzLogOperator; -import io.wdd.rpc.scheduler.service.status.CheckAgentAliveStatus; +import io.wdd.rpc.scheduler.service.status.AgentAliveStatusMonitorService; import 
org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; import org.springframework.scheduling.quartz.QuartzJobBean; import javax.annotation.Resource; -public class AgentStatusMonitorJob extends QuartzJobBean { +public class AgentAliveStatusMonitorJob extends QuartzJobBean { @Resource - CheckAgentAliveStatus checkAgentAliveStatus; + AgentAliveStatusMonitorService agentAliveStatusMonitorService; @Resource QuartzLogOperator quartzLogOperator; @@ -23,7 +23,7 @@ public class AgentStatusMonitorJob extends QuartzJobBean { //JobDataMap jobDataMap = jobExecutionContext.getJobDetail().getJobDataMap(); // actually execute the monitor service - checkAgentAliveStatus.go(); + agentAliveStatusMonitorService.go(); // log to somewhere quartzLogOperator.save(); diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java b/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java index f8cc610..755e25d 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java @@ -1,8 +1,8 @@ package io.wdd.rpc.scheduler.service; +import io.wdd.rpc.scheduler.job.AgentAliveStatusMonitorJob; import io.wdd.rpc.scheduler.job.AgentRunMetricStatusJob; -import io.wdd.rpc.scheduler.job.AgentStatusMonitorJob; import lombok.extern.slf4j.Slf4j; import org.quartz.CronExpression; import org.springframework.beans.factory.annotation.Value; @@ -103,7 +103,7 @@ public class BuildStatusScheduleTask { // build the Job octopusQuartzService.addMission( - AgentStatusMonitorJob.class, + AgentAliveStatusMonitorJob.class, "monitorAllAgentStatusJob", JOB_GROUP_NAME, healthyCheckStartDelaySeconds, diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/CheckAgentAliveStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java similarity index 69% rename from server/src/main/java/io/wdd/rpc/scheduler/service/status/CheckAgentAliveStatus.java rename to server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java index ebe797e..d63721f 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/CheckAgentAliveStatus.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java @@ -4,6 +4,7 @@ import io.wdd.common.utils.TimeUtils; import io.wdd.rpc.init.AgentStatusCacheService; import io.wdd.rpc.scheduler.service.BuildStatusScheduleTask; import io.wdd.rpc.status.OctopusStatusMessage; +import io.wdd.rpc.status.service.AsyncStatusService; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections.CollectionUtils; import org.springframework.context.annotation.Lazy; @@ -13,8 +14,7 @@ import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.util.HashMap; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; +import java.util.Map; import java.util.stream.Collectors; import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; @@ -38,7 +38,7 @@ import static io.wdd.rpc.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE @Service @Slf4j @Lazy -public class CheckAgentAliveStatus { +public class AgentAliveStatusMonitorService { private static final int MAX_WAIT_AGENT_REPORT_STATUS_TIME = 5; @Resource @@ -52,38 +52,35 @@ public class CheckAgentAliveStatus { @Resource BuildStatusScheduleTask buildStatusScheduleTask; 
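The go() rewrite below replaces the old sleep-and-poll cycle with a single blocking collection call. A sketch of how the returned map is typically consumed; the five-second budget mirrors the literal used in the hunk below, while the log statement is a placeholder:

    Map<String, Boolean> aliveMap = asyncStatusService.AsyncCollectAgentAliveStatus(
            ALL_AGENT_TOPIC_NAME_LIST, 5);
    // Agents that never answered keep their initial FALSE and count as FAILED.
    aliveMap.forEach((topic, alive) ->
            log.info("agent {} -> {}", topic, alive ? "HEALTHY" : "FAILED"));
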
+ @Resource + AsyncStatusService asyncStatusService; + + private HashMap AGENT_HEALTHY_INIT_MAP; public void go() { - try { - // 1. 获取所有注册的Agent 手动更新 - agentStatusCacheService.updateAllAgentTopicNameCache(); - if (CollectionUtils.isEmpty(ALL_AGENT_TOPIC_NAME_LIST)) { - log.warn("[Scheduler] No Agent Registered ! End Up Status Monitor !"); - return; - } - - // 1.1 检查 Agent状态保存数据结构是否正常 - checkOrCreateRedisHealthyKey(); - - // 2.发送状态检查信息, agent需要update相应的HashMap的值 - // 2023年6月14日 2. 发送ping等待所有的Agent返回PONG, 然后进行redis的状态修改 - CountDownLatch aliveStatusCDL = new CountDownLatch(ALL_AGENT_TOPIC_NAME_LIST.size()); - - - buildAndSendAgentHealthMessage(); - - // 3. 休眠 MAX_WAIT_AGENT_REPORT_STATUS_TIME 秒 等待agent的状态上报 - TimeUnit.SECONDS.sleep(MAX_WAIT_AGENT_REPORT_STATUS_TIME); - - // 4.检查相应的 状态HashMap,然后全部置为零 - // todo 存储到某个地方,目前只是打印日志 - updateAllAgentHealthyStatus(); - - } catch (InterruptedException e) { - throw new RuntimeException(e); + // 1. 获取所有注册的Agent 手动更新 + agentStatusCacheService.updateAllAgentTopicNameCache(); + if (CollectionUtils.isEmpty(ALL_AGENT_TOPIC_NAME_LIST)) { + log.warn("[Scheduler] No Agent Registered ! End Up Status Monitor !"); + return; } + + // 1.1 检查 Agent状态保存数据结构是否正常 + checkOrCreateRedisHealthyKey(); + + // 2.发送状态检查信息, agent需要update相应的HashMap的值 + // 2023年6月14日 2. 发送ping等待所有的Agent返回PONG, 然后进行redis的状态修改 + + // 使用同步更新的策略 + Map agentAliveStatusMap = asyncStatusService.AsyncCollectAgentAliveStatus( + ALL_AGENT_TOPIC_NAME_LIST, + 5 + ); + + // 更新Agent的状态 + updateAllAgentHealthyStatus(agentAliveStatusMap); } private void checkOrCreateRedisHealthyKey() { @@ -129,8 +126,7 @@ public class CheckAgentAliveStatus { .map( agentTopicName -> OctopusStatusMessage .builder() - .agentTopicName(agentTopicName) - .type(HEALTHY_STATUS_MESSAGE_TYPE) + .statusType(HEALTHY_STATUS_MESSAGE_TYPE) .build() ) .collect(Collectors.toList()); @@ -139,15 +135,15 @@ public class CheckAgentAliveStatus { collectAgentStatus.statusMessageToAgent(collect); } - private void updateAllAgentHealthyStatus() { + private void updateAllAgentHealthyStatus(Map agentAliveStatusMap) { String currentTimeString = TimeUtils.currentTimeString(); // 更新所有的缓存状态 - agentStatusCacheService.updateAgentStatusMapCache(); + agentStatusCacheService.updateAgentStatusMapCache(agentAliveStatusMap); // 执行Metric上报定时任务 - buildStatusScheduleTask.buildAgentMetricScheduleTask(); +// buildStatusScheduleTask.buildAgentMetricScheduleTask(); // 这里仅仅是更新时间 redisTemplate @@ -158,6 +154,14 @@ public class CheckAgentAliveStatus { currentTimeString ); + // 更新所有的Agent状态 + redisTemplate + .opsForHash() + .putAll( + ALL_AGENT_STATUS_REDIS_KEY, + agentAliveStatusMap + ); + } diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java index 40e13e0..26b5a75 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java @@ -52,7 +52,7 @@ public class AgentRuntimeMetricStatus { agentTopicName -> { return OctopusStatusMessage .builder() - .type(METRIC_STATUS_MESSAGE_TYPE) + .statusType(METRIC_STATUS_MESSAGE_TYPE) .metricRepeatCount(metricRepeatCount) .metricRepeatPinch(metricRepeatPinch) .agentTopicName(agentTopicName) diff --git a/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java index 37f62a7..49c5098 100644 --- 
a/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java +++ b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java @@ -14,20 +14,18 @@ public class OctopusStatusMessage { // below two will be used by both server and agent // 存储所有Agent的实时健康状态, 1代表健康 0代表失败 public static final String ALL_AGENT_STATUS_REDIS_KEY = "ALL_AGENT_HEALTHY_STATUS"; - public static final String HEALTHY_STATUS_MESSAGE_TYPE = "ping"; - public static final String ALL_STATUS_MESSAGE_TYPE = "all"; - public static final String METRIC_STATUS_MESSAGE_TYPE = "metric"; - public static final String APP_STATUS_MESSAGE_TYPE = "app"; + public static final String HEALTHY_STATUS_MESSAGE_TYPE = "PING"; + public static final String ALL_STATUS_MESSAGE_TYPE = "ALL"; + public static final String METRIC_STATUS_MESSAGE_TYPE = "METRIC"; + public static final String APP_STATUS_MESSAGE_TYPE = "APP"; /** - * which kind of status should be return + * which kind of status should be return * metric => short time message * all => all agent status message * healthy => check for healthy - * */ - String type; - - String agentTopicName; + */ + String statusType; int metricRepeatCount; diff --git a/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusService.java b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusService.java new file mode 100644 index 0000000..844035b --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusService.java @@ -0,0 +1,16 @@ +package io.wdd.rpc.status.service; + +import java.util.List; +import java.util.Map; + +public interface AsyncStatusService { + + /** + * 应该是同步收集 agentTopicNameList 的节点的存活状态,并返回所有的状态存活结果 + * + * @param agentTopicNameList + * @param aliveStatusWaitMaxTime + * @return + */ + Map AsyncCollectAgentAliveStatus(List agentTopicNameList, int aliveStatusWaitMaxTime); +} diff --git a/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java new file mode 100644 index 0000000..7286c6d --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java @@ -0,0 +1,146 @@ +package io.wdd.rpc.status.service; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; +import io.wdd.rpc.message.handler.async.AsyncWaitOctopusMessageResultService; +import io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend; +import io.wdd.rpc.message.sender.OMessageToAgentSender; +import io.wdd.rpc.status.OctopusStatusMessage; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import javax.annotation.Resource; +import java.time.LocalDateTime; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE; + +@Slf4j +@Service +public class AsyncStatusServiceImpl implements AsyncStatusService { + + private static final OctopusMessageType CurrentAppOctopusMessageType = OctopusMessageType.STATUS; + + @Resource + OMessageToAgentSender oMessageToAgentSender; + + @Resource + ObjectMapper objectMapper; + + @Resource + AsyncWaitOctopusMessageResultService 
asyncWaitOctopusMessageResultService; + + @Override + public Map AsyncCollectAgentAliveStatus(List agentTopicNameList, int aliveStatusWaitMaxTime) { + + // 构造最后的结果Map + Map agentAliveStatusMap = agentTopicNameList + .stream() + .collect( + Collectors.toMap( + agentTopicName -> agentTopicName, + agentTopicName -> Boolean.FALSE + )); + + LocalDateTime currentTime = TimeUtils.currentFormatTime(); + // 构造OctopusMessage - StatusMessage结构体, 下发所有的消息 + buildAndSendAgentAliveOctopusMessage(currentTime); + + // 异步收集消息 + OctopusMessageAsyncReplayContend statusAsyncReplayContend = OctopusMessageAsyncReplayContend.build( + agentTopicNameList.size(), + CurrentAppOctopusMessageType, + currentTime + ); + asyncWaitOctopusMessageResultService.waitFor(statusAsyncReplayContend); + + // 解析结果 + CountDownLatch countDownLatch = statusAsyncReplayContend.getCountDownLatch(); + + // 等待状态返回的结果 + boolean agentAliveStatusCollectResult = false; + try { + agentAliveStatusCollectResult = countDownLatch.await( + aliveStatusWaitMaxTime, + TimeUnit.SECONDS + ); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } finally { + if (!agentAliveStatusCollectResult) { + log.debug("Agent存活状态检查,没有检查到全部的Agent!"); + } + + // 移除等待队列 + asyncWaitOctopusMessageResultService.stopWaiting(statusAsyncReplayContend); + + // 处理结果 + statusAsyncReplayContend + .getReplayOMList() + .stream() + .forEach( + statusOMessage -> { + if (statusOMessage.getResult() != null) { + agentAliveStatusMap.put( + statusOMessage.getUuid(), + Boolean.TRUE + ); + } + } + ); + } + + // 返回Agent的存活状态内容 + return agentAliveStatusMap; + } + + private void buildAndSendAgentAliveOctopusMessage(LocalDateTime currentTime) { + + List octopusStatusMessageList = ALL_AGENT_TOPIC_NAME_LIST + .stream() + .map( + agentTopicName -> ConstructAgentStatusMessage( + HEALTHY_STATUS_MESSAGE_TYPE, + agentTopicName, + currentTime + ) + ) + .collect(Collectors.toList()); + + // 发送信息 + oMessageToAgentSender.send(octopusStatusMessageList); + + } + + private OctopusMessage ConstructAgentStatusMessage(String statusType, String agentTopicName, LocalDateTime currentTime) { + + OctopusStatusMessage statusMessage = OctopusStatusMessage + .builder() + .statusType(statusType) + .build(); + + String ops; + try { + ops = objectMapper.writeValueAsString(statusMessage); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + + return OctopusMessage + .builder() + .type(CurrentAppOctopusMessageType) + .uuid(agentTopicName) + .init_time(currentTime) + .content(ops) + .build(); + + } +} From 54569cc41b341ab40e6ebfdb332fc88acac3f0b1 Mon Sep 17 00:00:00 2001 From: IceDerce Date: Thu, 15 Jun 2023 17:03:22 +0800 Subject: [PATCH 27/45] [ Status ] optimize the code --- agent-go/executor/RealTimeExecutor_test.go | 2 +- agent-go/go.mod | 2 +- agent-go/rabbitmq/OctopusMessage.go | 8 ++- agent-go/status/CPU.go | 1 + agent-go/tmp/1.sh | 7 ++ agent-go/tmp/simple.sh | 7 ++ .../message/handler/OMHandlerExecutor.java | 9 +++ .../test/java/io/wdd/agent/CommandTest.java | 5 ++ .../wdd/rpc/controller/StatusController.java | 14 ++-- .../service/AsyncExecutionServiceImpl.java | 10 +-- .../job/AgentRunMetricStatusJob.java | 10 +-- .../service/BuildStatusScheduleTask.java | 4 +- .../AgentAliveStatusMonitorService.java | 24 +------ ...a => AgentMetricStatusCollectService.java} | 43 ++++++----- .../service/status/CollectAgentStatus.java | 72 ------------------- .../wdd/rpc/status/OctopusStatusMessage.java | 34 +++++++++ .../service/AsyncStatusServiceImpl.java | 28 +------- 17 files changed, 
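
The implementation above is the heart of patch 26: seed every topic to FALSE, send the PINGs, then block on a CountDownLatch for at most aliveStatusWaitMaxTime seconds; replies that arrive in time flip their entry to TRUE, and stragglers simply stay FALSE. A self-contained sketch of that wait pattern, with scheduled tasks standing in for RabbitMQ replies:

import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

public class AliveLatchSketch {

    public static void main(String[] args) throws InterruptedException {
        List<String> topics = List.of("agent-a", "agent-b", "agent-c");

        // Seed every agent as not-alive; only a timely reply flips the entry.
        Map<String, Boolean> alive = topics.stream()
                .collect(Collectors.toConcurrentMap(t -> t, t -> Boolean.FALSE));
        CountDownLatch latch = new CountDownLatch(topics.size());

        // Simulated PONG replies: two arrive in time, the third is too late
        // and its entry simply stays FALSE.
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(3);
        pool.schedule(() -> { alive.put("agent-a", Boolean.TRUE); latch.countDown(); }, 1, TimeUnit.SECONDS);
        pool.schedule(() -> { alive.put("agent-b", Boolean.TRUE); latch.countDown(); }, 2, TimeUnit.SECONDS);
        pool.schedule(() -> { alive.put("agent-c", Boolean.TRUE); latch.countDown(); }, 9, TimeUnit.SECONDS);

        // Block for at most aliveStatusWaitMaxTime seconds.
        boolean allReplied = latch.await(5, TimeUnit.SECONDS);
        if (!allReplied) {
            System.out.println("not every agent replied in time");
        }
        System.out.println(alive); // agent-c remains false here
        pool.shutdownNow();
    }
}
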
123 insertions(+), 157 deletions(-) create mode 100644 agent-go/tmp/1.sh create mode 100755 agent-go/tmp/simple.sh create mode 100644 agent/src/test/java/io/wdd/agent/CommandTest.java rename server/src/main/java/io/wdd/rpc/scheduler/service/status/{AgentRuntimeMetricStatus.java => AgentMetricStatusCollectService.java} (57%) delete mode 100644 server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java diff --git a/agent-go/executor/RealTimeExecutor_test.go b/agent-go/executor/RealTimeExecutor_test.go index 715fbf6..43f44f9 100644 --- a/agent-go/executor/RealTimeExecutor_test.go +++ b/agent-go/executor/RealTimeExecutor_test.go @@ -5,7 +5,7 @@ import "testing" func TestReadTimeOutput(t *testing.T) { strings := []string{ "/bin/bash", - "/root/simple.sh", + "/root/IdeaProjects/ProjectOctopus/agent-go/tmp/simple.sh", } ReadTimeCommandExecutor(strings) diff --git a/agent-go/go.mod b/agent-go/go.mod index 8c19d58..7662bdc 100644 --- a/agent-go/go.mod +++ b/agent-go/go.mod @@ -9,7 +9,6 @@ require ( github.com/spf13/viper v1.15.0 github.com/streadway/amqp v1.0.0 go.uber.org/zap v1.24.0 - gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -58,4 +57,5 @@ require ( google.golang.org/protobuf v1.28.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go index 1d74ab2..0b7eda7 100644 --- a/agent-go/rabbitmq/OctopusMessage.go +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -118,12 +118,18 @@ func executorOMHandler(octopusMessage *OctopusMessage) { func statusOMHandler(octopusMessage *OctopusMessage) { + v, ok := (octopusMessage.Content).(string) + if !ok { + log.ErrorF("convert to string is wrong %s", v) + } + statusMsgString := octopusMessage.Content.(string) var statusMessage *status.StatusMessage err := json.Unmarshal([]byte(statusMsgString), &statusMessage) if err != nil { - log.Error(fmt.Sprintf("status message convert to json is wrong! msg is => %s", statusMsgString)) + fmt.Println(err.Error()) + log.Error(fmt.Sprintf("status message convert to json is wrong! 
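
The Go handler changed above asserts Content to a string before calling json.Unmarshal because the content field is doubly encoded: the inner payload is serialized to a JSON string first and only then embedded in the outer OctopusMessage (the server code removed later in this series keeps the reminder "must be like this or it will be deserialized as LinkedHashMap"). A small Jackson sketch of both ends of that contract; Envelope and StatusPayload are stand-ins, not the project classes, and jackson-databind is assumed on the classpath:

import com.fasterxml.jackson.databind.ObjectMapper;

public class DoubleEncodeSketch {
    // Stand-ins for the real message classes (illustrative only).
    public static class Envelope { public String type; public Object content; }
    public static class StatusPayload { public String statusType; }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();

        StatusPayload payload = new StatusPayload();
        payload.statusType = "PING";

        // Sender: serialize the payload to a String first, then embed it.
        Envelope env = new Envelope();
        env.type = "STATUS";
        env.content = mapper.writeValueAsString(payload);
        String wire = mapper.writeValueAsString(env);

        // Receiver: content comes back as a String, which can then be parsed
        // into the concrete payload type in a second step.
        Envelope decoded = mapper.readValue(wire, Envelope.class);
        StatusPayload inner = mapper.readValue((String) decoded.content, StatusPayload.class);
        System.out.println(inner.statusType); // PING

        // Had the payload been embedded as a nested object instead, 'content'
        // would deserialize as a LinkedHashMap and need extra conversion.
    }
}
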
msg is => %s", octopusMessage)) return } diff --git a/agent-go/status/CPU.go b/agent-go/status/CPU.go index 7cc60ec..710b5e7 100644 --- a/agent-go/status/CPU.go +++ b/agent-go/status/CPU.go @@ -46,4 +46,5 @@ func GetCPUStatus() (*CPUStatus, error) { CPULoads: cpuLoads, SystemLoads: systemLoads, }, nil + } diff --git a/agent-go/tmp/1.sh b/agent-go/tmp/1.sh new file mode 100644 index 0000000..4c394b3 --- /dev/null +++ b/agent-go/tmp/1.sh @@ -0,0 +1,7 @@ +#!/bin/bash + + + +export http_proxy=http://10.250.0.10:10810 && export https_proxy=http://10.250.0.10:10810 + + diff --git a/agent-go/tmp/simple.sh b/agent-go/tmp/simple.sh new file mode 100755 index 0000000..600331d --- /dev/null +++ b/agent-go/tmp/simple.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +for i in {1..30} +do + echo "yes" + sleep 0.3 +done \ No newline at end of file diff --git a/agent/src/main/java/io/wdd/agent/config/message/handler/OMHandlerExecutor.java b/agent/src/main/java/io/wdd/agent/config/message/handler/OMHandlerExecutor.java index b5e3702..7cead64 100644 --- a/agent/src/main/java/io/wdd/agent/config/message/handler/OMHandlerExecutor.java +++ b/agent/src/main/java/io/wdd/agent/config/message/handler/OMHandlerExecutor.java @@ -30,7 +30,15 @@ public class OMHandlerExecutor extends AbstractOctopusMessageHandler { @Override public boolean handle(OctopusMessage octopusMessage) { + + if (!octopusMessage + + + + + + .getType() .equals(OctopusMessageType.EXECUTOR)) { return next.handle(octopusMessage); @@ -41,6 +49,7 @@ public class OMHandlerExecutor extends AbstractOctopusMessageHandler { try { // 需要首先解析成 ExecutionMessage + ExecutionMessage executionMessage = objectMapper.readValue( (String) octopusMessage.getContent(), new TypeReference() { diff --git a/agent/src/test/java/io/wdd/agent/CommandTest.java b/agent/src/test/java/io/wdd/agent/CommandTest.java new file mode 100644 index 0000000..3d797d4 --- /dev/null +++ b/agent/src/test/java/io/wdd/agent/CommandTest.java @@ -0,0 +1,5 @@ +package io.wdd.agent; + + +public class CommandTest { +} diff --git a/server/src/main/java/io/wdd/rpc/controller/StatusController.java b/server/src/main/java/io/wdd/rpc/controller/StatusController.java index 410e8bd..17f3c89 100644 --- a/server/src/main/java/io/wdd/rpc/controller/StatusController.java +++ b/server/src/main/java/io/wdd/rpc/controller/StatusController.java @@ -4,7 +4,8 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.wdd.common.response.R; -import io.wdd.rpc.init.AgentStatusCacheService; +import io.wdd.rpc.scheduler.service.status.AgentAliveStatusMonitorService; +import io.wdd.rpc.status.service.AsyncStatusService; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestMapping; @@ -23,11 +24,14 @@ import static io.wdd.rpc.init.AgentStatusCacheService.*; public class StatusController { @Resource - AgentStatusCacheService agentStatusCacheService; + AsyncStatusService asyncStatusService; + + @Resource + AgentAliveStatusMonitorService agentAliveStatusMonitorService; @ApiOperation("[ Agent-状态 ] Map") @GetMapping("/agent/status") - public R> GetAllAgentHealthyStatus() { + public R> GetAllAgentHealthyStatus() { return R.ok(ALL_AGENT_STATUS_MAP); } @@ -76,7 +80,9 @@ public class StatusController { public R>> ManualUpdateAgentStatus() { // 手动调用更新 - agentStatusCacheService.updateAgentStatusMapCache(agentAliveStatusMap); + Map agentAliveStatusMap = 
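
OMHandlerExecutor above is one link in a chain of AbstractOctopusMessageHandler subclasses: each handler claims exactly one OctopusMessageType and defers everything else with next.handle(...). A stripped-down sketch of that chain-of-responsibility shape, all names illustrative:

public class HandlerChainSketch {
    enum MessageType { EXECUTOR, STATUS }
    record Message(MessageType type, String content) {}

    abstract static class Handler {
        Handler next;
        Handler setNext(Handler next) { this.next = next; return next; }
        abstract boolean handle(Message m);
        // Pass the message down the chain; false if nobody claimed it.
        boolean pass(Message m) { return next != null && next.handle(m); }
    }

    static class ExecutorHandler extends Handler {
        @Override boolean handle(Message m) {
            if (m.type() != MessageType.EXECUTOR) return pass(m); // not ours
            System.out.println("execute: " + m.content());
            return true;
        }
    }

    static class StatusHandler extends Handler {
        @Override boolean handle(Message m) {
            if (m.type() != MessageType.STATUS) return pass(m);
            System.out.println("report status: " + m.content());
            return true;
        }
    }

    public static void main(String[] args) {
        Handler chain = new ExecutorHandler();
        chain.setNext(new StatusHandler());
        chain.handle(new Message(MessageType.STATUS, "PING")); // report status: PING
    }
}
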
asyncStatusService.AsyncCollectAgentAliveStatus(ALL_AGENT_TOPIC_NAME_LIST, 5); + + agentAliveStatusMonitorService.updateAllAgentHealthyStatus(agentAliveStatusMap); return R.ok(STATUS_AGENT_LIST_MAP); } diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java index 17c8018..d7b55cb 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java @@ -205,15 +205,15 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { } // 构造回复信息的内容 - OctopusMessageAsyncReplayContend OctopusMessageAsyncReplayContend = OctopusMessageAsyncReplayContend.build( + OctopusMessageAsyncReplayContend executionReplayContent = OctopusMessageAsyncReplayContend.build( commandCount, CurrentAppOctopusMessageType, initTime ); - CountDownLatch countDownLatch = OctopusMessageAsyncReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = executionReplayContent.getCountDownLatch(); // 开始等待结果 - asyncWaitOctopusMessageResultService.waitFor(OctopusMessageAsyncReplayContend); + asyncWaitOctopusMessageResultService.waitFor(executionReplayContent); // 监听结果 try { @@ -228,10 +228,10 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { // 等待所有的结果返回 // 停止等待结果 - asyncWaitOctopusMessageResultService.stopWaiting(OctopusMessageAsyncReplayContend); + asyncWaitOctopusMessageResultService.stopWaiting(executionReplayContent); // 解析结果 - OctopusMessageAsyncReplayContend + executionReplayContent .getReplayOMList() .stream() .map( diff --git a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentRunMetricStatusJob.java b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentRunMetricStatusJob.java index 87445a9..e34ad2b 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentRunMetricStatusJob.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentRunMetricStatusJob.java @@ -1,6 +1,6 @@ package io.wdd.rpc.scheduler.job; -import io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus; +import io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService; import org.quartz.JobDataMap; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; @@ -8,13 +8,13 @@ import org.springframework.scheduling.quartz.QuartzJobBean; import javax.annotation.Resource; -import static io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus.METRIC_REPORT_TIMES_COUNT; -import static io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus.METRIC_REPORT_TIME_PINCH; +import static io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService.METRIC_REPORT_TIMES_COUNT; +import static io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService.METRIC_REPORT_TIME_PINCH; public class AgentRunMetricStatusJob extends QuartzJobBean { @Resource - AgentRuntimeMetricStatus agentRuntimeMetricStatus; + AgentMetricStatusCollectService agentMetricStatusCollectService; @Override protected void executeInternal(JobExecutionContext jobExecutionContext) throws JobExecutionException { @@ -25,7 +25,7 @@ public class AgentRunMetricStatusJob extends QuartzJobBean { .getJobDataMap(); // 执行Agent Metric 状态收集任务 - agentRuntimeMetricStatus.collect( + agentMetricStatusCollectService.collect( (Integer) jobDataMap.get(METRIC_REPORT_TIMES_COUNT), (Integer) jobDataMap.get(METRIC_REPORT_TIME_PINCH) ); diff --git 
a/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java b/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java index 755e25d..009238d 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java @@ -14,8 +14,8 @@ import java.text.ParseException; import java.util.Date; import java.util.HashMap; -import static io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus.METRIC_REPORT_TIMES_COUNT; -import static io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus.METRIC_REPORT_TIME_PINCH; +import static io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService.METRIC_REPORT_TIMES_COUNT; +import static io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService.METRIC_REPORT_TIME_PINCH; @Component @Slf4j diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java index d63721f..22854f4 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java @@ -3,7 +3,6 @@ package io.wdd.rpc.scheduler.service.status; import io.wdd.common.utils.TimeUtils; import io.wdd.rpc.init.AgentStatusCacheService; import io.wdd.rpc.scheduler.service.BuildStatusScheduleTask; -import io.wdd.rpc.status.OctopusStatusMessage; import io.wdd.rpc.status.service.AsyncStatusService; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections.CollectionUtils; @@ -13,13 +12,10 @@ import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; import static io.wdd.rpc.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; -import static io.wdd.rpc.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE; /** * 更新频率被类 BuildStatusScheduleTask.class控制 @@ -43,8 +39,6 @@ public class AgentAliveStatusMonitorService { private static final int MAX_WAIT_AGENT_REPORT_STATUS_TIME = 5; @Resource RedisTemplate redisTemplate; - @Resource - CollectAgentStatus collectAgentStatus; @Resource AgentStatusCacheService agentStatusCacheService; @@ -119,23 +113,7 @@ public class AgentAliveStatusMonitorService { } - private void buildAndSendAgentHealthMessage() { - - List collect = ALL_AGENT_TOPIC_NAME_LIST - .stream() - .map( - agentTopicName -> OctopusStatusMessage - .builder() - .statusType(HEALTHY_STATUS_MESSAGE_TYPE) - .build() - ) - .collect(Collectors.toList()); - - // 发送信息 - collectAgentStatus.statusMessageToAgent(collect); - } - - private void updateAllAgentHealthyStatus(Map agentAliveStatusMap) { + public void updateAllAgentHealthyStatus(Map agentAliveStatusMap) { String currentTimeString = TimeUtils.currentTimeString(); diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java similarity index 57% rename from server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java rename to server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java index 26b5a75..17e5980 
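
AgentRunMetricStatusJob reads metricRepeatCount and metricRepeatPinch out of the Quartz JobDataMap under the METRIC_REPORT_* keys; the values are put there by BuildStatusScheduleTask, which this hunk does not show. A hedged sketch of how such a job could be scheduled with plain Quartz; the identities, interval, and numbers are made up, and in the project the Spring Quartz integration, not a standalone scheduler, wires the job's @Resource fields:

import org.quartz.JobBuilder;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SimpleScheduleBuilder;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;
import org.quartz.impl.StdSchedulerFactory;

import io.wdd.rpc.scheduler.job.AgentRunMetricStatusJob;

public class MetricJobScheduleSketch {

    public static void main(String[] args) throws SchedulerException {
        Scheduler scheduler = StdSchedulerFactory.getDefaultScheduler();
        scheduler.start();

        // The JobDataMap keys must match METRIC_REPORT_TIMES_COUNT and
        // METRIC_REPORT_TIME_PINCH; the numbers here are illustrative.
        JobDetail job = JobBuilder.newJob(AgentRunMetricStatusJob.class)
                .withIdentity("agentMetricStatus", "status")
                .usingJobData("metricRepeatCount", 10)
                .usingJobData("metricRepeatPinch", 30)
                .build();

        Trigger trigger = TriggerBuilder.newTrigger()
                .withIdentity("agentMetricStatusTrigger", "status")
                .startNow()
                .withSchedule(SimpleScheduleBuilder.simpleSchedule()
                        .withIntervalInMinutes(5)
                        .repeatForever())
                .build();

        scheduler.scheduleJob(job, trigger);
    }
}
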
100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java @@ -1,12 +1,17 @@ package io.wdd.rpc.scheduler.service.status; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.sender.OMessageToAgentSender; import io.wdd.rpc.status.OctopusStatusMessage; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; import org.springframework.util.CollectionUtils; import javax.annotation.Resource; +import java.time.LocalDateTime; import java.util.List; import java.util.stream.Collectors; @@ -20,13 +25,20 @@ import static io.wdd.rpc.status.OctopusStatusMessage.METRIC_STATUS_MESSAGE_TYPE; */ @Service @Slf4j -public class AgentRuntimeMetricStatus { +public class AgentMetricStatusCollectService { public static final String METRIC_REPORT_TIME_PINCH = "metricRepeatPinch"; public static final String METRIC_REPORT_TIMES_COUNT = "metricRepeatCount"; @Resource - CollectAgentStatus collectAgentStatus; + OctopusStatusMessage octopusStatusMessage; + + @Resource + OMessageToAgentSender oMessageToAgentSender; + + @Resource + ObjectMapper objectMapper; + public void collect(int metricRepeatCount, int metricRepeatPinch) { @@ -34,35 +46,32 @@ public class AgentRuntimeMetricStatus { if (CollectionUtils.isEmpty(ALL_HEALTHY_AGENT_TOPIC_NAME_LIST)) { log.error("Metric Status Collect Failed ! no ALL_HEALTHY_AGENT_TOPIC_NAMES"); } - // 构建 OctopusMessage - // 只发送一次消息,让Agent循环定时执行任务 + buildMetricStatusMessageAndSend( metricRepeatCount, metricRepeatPinch ); - // } private void buildMetricStatusMessageAndSend(int metricRepeatCount, int metricRepeatPinch) { - List collect = ALL_HEALTHY_AGENT_TOPIC_NAME_LIST + LocalDateTime currentTime = TimeUtils.currentFormatTime(); + + List octopusStatusMessageList = ALL_HEALTHY_AGENT_TOPIC_NAME_LIST .stream() .map( - agentTopicName -> { - return OctopusStatusMessage - .builder() - .statusType(METRIC_STATUS_MESSAGE_TYPE) - .metricRepeatCount(metricRepeatCount) - .metricRepeatPinch(metricRepeatPinch) - .agentTopicName(agentTopicName) - .build(); - } + agentTopicName -> octopusStatusMessage + .ConstructAgentStatusMessage( + METRIC_STATUS_MESSAGE_TYPE, + agentTopicName, + currentTime + ) ) .collect(Collectors.toList()); - // send to the next level - collectAgentStatus.statusMessageToAgent(collect); + // batch send all messages to RabbitMQ + oMessageToAgentSender.send(octopusStatusMessageList); } diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java deleted file mode 100644 index b0dd670..0000000 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java +++ /dev/null @@ -1,72 +0,0 @@ -package io.wdd.rpc.scheduler.service.status; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.message.OctopusMessage; -import io.wdd.rpc.message.OctopusMessageType; -import io.wdd.rpc.message.sender.OMessageToAgentSender; -import io.wdd.rpc.status.OctopusStatusMessage; -import org.springframework.stereotype.Service; - -import javax.annotation.Resource; -import java.util.List; -import java.util.stream.Collectors; - -/** - * 1. 定时任务 - * 2. 向RabbitMQ中发送消息,STATUS类型的消息 - * 3. 
然后开始监听相应的Result StreamKey - */ -@Service -public class CollectAgentStatus { - - @Resource - OMessageToAgentSender oMessageToAgentSender; - - @Resource - ObjectMapper objectMapper; - - - public void collectAgentStatus(OctopusStatusMessage statusMessage) { - - this.statusMessageToAgent(List.of(statusMessage)); - } - - - public void statusMessageToAgent(List statusMessageList) { - - // build all the OctopusMessage - List octopusMessageList = statusMessageList.stream().map( - statusMessage -> { - OctopusMessage octopusMessage = buildOctopusMessageStatus(statusMessage); - return octopusMessage; - } - ).collect(Collectors.toList()); - - // batch send all messages to RabbitMQ - oMessageToAgentSender.send(octopusMessageList); - - // todo how to get result ? - } - - private OctopusMessage buildOctopusMessageStatus(OctopusStatusMessage octopusStatusMessage) { - - // must be like this or it will be deserialized as LinkedHashMap - String s; - try { - s = objectMapper.writeValueAsString(octopusStatusMessage); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } - - return OctopusMessage.builder() - .uuid(octopusStatusMessage.getAgentTopicName()) - .type(OctopusMessageType.STATUS) - .init_time(TimeUtils.currentTime()) - .content(s) - .build(); - } - - -} diff --git a/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java index 49c5098..7454bd1 100644 --- a/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java +++ b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java @@ -1,10 +1,17 @@ package io.wdd.rpc.status; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; import lombok.experimental.SuperBuilder; +import javax.annotation.Resource; +import java.time.LocalDateTime; + @Data @AllArgsConstructor @NoArgsConstructor @@ -19,6 +26,9 @@ public class OctopusStatusMessage { public static final String METRIC_STATUS_MESSAGE_TYPE = "METRIC"; public static final String APP_STATUS_MESSAGE_TYPE = "APP"; + @Resource + ObjectMapper objectMapper; + /** * which kind of status should be return * metric => short time message @@ -31,4 +41,28 @@ public class OctopusStatusMessage { int metricRepeatPinch; + public OctopusMessage ConstructAgentStatusMessage(String statusType, String agentTopicName, LocalDateTime currentTime) { + + OctopusStatusMessage statusMessage = OctopusStatusMessage + .builder() + .statusType(statusType) + .build(); + + String ops; + try { + ops = objectMapper.writeValueAsString(statusMessage); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + + return OctopusMessage + .builder() + .type(OctopusMessageType.STATUS) + .uuid(agentTopicName) + .init_time(currentTime) + .content(ops) + .build(); + + } + } diff --git a/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java index 7286c6d..8945332 100644 --- a/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java @@ -1,7 +1,5 @@ package io.wdd.rpc.status.service; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; import 
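
Note the arrangement patch 27 introduces here: OctopusStatusMessage carries an @Resource ObjectMapper and an instance-level ConstructAgentStatusMessage, yet instances of this class are produced through builder(), not by the container, so only a container-managed copy would ever have the field populated; it is unclear from the diff alone whether the POJO is even registered as a bean. Patch 28 resolves this by making the method static over a statically exposed mapper. A compact illustration of the pitfall, with hypothetical names:

public class BuilderInjectionPitfallSketch {

    // Stand-in for a builder-built POJO that declares an injected field.
    static class StatusMessage {
        Object objectMapper; // plays the role of the @Resource ObjectMapper
        static StatusMessage builder() { return new StatusMessage(); }
    }

    public static void main(String[] args) {
        StatusMessage m = StatusMessage.builder();
        // Prints "null": the container never sees builder-created instances,
        // so objectMapper.writeValueAsString(...) on this copy would throw.
        System.out.println(m.objectMapper);
    }
}
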
io.wdd.common.utils.TimeUtils; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; @@ -33,7 +31,7 @@ public class AsyncStatusServiceImpl implements AsyncStatusService { OMessageToAgentSender oMessageToAgentSender; @Resource - ObjectMapper objectMapper; + OctopusStatusMessage octopusStatusMessage; @Resource AsyncWaitOctopusMessageResultService asyncWaitOctopusMessageResultService; @@ -107,7 +105,7 @@ public class AsyncStatusServiceImpl implements AsyncStatusService { List octopusStatusMessageList = ALL_AGENT_TOPIC_NAME_LIST .stream() .map( - agentTopicName -> ConstructAgentStatusMessage( + agentTopicName -> octopusStatusMessage.ConstructAgentStatusMessage( HEALTHY_STATUS_MESSAGE_TYPE, agentTopicName, currentTime @@ -120,27 +118,5 @@ public class AsyncStatusServiceImpl implements AsyncStatusService { } - private OctopusMessage ConstructAgentStatusMessage(String statusType, String agentTopicName, LocalDateTime currentTime) { - OctopusStatusMessage statusMessage = OctopusStatusMessage - .builder() - .statusType(statusType) - .build(); - - String ops; - try { - ops = objectMapper.writeValueAsString(statusMessage); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } - - return OctopusMessage - .builder() - .type(CurrentAppOctopusMessageType) - .uuid(agentTopicName) - .init_time(currentTime) - .content(ops) - .build(); - - } } From 1aa4c23dfc509ebd1d632db6befeccc0d6673dee Mon Sep 17 00:00:00 2001 From: zeaslity Date: Thu, 15 Jun 2023 17:17:55 +0800 Subject: [PATCH 28/45] [ Status ] optimize code --- .../utils/OctopusObjectMapperConfig.java | 13 +++++++++ .../AgentMetricStatusCollectService.java | 15 +++++------ .../wdd/rpc/status/OctopusStatusMessage.java | 11 +++----- .../service/AsyncStatusServiceImpl.java | 7 ++--- .../server/config/OctopusObjectMapper.java | 27 ++++++++----------- 5 files changed, 36 insertions(+), 37 deletions(-) diff --git a/server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java b/server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java index 7971f25..783eb37 100644 --- a/server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java +++ b/server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java @@ -1,12 +1,15 @@ package io.wdd.common.utils; import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import com.fasterxml.jackson.datatype.jsr310.deser.LocalDateTimeDeserializer; import com.fasterxml.jackson.datatype.jsr310.ser.LocalDateTimeSerializer; import org.springframework.boot.autoconfigure.jackson.Jackson2ObjectMapperBuilderCustomizer; import org.springframework.context.annotation.Configuration; +import javax.annotation.PostConstruct; +import javax.annotation.Resource; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; @@ -14,6 +17,16 @@ import java.time.format.DateTimeFormatter; @Configuration public class OctopusObjectMapperConfig { + public static ObjectMapper OctopusObjectMapper = null; + + @Resource + ObjectMapper objectMapper; + + @PostConstruct + public void setOctopusObjectMapper() { + OctopusObjectMapper = objectMapper; + } + public static Jackson2ObjectMapperBuilderCustomizer common() { return jacksonObjectMapperBuilder -> { diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java 
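
Patch 28 exposes the context-managed ObjectMapper through a static field, populated in a @PostConstruct hook, so that static helpers such as ConstructAgentStatusMessage can reach it without injection. The trade-off: the field is null until the Spring context has refreshed, so it must not be touched from static initializers or early bean construction. A usage-shaped sketch of a caller, assuming the context is already up:

import com.fasterxml.jackson.core.JsonProcessingException;

import static io.wdd.common.utils.OctopusObjectMapperConfig.OctopusObjectMapper;

public class StaticMapperUsageSketch {

    public static String toJson(Object payload) {
        try {
            // The static field was set by the @PostConstruct hook above.
            return OctopusObjectMapper.writeValueAsString(payload);
        } catch (JsonProcessingException e) {
            // Same wrap-and-rethrow convention as the surrounding code.
            throw new RuntimeException(e);
        }
    }
}
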
b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java index 17e5980..bcaec69 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java @@ -5,7 +5,6 @@ import com.fasterxml.jackson.databind.ObjectMapper; import io.wdd.common.utils.TimeUtils; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.sender.OMessageToAgentSender; -import io.wdd.rpc.status.OctopusStatusMessage; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; import org.springframework.util.CollectionUtils; @@ -16,6 +15,7 @@ import java.util.List; import java.util.stream.Collectors; import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.status.OctopusStatusMessage.ConstructAgentStatusMessage; import static io.wdd.rpc.status.OctopusStatusMessage.METRIC_STATUS_MESSAGE_TYPE; /** @@ -30,8 +30,6 @@ public class AgentMetricStatusCollectService { public static final String METRIC_REPORT_TIME_PINCH = "metricRepeatPinch"; public static final String METRIC_REPORT_TIMES_COUNT = "metricRepeatCount"; - @Resource - OctopusStatusMessage octopusStatusMessage; @Resource OMessageToAgentSender oMessageToAgentSender; @@ -61,12 +59,11 @@ public class AgentMetricStatusCollectService { List octopusStatusMessageList = ALL_HEALTHY_AGENT_TOPIC_NAME_LIST .stream() .map( - agentTopicName -> octopusStatusMessage - .ConstructAgentStatusMessage( - METRIC_STATUS_MESSAGE_TYPE, - agentTopicName, - currentTime - ) + agentTopicName -> ConstructAgentStatusMessage( + METRIC_STATUS_MESSAGE_TYPE, + agentTopicName, + currentTime + ) ) .collect(Collectors.toList()); diff --git a/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java index 7454bd1..e6e1a14 100644 --- a/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java +++ b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java @@ -1,7 +1,6 @@ package io.wdd.rpc.status; import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; import lombok.AllArgsConstructor; @@ -9,9 +8,10 @@ import lombok.Data; import lombok.NoArgsConstructor; import lombok.experimental.SuperBuilder; -import javax.annotation.Resource; import java.time.LocalDateTime; +import static io.wdd.common.utils.OctopusObjectMapperConfig.OctopusObjectMapper; + @Data @AllArgsConstructor @NoArgsConstructor @@ -26,9 +26,6 @@ public class OctopusStatusMessage { public static final String METRIC_STATUS_MESSAGE_TYPE = "METRIC"; public static final String APP_STATUS_MESSAGE_TYPE = "APP"; - @Resource - ObjectMapper objectMapper; - /** * which kind of status should be return * metric => short time message @@ -41,7 +38,7 @@ public class OctopusStatusMessage { int metricRepeatPinch; - public OctopusMessage ConstructAgentStatusMessage(String statusType, String agentTopicName, LocalDateTime currentTime) { + public static OctopusMessage ConstructAgentStatusMessage(String statusType, String agentTopicName, LocalDateTime currentTime) { OctopusStatusMessage statusMessage = OctopusStatusMessage .builder() @@ -50,7 +47,7 @@ public class OctopusStatusMessage { String ops; try { - ops = objectMapper.writeValueAsString(statusMessage); + ops = 
OctopusObjectMapper.writeValueAsString(statusMessage); } catch (JsonProcessingException e) { throw new RuntimeException(e); } diff --git a/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java index 8945332..a9cb661 100644 --- a/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java @@ -6,7 +6,6 @@ import io.wdd.rpc.message.OctopusMessageType; import io.wdd.rpc.message.handler.async.AsyncWaitOctopusMessageResultService; import io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend; import io.wdd.rpc.message.sender.OMessageToAgentSender; -import io.wdd.rpc.status.OctopusStatusMessage; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; @@ -19,6 +18,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.status.OctopusStatusMessage.ConstructAgentStatusMessage; import static io.wdd.rpc.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE; @Slf4j @@ -30,9 +30,6 @@ public class AsyncStatusServiceImpl implements AsyncStatusService { @Resource OMessageToAgentSender oMessageToAgentSender; - @Resource - OctopusStatusMessage octopusStatusMessage; - @Resource AsyncWaitOctopusMessageResultService asyncWaitOctopusMessageResultService; @@ -105,7 +102,7 @@ public class AsyncStatusServiceImpl implements AsyncStatusService { List octopusStatusMessageList = ALL_AGENT_TOPIC_NAME_LIST .stream() .map( - agentTopicName -> octopusStatusMessage.ConstructAgentStatusMessage( + agentTopicName -> ConstructAgentStatusMessage( HEALTHY_STATUS_MESSAGE_TYPE, agentTopicName, currentTime diff --git a/server/src/main/java/io/wdd/server/config/OctopusObjectMapper.java b/server/src/main/java/io/wdd/server/config/OctopusObjectMapper.java index f939b6d..d22b3a7 100644 --- a/server/src/main/java/io/wdd/server/config/OctopusObjectMapper.java +++ b/server/src/main/java/io/wdd/server/config/OctopusObjectMapper.java @@ -1,19 +1,14 @@ package io.wdd.server.config; -import io.wdd.common.utils.OctopusObjectMapperConfig; -import org.springframework.boot.autoconfigure.jackson.Jackson2ObjectMapperBuilderCustomizer; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -@Configuration -public class OctopusObjectMapper { - - // Note: this code does not override the ObjectMapper auto-configured by Spring Boot; it augments its configuration. - // use the common config of object mapper - @Bean - public Jackson2ObjectMapperBuilderCustomizer customJackson() { - return OctopusObjectMapperConfig.common(); - } - -} +//@Configuration +//public class OctopusObjectMapper { +// +// // Note: this code does not override the ObjectMapper auto-configured by Spring Boot; it augments its configuration. +// // use the common config of object mapper +// @Bean +// public Jackson2ObjectMapperBuilderCustomizer customJackson() { +// return OctopusObjectMapperConfig.common(); +// } +// +//} From 93692eb0af2935ed8777a54a358abe5ed563dd7a Mon Sep 17 00:00:00 2001 From: IceDerce Date: Fri, 16 Jun 2023 10:55:24 +0800 Subject: [PATCH 29/45] [ Execution ] change the execution log access way --- .../rpc/controller/ExecutionController.java | 9 - .../config/CommandReaderConfigBean.java | 53 ++- .../rpc/execute/result/BuildStreamReader.java | 378 ++++++++-------- .../result/RedisStreamReaderConfig.java | 242 +++++------ .../service/ExecutionResultDaemonHandler.java | 406
+++++++++--------- 5 files changed, 539 insertions(+), 549 deletions(-) diff --git a/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java b/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java index 7d29abf..b768099 100644 --- a/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java +++ b/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java @@ -4,7 +4,6 @@ import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; import io.wdd.common.response.R; -import io.wdd.rpc.execute.result.BuildStreamReader; import io.wdd.rpc.execute.service.AsyncExecutionService; import io.wdd.rpc.execute.service.SyncExecutionService; import org.springframework.web.bind.annotation.PostMapping; @@ -18,7 +17,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER; import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; @@ -30,8 +28,6 @@ public class ExecutionController { @Resource SyncExecutionService syncExecutionService; @Resource - BuildStreamReader buildStreamReader; - @Resource AsyncExecutionService asyncExecutionService; @PostMapping("/command/one") @@ -198,11 +194,6 @@ public class ExecutionController { @RequestParam(value = "streamKey") @ApiParam(value = "status的Stream Key") String streamKey ) { - buildStreamReader.registerStreamReader( - AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER, - streamKey - ); - return R.ok("请到控制台查看,已经切换至 => " + streamKey); } diff --git a/server/src/main/java/io/wdd/rpc/execute/config/CommandReaderConfigBean.java b/server/src/main/java/io/wdd/rpc/execute/config/CommandReaderConfigBean.java index f4da32e..ac3ae3f 100644 --- a/server/src/main/java/io/wdd/rpc/execute/config/CommandReaderConfigBean.java +++ b/server/src/main/java/io/wdd/rpc/execute/config/CommandReaderConfigBean.java @@ -1,27 +1,26 @@ -package io.wdd.rpc.execute.config; - -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.REDIS_STREAM_LISTENER_CONSUMER_NAME; - -@Configuration -public class CommandReaderConfigBean { - - // todo must support for multi thread - // its not thread safe now - @Bean - public CommandReaderConfig commandReaderConfig() { - - return CommandReaderConfig - .builder() - .consumerName(REDIS_STREAM_LISTENER_CONSUMER_NAME) - .streamKey("ccc") - .consumerType(REDIS_STREAM_LISTENER_CONSUMER_NAME) - .group("ccc") - .ExecutionResult(null) - .build(); - } - - -} +//package io.wdd.rpc.execute.config; +// +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Configuration; +// +// +//@Configuration +//public class CommandReaderConfigBean { +// +// // todo must support for multi thread +// // its not thread safe now +// @Bean +// public CommandReaderConfig commandReaderConfig() { +// +// return CommandReaderConfig +// .builder() +// .consumerName(REDIS_STREAM_LISTENER_CONSUMER_NAME) +// .streamKey("ccc") +// .consumerType(REDIS_STREAM_LISTENER_CONSUMER_NAME) +// .group("ccc") +// .ExecutionResult(null) +// .build(); +// } +// +// +//} diff --git a/server/src/main/java/io/wdd/rpc/execute/result/BuildStreamReader.java 
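
Patch 29 retires the listener-container plumbing that follows (BuildStreamReader, RedisStreamReaderConfig, the CommandReaderConfigBean) by commenting it out; the replacement access path itself is not shown in these hunks. For reference, a one-shot read of an execution-log stream can be done directly against the stream key without any listener container; this sketch assumes a configured StringRedisTemplate and is only one plausible alternative, not the project's actual replacement:

import java.util.List;

import org.springframework.data.redis.connection.stream.MapRecord;
import org.springframework.data.redis.connection.stream.ReadOffset;
import org.springframework.data.redis.connection.stream.StreamOffset;
import org.springframework.data.redis.core.StringRedisTemplate;

public class StreamReadSketch {

    // One-shot XREAD of everything currently in the given stream key.
    public static List<MapRecord<String, Object, Object>> readAll(
            StringRedisTemplate redisTemplate, String streamKey) {
        return redisTemplate.opsForStream()
                .read(StreamOffset.create(streamKey, ReadOffset.from("0-0")));
    }
}
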
b/server/src/main/java/io/wdd/rpc/execute/result/BuildStreamReader.java index 5775012..c6168a7 100644 --- a/server/src/main/java/io/wdd/rpc/execute/result/BuildStreamReader.java +++ b/server/src/main/java/io/wdd/rpc/execute/result/BuildStreamReader.java @@ -1,189 +1,189 @@ -package io.wdd.rpc.execute.result; - -import io.wdd.rpc.execute.config.CommandReaderConfig; -import io.wdd.server.utils.SpringUtils; -import lombok.SneakyThrows; -import lombok.extern.slf4j.Slf4j; -import org.springframework.data.redis.connection.stream.ReadOffset; -import org.springframework.data.redis.connection.stream.StreamOffset; -import org.springframework.data.redis.stream.StreamMessageListenerContainer; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.concurrent.TimeUnit; - -import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER; - - -@Component -@Slf4j -public class BuildStreamReader { - - private final HashMap REDIS_STREAM_LISTENER_CONTAINER_CACHE = new HashMap<>(16); - private RedisStreamReaderConfig redisStreamReaderConfig; - - private StreamMessageListenerContainer streamMessageListenerContainer; - - private CommandReaderConfig commandReaderConfig; - - public void buildStreamReader(CommandReaderConfig commandReaderConfig) { - - // prepare the environment - prepareExecutionEnv(); - - - // just modify the redis listener container and it's ok - modifyExecutionStreamReader(commandReaderConfig); - - } - - @SneakyThrows - private void modifyExecutionStreamReader(CommandReaderConfig commandReaderConfig) { - - // stop the old stream listener container - if (this.streamMessageListenerContainer.isRunning()) { - this.streamMessageListenerContainer.stop(); - } - - // modify container - this.streamMessageListenerContainer.receive( - StreamOffset.create( - commandReaderConfig.getStreamKey(), - ReadOffset.lastConsumed()), - - new CommandResultReader( - commandReaderConfig - ) - ); - - - // very important - TimeUnit.MILLISECONDS.sleep(500); - this.streamMessageListenerContainer.start(); - } - - private void prepareExecutionEnv() { - - getRedisStreamListenerContainer(); - - getRedisStreamReaderConfig(); - - } - - private void getRedisStreamReaderConfig() { - - this.commandReaderConfig = SpringUtils.getBean("commandReaderConfig", - CommandReaderConfig.class); - } - - private void getRedisStreamListenerContainer() { - - this.streamMessageListenerContainer = SpringUtils.getBean( - EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER, - StreamMessageListenerContainer.class - ); - } - - public void registerStreamReader(String redisStreamListenerContainerBeanName, String streamKey) { - registerStreamReader(redisStreamListenerContainerBeanName, - streamKey, - null); - } - - public void registerStreamReader(String redisStreamListenerContainerBeanName, String streamKey, ArrayList ExecutionResult) { - - // prepare the environment - prepareEnv(); - - // oldStreamKey equals streamKey don't need to do anything , just return - if (redisStreamReaderConfig.getStreamKey() - .equals(streamKey)) { - log.debug("redis listener container not change !"); - return; - } - - // destroy the old REDIS_STREAM_LISTENER_CONTAINER - destroyStreamReader(streamKey); - - // modify the configuration ==> streamKey - modifyStreamReader(streamKey, - ExecutionResult); - - // re-create the REDIS_STREAM_LISTENER_CONTAINER - createStreamReader(redisStreamListenerContainerBeanName, - streamKey); - - } - - private void prepareEnv() { - - 
getRedisStreamConfig(); - - } - - private void getRedisStreamConfig() { - - this.redisStreamReaderConfig = SpringUtils.getBean("redisStreamReaderConfig", - RedisStreamReaderConfig.class); - } - - - private void createStreamReader(String redisStreamListenerContainerBeanName, String streamKey) { - - log.debug("start to create the redis stream listener container"); - // create the lazy bean - - StreamMessageListenerContainer streamMessageListenerContainer = SpringUtils.getBean(redisStreamListenerContainerBeanName, - StreamMessageListenerContainer.class); - - REDIS_STREAM_LISTENER_CONTAINER_CACHE.put(streamKey, - streamMessageListenerContainer); - - // very important - log.debug("start the listener container"); - streamMessageListenerContainer.start(); - - - } - - private void modifyStreamReader(String streamKey, ArrayList executionResult) { - - log.debug("start to modify the redis stream listener container stream key"); - String oldStreamKey = redisStreamReaderConfig.getStreamKey(); - - log.debug("change stream key from [{}] to [{}]", - oldStreamKey, - streamKey); - - log.debug("start to set the Redis Stream Reader key"); - redisStreamReaderConfig.setStreamKey(streamKey); - - log.debug("start to set the Redis Stream Execution Result Container"); - redisStreamReaderConfig.setExecutionResult(executionResult); - - } - - - private void destroyStreamReader(String streamKey) { - - String oldStreamKey = redisStreamReaderConfig.getStreamKey(); - - if (REDIS_STREAM_LISTENER_CONTAINER_CACHE.containsKey(oldStreamKey)) { - - StreamMessageListenerContainer streamMessageListenerContainer = REDIS_STREAM_LISTENER_CONTAINER_CACHE.get(oldStreamKey); - - log.debug("destroyed old redis stream listener container is [ {} ]", - streamMessageListenerContainer); - - - // double destroy - SpringUtils.destroyBean(streamMessageListenerContainer); - streamMessageListenerContainer.stop(); - // help gc - streamMessageListenerContainer = null; - } - - - } -} +//package io.wdd.rpc.execute.result; +// +//import io.wdd.rpc.execute.config.CommandReaderConfig; +//import io.wdd.server.utils.SpringUtils; +//import lombok.SneakyThrows; +//import lombok.extern.slf4j.Slf4j; +//import org.springframework.data.redis.connection.stream.ReadOffset; +//import org.springframework.data.redis.connection.stream.StreamOffset; +//import org.springframework.data.redis.stream.StreamMessageListenerContainer; +//import org.springframework.stereotype.Component; +// +//import java.util.ArrayList; +//import java.util.HashMap; +//import java.util.concurrent.TimeUnit; +// +//import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER; +// +// +//@Component +//@Slf4j +//public class BuildStreamReader { +// +// private final HashMap REDIS_STREAM_LISTENER_CONTAINER_CACHE = new HashMap<>(16); +// private RedisStreamReaderConfig redisStreamReaderConfig; +// +// private StreamMessageListenerContainer streamMessageListenerContainer; +// +// private CommandReaderConfig commandReaderConfig; +// +// public void buildStreamReader(CommandReaderConfig commandReaderConfig) { +// +// // prepare the environment +// prepareExecutionEnv(); +// +// +// // just modify the redis listener container and it's ok +// modifyExecutionStreamReader(commandReaderConfig); +// +// } +// +// @SneakyThrows +// private void modifyExecutionStreamReader(CommandReaderConfig commandReaderConfig) { +// +// // stop the old stream listener container +// if (this.streamMessageListenerContainer.isRunning()) { +// 
this.streamMessageListenerContainer.stop(); +// } +// +// // modify container +// this.streamMessageListenerContainer.receive( +// StreamOffset.create( +// commandReaderConfig.getStreamKey(), +// ReadOffset.lastConsumed()), +// +// new CommandResultReader( +// commandReaderConfig +// ) +// ); +// +// +// // very important +// TimeUnit.MILLISECONDS.sleep(500); +// this.streamMessageListenerContainer.start(); +// } +// +// private void prepareExecutionEnv() { +// +// getRedisStreamListenerContainer(); +// +// getRedisStreamReaderConfig(); +// +// } +// +// private void getRedisStreamReaderConfig() { +// +// this.commandReaderConfig = SpringUtils.getBean("commandReaderConfig", +// CommandReaderConfig.class); +// } +// +// private void getRedisStreamListenerContainer() { +// +// this.streamMessageListenerContainer = SpringUtils.getBean( +// EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER, +// StreamMessageListenerContainer.class +// ); +// } +// +// public void registerStreamReader(String redisStreamListenerContainerBeanName, String streamKey) { +// registerStreamReader(redisStreamListenerContainerBeanName, +// streamKey, +// null); +// } +// +// public void registerStreamReader(String redisStreamListenerContainerBeanName, String streamKey, ArrayList ExecutionResult) { +// +// // prepare the environment +// prepareEnv(); +// +// // oldStreamKey equals streamKey don't need to do anything , just return +// if (redisStreamReaderConfig.getStreamKey() +// .equals(streamKey)) { +// log.debug("redis listener container not change !"); +// return; +// } +// +// // destroy the old REDIS_STREAM_LISTENER_CONTAINER +// destroyStreamReader(streamKey); +// +// // modify the configuration ==> streamKey +// modifyStreamReader(streamKey, +// ExecutionResult); +// +// // re-create the REDIS_STREAM_LISTENER_CONTAINER +// createStreamReader(redisStreamListenerContainerBeanName, +// streamKey); +// +// } +// +// private void prepareEnv() { +// +// getRedisStreamConfig(); +// +// } +// +// private void getRedisStreamConfig() { +// +// this.redisStreamReaderConfig = SpringUtils.getBean("redisStreamReaderConfig", +// RedisStreamReaderConfig.class); +// } +// +// +// private void createStreamReader(String redisStreamListenerContainerBeanName, String streamKey) { +// +// log.debug("start to create the redis stream listener container"); +// // create the lazy bean +// +// StreamMessageListenerContainer streamMessageListenerContainer = SpringUtils.getBean(redisStreamListenerContainerBeanName, +// StreamMessageListenerContainer.class); +// +// REDIS_STREAM_LISTENER_CONTAINER_CACHE.put(streamKey, +// streamMessageListenerContainer); +// +// // very important +// log.debug("start the listener container"); +// streamMessageListenerContainer.start(); +// +// +// } +// +// private void modifyStreamReader(String streamKey, ArrayList executionResult) { +// +// log.debug("start to modify the redis stream listener container stream key"); +// String oldStreamKey = redisStreamReaderConfig.getStreamKey(); +// +// log.debug("change stream key from [{}] to [{}]", +// oldStreamKey, +// streamKey); +// +// log.debug("start to set the Redis Stream Reader key"); +// redisStreamReaderConfig.setStreamKey(streamKey); +// +// log.debug("start to set the Redis Stream Execution Result Container"); +// redisStreamReaderConfig.setExecutionResult(executionResult); +// +// } +// +// +// private void destroyStreamReader(String streamKey) { +// +// String oldStreamKey = redisStreamReaderConfig.getStreamKey(); +// +// if 
(REDIS_STREAM_LISTENER_CONTAINER_CACHE.containsKey(oldStreamKey)) { +// +// StreamMessageListenerContainer streamMessageListenerContainer = REDIS_STREAM_LISTENER_CONTAINER_CACHE.get(oldStreamKey); +// +// log.debug("destroyed old redis stream listener container is [ {} ]", +// streamMessageListenerContainer); +// +// +// // double destroy +// SpringUtils.destroyBean(streamMessageListenerContainer); +// streamMessageListenerContainer.stop(); +// // help gc +// streamMessageListenerContainer = null; +// } +// +// +// } +//} diff --git a/server/src/main/java/io/wdd/rpc/execute/result/RedisStreamReaderConfig.java b/server/src/main/java/io/wdd/rpc/execute/result/RedisStreamReaderConfig.java index a57da80..8d51e9c 100644 --- a/server/src/main/java/io/wdd/rpc/execute/result/RedisStreamReaderConfig.java +++ b/server/src/main/java/io/wdd/rpc/execute/result/RedisStreamReaderConfig.java @@ -1,121 +1,121 @@ -package io.wdd.rpc.execute.result; - - -import io.wdd.rpc.scheduler.service.status.AgentStatusStreamReader; -import lombok.Getter; -import lombok.Setter; -import lombok.extern.slf4j.Slf4j; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Lazy; -import org.springframework.context.annotation.Scope; -import org.springframework.data.redis.connection.RedisConnectionFactory; -import org.springframework.data.redis.connection.stream.MapRecord; -import org.springframework.data.redis.connection.stream.ReadOffset; -import org.springframework.data.redis.connection.stream.StreamOffset; -import org.springframework.data.redis.stream.StreamMessageListenerContainer; - -import javax.annotation.Resource; -import java.time.Duration; -import java.util.ArrayList; - -@Configuration -@Slf4j -@Getter -@Setter -public class RedisStreamReaderConfig { - - @Resource - private RedisConnectionFactory redisConnectionFactory; - - public static final String COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER = "commandResultRedisStreamListenerContainer"; - - public static final String EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER = "executionResultRedisStreamListenerContainer"; - - public static final String AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER = "agentStatusRedisStreamListenerContainer"; - - public static final String REDIS_STREAM_LISTENER_CONSUMER_NAME = "OctopusServer"; - - /** - * used in old model - */ - private String streamKey = "cccc"; - - /** - * no use - */ - private ArrayList executionResult = null; - - - @Bean(value = EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER) - @Lazy - public StreamMessageListenerContainer> executionResultRedisStreamListenerContainer(){ - - StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions - .builder() - .pollTimeout(Duration.ofSeconds(2)) - .build(); - - StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); - - return listenerContainer; - } - - - @Bean(value = COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER) - @Scope("prototype") - @Lazy - public StreamMessageListenerContainer> commandResultRedisStreamListenerContainer(){ - - StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions - .builder() - .pollTimeout(Duration.ofSeconds(2)) - .build(); - - StreamMessageListenerContainer> listenerContainer = 
StreamMessageListenerContainer.create(redisConnectionFactory, options); - - // todo 此部分可以被移出到另外的位置,会更加方便,就不需要对此Bean进行创建和销毁了 - listenerContainer.receive( - - StreamOffset.create(streamKey, ReadOffset.lastConsumed()), - - new CommandResultReader( - REDIS_STREAM_LISTENER_CONSUMER_NAME, - streamKey, - REDIS_STREAM_LISTENER_CONSUMER_NAME, - executionResult - ) - - ); - - return listenerContainer; - } - - @Bean(value = AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER) - @Scope("prototype") - @Lazy - public StreamMessageListenerContainer> agentStatusRedisStreamListenerContainer(){ - - StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions - .builder() - .pollTimeout(Duration.ofSeconds(2)) - .build(); - - StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); - - listenerContainer.receive( - - StreamOffset.create(streamKey, ReadOffset.lastConsumed()), - - new AgentStatusStreamReader( - REDIS_STREAM_LISTENER_CONSUMER_NAME, - REDIS_STREAM_LISTENER_CONSUMER_NAME, - REDIS_STREAM_LISTENER_CONSUMER_NAME) - - ); - - return listenerContainer; - } - - -} +//package io.wdd.rpc.execute.result; +// +// +//import io.wdd.rpc.scheduler.service.status.AgentStatusStreamReader; +//import lombok.Getter; +//import lombok.Setter; +//import lombok.extern.slf4j.Slf4j; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Configuration; +//import org.springframework.context.annotation.Lazy; +//import org.springframework.context.annotation.Scope; +//import org.springframework.data.redis.connection.RedisConnectionFactory; +//import org.springframework.data.redis.connection.stream.MapRecord; +//import org.springframework.data.redis.connection.stream.ReadOffset; +//import org.springframework.data.redis.connection.stream.StreamOffset; +//import org.springframework.data.redis.stream.StreamMessageListenerContainer; +// +//import javax.annotation.Resource; +//import java.time.Duration; +//import java.util.ArrayList; +// +//@Configuration +//@Slf4j +//@Getter +//@Setter +//public class RedisStreamReaderConfig { +// +// @Resource +// private RedisConnectionFactory redisConnectionFactory; +// +// public static final String COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER = "commandResultRedisStreamListenerContainer"; +// +// public static final String EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER = "executionResultRedisStreamListenerContainer"; +// +// public static final String AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER = "agentStatusRedisStreamListenerContainer"; +// +// public static final String REDIS_STREAM_LISTENER_CONSUMER_NAME = "OctopusServer"; +// +// /** +// * used in old model +// */ +// private String streamKey = "cccc"; +// +// /** +// * no use +// */ +// private ArrayList executionResult = null; +// +// +// @Bean(value = EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER) +// @Lazy +// public StreamMessageListenerContainer> executionResultRedisStreamListenerContainer(){ +// +// StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions +// .builder() +// .pollTimeout(Duration.ofSeconds(2)) +// .build(); +// +// StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); +// +// return listenerContainer; +// } +// +// +// @Bean(value = 
COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER) +// @Scope("prototype") +// @Lazy +// public StreamMessageListenerContainer> commandResultRedisStreamListenerContainer(){ +// +// StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions +// .builder() +// .pollTimeout(Duration.ofSeconds(2)) +// .build(); +// +// StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); +// +// // todo 此部分可以被移出到另外的位置,会更加方便,就不需要对此Bean进行创建和销毁了 +// listenerContainer.receive( +// +// StreamOffset.create(streamKey, ReadOffset.lastConsumed()), +// +// new CommandResultReader( +// REDIS_STREAM_LISTENER_CONSUMER_NAME, +// streamKey, +// REDIS_STREAM_LISTENER_CONSUMER_NAME, +// executionResult +// ) +// +// ); +// +// return listenerContainer; +// } +// +// @Bean(value = AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER) +// @Scope("prototype") +// @Lazy +// public StreamMessageListenerContainer> agentStatusRedisStreamListenerContainer(){ +// +// StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions +// .builder() +// .pollTimeout(Duration.ofSeconds(2)) +// .build(); +// +// StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); +// +// listenerContainer.receive( +// +// StreamOffset.create(streamKey, ReadOffset.lastConsumed()), +// +// new AgentStatusStreamReader( +// REDIS_STREAM_LISTENER_CONSUMER_NAME, +// REDIS_STREAM_LISTENER_CONSUMER_NAME, +// REDIS_STREAM_LISTENER_CONSUMER_NAME) +// +// ); +// +// return listenerContainer; +// } +// +// +//} diff --git a/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java b/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java index b0cba48..3ff7089 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java @@ -1,203 +1,203 @@ -package io.wdd.rpc.execute.service; - - -import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.execute.config.CommandReaderConfig; -import io.wdd.rpc.execute.config.ExecutionLog; -import io.wdd.rpc.execute.result.BuildStreamReader; -import io.wdd.server.service.ExecutionLogService; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.collections.CollectionUtils; -import org.springframework.context.annotation.Lazy; - -import javax.annotation.PostConstruct; -import javax.annotation.Resource; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.*; - -/** - * 1. [waiting strategy ] - * 2. [build the redis stream listener] - * 3. [call persistence] - */ -//@Service -@Slf4j -@Lazy -@Deprecated -public class ExecutionResultDaemonHandler { - - /** - * store all execution result key - *
- * which means there are execution running , waiting for their result to handle - */ - public static final ConcurrentHashMap WAIT_EXECUTION_RESULT_LIST = new ConcurrentHashMap<>(32); - private final int MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT = 70; - - @Resource - BuildStreamReader buildStreamReader; - - @Resource - CommandReaderConfig commandReaderConfig; - - @Resource - ExecutionLogService executionLogService; - - @PostConstruct - public void startExecutionDaemonHandler() { - - // 启动一个异步线程,运行 Execution结果处理守护进程 - CompletableFuture.runAsync( - () -> realStartExecutionDaemonHandler() - ); - - } - - private void realStartExecutionDaemonHandler() { - - while (true) { - - while (WAIT_EXECUTION_RESULT_LIST.size() == 0) { - try { - // no execution result need to handle - - // wait for 5 seconds - log.debug("realStartExecutionDaemonHandler start to sleep waiting for result !"); - TimeUnit.SECONDS.sleep(5); - - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - // has result to handle , just handle one result at one time - String resultKey = WAIT_EXECUTION_RESULT_LIST - .keys() - .nextElement(); - - log.debug( - "current result key is [{}]", - resultKey - ); - - - CompletableFuture> executionResultFuture = - CompletableFuture - .supplyAsync( - () -> { - // 修改相应的参数 - commandReaderConfig.setStreamKey(resultKey); - // listener container 实际上是根据这个绑定的 - commandReaderConfig.setGroup(resultKey); - // 必须归零 - commandReaderConfig.setExecutionResult(null); - - // 构造 resultKey对应的 Redis Stream Listener Container - buildStreamReader - .buildStreamReader(commandReaderConfig); - - // 获得结果 - ArrayList s = new ArrayList<>( - List.of("no no no") - ); - - try { - s = CompletableFuture - .supplyAsync( - () -> { - while (true) { - // todo 多条命令时,这里只能获取到一个结果 - if (CollectionUtils.isNotEmpty(commandReaderConfig.getExecutionResult())) { - return commandReaderConfig.getExecutionResult(); - } - - try { - TimeUnit.SECONDS.sleep(3); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - } - ) - // 获取相应的结果 - .get( - MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT, - TimeUnit.SECONDS - ); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } catch (ExecutionException e) { - throw new RuntimeException(e); - } catch (TimeoutException e) { - throw new RuntimeException(e); - } - - - return s; - } - ); - - CompletableFuture> falloutTimeFuture = CompletableFuture.supplyAsync( - () -> { - try { - TimeUnit.SECONDS.sleep(MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - - return null; - } - ); - - // 获取结果,然后销毁Stream Listener Container - CompletableFuture complete = CompletableFuture - .anyOf( - falloutTimeFuture, - executionResultFuture - ); - - complete - .whenComplete( - (result, e) -> { - - log.debug( - "execution result are => {}", - result - ); - - // 持久化存储对应的结果 - ExecutionLog executionLog = WAIT_EXECUTION_RESULT_LIST.get(resultKey); - executionLog.setAcTime(TimeUtils.currentTime()); - executionLog.setResultContent(String.valueOf(commandReaderConfig.getExecutionResult())); - executionLog.setResultCode( - CollectionUtils.isEmpty((Collection) result) ? 
1 : 0 - ); - executionLog.setRecordId(commandReaderConfig.getRecordId()); - - - // 保存操作 - executionLogService.save(executionLog); - - // 清除此次任务的内容 - WAIT_EXECUTION_RESULT_LIST.remove(resultKey); - log.info( - "[Execution] - command {} result are {} result code is {} ,whole process are complete !", - executionLog.getCommandList(), - executionLog.getResultContent(), - executionLog.getResultCode() - ); - } - ); - - // very important - // stuck the main thread , otherwise it will create a dead loop - complete.join(); - - } - - } - - -} +//package io.wdd.rpc.execute.service; +// +// +//import io.wdd.common.utils.TimeUtils; +//import io.wdd.rpc.execute.config.CommandReaderConfig; +//import io.wdd.rpc.execute.config.ExecutionLog; +//import io.wdd.rpc.execute.result.BuildStreamReader; +//import io.wdd.server.service.ExecutionLogService; +//import lombok.extern.slf4j.Slf4j; +//import org.apache.commons.collections.CollectionUtils; +//import org.springframework.context.annotation.Lazy; +// +//import javax.annotation.PostConstruct; +//import javax.annotation.Resource; +//import java.util.ArrayList; +//import java.util.Collection; +//import java.util.List; +//import java.util.concurrent.*; +// +///** +// * 1. [waiting strategy ] +// * 2. [build the redis stream listener] +// * 3. [call persistence] +// */ +////@Service +//@Slf4j +//@Lazy +//@Deprecated +//public class ExecutionResultDaemonHandler { +// +// /** +// * store all execution result key +// *
+// * which means there are execution running , waiting for their result to handle +// */ +// public static final ConcurrentHashMap WAIT_EXECUTION_RESULT_LIST = new ConcurrentHashMap<>(32); +// private final int MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT = 70; +// +// @Resource +// BuildStreamReader buildStreamReader; +// +// @Resource +// CommandReaderConfig commandReaderConfig; +// +// @Resource +// ExecutionLogService executionLogService; +// +// @PostConstruct +// public void startExecutionDaemonHandler() { +// +// // 启动一个异步线程,运行 Execution结果处理守护进程 +// CompletableFuture.runAsync( +// () -> realStartExecutionDaemonHandler() +// ); +// +// } +// +// private void realStartExecutionDaemonHandler() { +// +// while (true) { +// +// while (WAIT_EXECUTION_RESULT_LIST.size() == 0) { +// try { +// // no execution result need to handle +// +// // wait for 5 seconds +// log.debug("realStartExecutionDaemonHandler start to sleep waiting for result !"); +// TimeUnit.SECONDS.sleep(5); +// +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } +// } +// +// // has result to handle , just handle one result at one time +// String resultKey = WAIT_EXECUTION_RESULT_LIST +// .keys() +// .nextElement(); +// +// log.debug( +// "current result key is [{}]", +// resultKey +// ); +// +// +// CompletableFuture> executionResultFuture = +// CompletableFuture +// .supplyAsync( +// () -> { +// // 修改相应的参数 +// commandReaderConfig.setStreamKey(resultKey); +// // listener container 实际上是根据这个绑定的 +// commandReaderConfig.setGroup(resultKey); +// // 必须归零 +// commandReaderConfig.setExecutionResult(null); +// +// // 构造 resultKey对应的 Redis Stream Listener Container +// buildStreamReader +// .buildStreamReader(commandReaderConfig); +// +// // 获得结果 +// ArrayList s = new ArrayList<>( +// List.of("no no no") +// ); +// +// try { +// s = CompletableFuture +// .supplyAsync( +// () -> { +// while (true) { +// // todo 多条命令时,这里只能获取到一个结果 +// if (CollectionUtils.isNotEmpty(commandReaderConfig.getExecutionResult())) { +// return commandReaderConfig.getExecutionResult(); +// } +// +// try { +// TimeUnit.SECONDS.sleep(3); +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } +// } +// } +// ) +// // 获取相应的结果 +// .get( +// MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT, +// TimeUnit.SECONDS +// ); +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } catch (ExecutionException e) { +// throw new RuntimeException(e); +// } catch (TimeoutException e) { +// throw new RuntimeException(e); +// } +// +// +// return s; +// } +// ); +// +// CompletableFuture> falloutTimeFuture = CompletableFuture.supplyAsync( +// () -> { +// try { +// TimeUnit.SECONDS.sleep(MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT); +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } +// +// return null; +// } +// ); +// +// // 获取结果,然后销毁Stream Listener Container +// CompletableFuture complete = CompletableFuture +// .anyOf( +// falloutTimeFuture, +// executionResultFuture +// ); +// +// complete +// .whenComplete( +// (result, e) -> { +// +// log.debug( +// "execution result are => {}", +// result +// ); +// +// // 持久化存储对应的结果 +// ExecutionLog executionLog = WAIT_EXECUTION_RESULT_LIST.get(resultKey); +// executionLog.setAcTime(TimeUtils.currentTime()); +// executionLog.setResultContent(String.valueOf(commandReaderConfig.getExecutionResult())); +// executionLog.setResultCode( +// CollectionUtils.isEmpty((Collection) result) ? 
1 : 0 +// ); +// executionLog.setRecordId(commandReaderConfig.getRecordId()); +// +// +// // 保存操作 +// executionLogService.save(executionLog); +// +// // 清除此次任务的内容 +// WAIT_EXECUTION_RESULT_LIST.remove(resultKey); +// log.info( +// "[Execution] - command {} result are {} result code is {} ,whole process are complete !", +// executionLog.getCommandList(), +// executionLog.getResultContent(), +// executionLog.getResultCode() +// ); +// } +// ); +// +// // very important +// // stuck the main thread , otherwise it will create a dead loop +// complete.join(); +// +// } +// +// } +// +// +//} From 6025620eea4f34df0ac2fc2d0d1ee6516c1a6faa Mon Sep 17 00:00:00 2001 From: zeaslity Date: Fri, 16 Jun 2023 14:22:58 +0800 Subject: [PATCH 30/45] [ Agent ] remove nacos in agent --- agent-go/g/global.go | 4 +- agent-go/main.go | 2 +- agent-go/octopus-agent-dev.yaml | 110 ++++++-- agent-go/rabbitmq/OMsgConnector.go | 6 +- agent-go/rabbitmq/RabbitMsgQueue.go | 14 +- agent-go/register/AgentInitialization.go | 2 +- agent-go/register/ConfigParser.go | 34 +++ agent-go/register/NacosInitalization.go | 339 ++++++++++++----------- 8 files changed, 307 insertions(+), 204 deletions(-) create mode 100644 agent-go/register/ConfigParser.go diff --git a/agent-go/g/global.go b/agent-go/g/global.go index 80c65eb..4c35d06 100644 --- a/agent-go/g/global.go +++ b/agent-go/g/global.go @@ -8,7 +8,7 @@ import ( type Global struct { AgentHasRegister bool - NacosConfig *viper.Viper + AgentConfig *viper.Viper P *ants.Pool } @@ -31,7 +31,7 @@ var G = NewGlobal( func NewGlobal(pool *ants.Pool) *Global { return &Global{ AgentHasRegister: false, - NacosConfig: nil, + AgentConfig: nil, P: pool, } } diff --git a/agent-go/main.go b/agent-go/main.go index 8ed778d..544b72b 100644 --- a/agent-go/main.go +++ b/agent-go/main.go @@ -21,7 +21,7 @@ func main() { println(filename) // 初始化Nacos的连接配置 - g.G.NacosConfig = register.InitNacos(filename) + g.G.AgentConfig = register.ParseConfiguration(filename) // 执行初始化之策工作 register.AgentServerInfoCache = register.INIT() diff --git a/agent-go/octopus-agent-dev.yaml b/agent-go/octopus-agent-dev.yaml index ac474f7..5179bdf 100644 --- a/agent-go/octopus-agent-dev.yaml +++ b/agent-go/octopus-agent-dev.yaml @@ -1,22 +1,90 @@ -spring: - application: - name: octopus-agent - profiles: - active: dev - cloud: - nacos: - config: - group: dev - config-retry-time: 3000 - file-extension: yaml - max-retry: 3 - # server-addr: "150.230.198.103:21060" - server-addr: "42.192.52.227:21060" - timeout: 5000 - config-long-poll-timeout: 5000 - extension-configs: - - group: dev - data-id: "common-dev.yaml" - server: - port: 8000 \ No newline at end of file + port: 8000 + +logging: + level: + web: info + +octopus: + message: + # agent boot up default common exchange + init_exchange: InitExchange + # server will send message to agent using this common queue + init_to_server: InitToServer + # agent boot up default common exchange routing key + init_to_server_key: InitToServerKey + # server will receive message from agent using this common queue + init_from_server: InitFromServer + # agent boot up default common exchange routing key + init_from_server_key: InitFromServerKey + # initialization register time out (unit ms) default is 5 min + init_ttl: "3000000" + # Octopus Exchange Name == server comunicate with agent + octopus_exchange: OctopusExchange + # Octopus Message To Server == all agent send info to server queue and topic + octopus_to_server: OctopusToServer + executor: + name: executor-functions + status: + name: octopus-agent 
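+    # assuming Quartz-style seven-field cron syntax here
+    # (sec min hour day-of-month month day-of-week year):
+    # "10 */1 * * * ? *" fires at second 10 of every minute,
+    # and start-delay presumably defers the first report by 30 seconds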
+ healthy: + type: cron + cron: 10 */1 * * * ? * + start-delay: 30 + metric: + pinch: 20 + agent: + executor: + # agent执行一条Command的最长超时时间 + processMaxTimeOut: 60 + status: + app: + - Nginx/nginx + - MySQL/mysql + - Xray/xray + - OctopusAgent/octopus-agent + - Redis/redis + - RabbitMQ/rabbitmq + +spring: + main: + allow-circular-references: true + allow-bean-definition-overriding: true + rabbitmq: + host: 42.192.52.227 + port: 20672 + username: boge + password: boge8tingH + virtual-host: / + listener: + simple: + retry: + # ack failed will reentrant the Rabbit Listener + max-attempts: 2 + enabled: true + # retry interval unit ms + max-interval: 65000 + initial-interval: 65000 + +#spring: +# application: +# name: octopus-agent +# profiles: +# active: dev +# cloud: +# nacos: +# config: +# group: dev +# config-retry-time: 3000 +# file-extension: yaml +# max-retry: 3 +# # server-addr: "150.230.198.103:21060" +# server-addr: "42.192.52.227:21060" +# timeout: 5000 +# config-long-poll-timeout: 5000 +# extension-configs: +# - group: dev +# data-id: "common-dev.yaml" +# +#server: +# port: 8000 \ No newline at end of file diff --git a/agent-go/rabbitmq/OMsgConnector.go b/agent-go/rabbitmq/OMsgConnector.go index 5d28d25..d02bc2b 100644 --- a/agent-go/rabbitmq/OMsgConnector.go +++ b/agent-go/rabbitmq/OMsgConnector.go @@ -12,9 +12,9 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { // 建立 业务消息 接收队列 // agentTopicName为名称的队列 - nacosConfig := g.G.NacosConfig + agentConfig := g.G.AgentConfig - octopusExchangeName := nacosConfig.GetString("octopus.message.octopus_exchange") + octopusExchangeName := agentConfig.GetString("octopus.message.octopus_exchange") octopusConnectProp := &ConnectProperty{ ExchangeName: octopusExchangeName, @@ -31,7 +31,7 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { // 建立 业务消息 返回队列 // 统一为 OctopusToServer - octopusToServerQueueName := nacosConfig.GetString("octopus.message.octopus_to_server") + octopusToServerQueueName := agentConfig.GetString("octopus.message.octopus_to_server") octopusToServerProp := &ConnectProperty{ ExchangeName: octopusExchangeName, diff --git a/agent-go/rabbitmq/RabbitMsgQueue.go b/agent-go/rabbitmq/RabbitMsgQueue.go index 0193f84..8edff5f 100644 --- a/agent-go/rabbitmq/RabbitMsgQueue.go +++ b/agent-go/rabbitmq/RabbitMsgQueue.go @@ -181,18 +181,18 @@ func (r *RabbitQueue) Read(autoAck bool) <-chan amqp.Delivery { return msgs } -// parseRabbitMQEndpoint 根据全局变量NacosConfig解析出RabbitMQ的连接地址 +// parseRabbitMQEndpoint 根据全局变量agentConfig解析出RabbitMQ的连接地址 func parseRabbitMQEndpointFromG() string { - nacosConfig := g.G.NacosConfig + agentConfig := g.G.AgentConfig var res strings.Builder - host := nacosConfig.GetString("spring.rabbitmq.host") - port := nacosConfig.GetString("spring.rabbitmq.port") - username := nacosConfig.GetString("spring.rabbitmq.username") - password := nacosConfig.GetString("spring.rabbitmq.password") - virtualHost := nacosConfig.GetString("spring.rabbitmq.virtual-host") + host := agentConfig.GetString("spring.rabbitmq.host") + port := agentConfig.GetString("spring.rabbitmq.port") + username := agentConfig.GetString("spring.rabbitmq.username") + password := agentConfig.GetString("spring.rabbitmq.password") + virtualHost := agentConfig.GetString("spring.rabbitmq.virtual-host") // amqp://{username}:{password}@{hostname}:{port}/{virtual_host} res.WriteString("amqp://") diff --git a/agent-go/register/AgentInitialization.go b/agent-go/register/AgentInitialization.go index c762d0c..62e72bf 100644 --- 
a/agent-go/register/AgentInitialization.go +++ b/agent-go/register/AgentInitialization.go @@ -22,7 +22,7 @@ func INIT() *AgentServerInfo { // 获取系统的环境变量 agentServerInfo := parseAgentServerInfo() - nacosConfig := g.G.NacosConfig + nacosConfig := g.G.AgentConfig initToServerProp := &rabbitmq.ConnectProperty{ ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), diff --git a/agent-go/register/ConfigParser.go b/agent-go/register/ConfigParser.go new file mode 100644 index 0000000..a876040 --- /dev/null +++ b/agent-go/register/ConfigParser.go @@ -0,0 +1,34 @@ +package register + +import ( + "fmt" + "github.com/spf13/viper" +) + +func ParseConfiguration(configFileName string) *viper.Viper { + + agentConfig := parseAgentConfigFile(configFileName, nil) + + return agentConfig +} + +func parseAgentConfigFile(configFileName string, v *viper.Viper) *viper.Viper { + + // 使用Viper框架读取 + if v == nil { + v = viper.New() + } + + // 设置配置文件路径和名称 + v.SetConfigName(configFileName) + v.AddConfigPath(".") + v.SetConfigType("yaml") + + // 读取默认的总配置文件 + err := v.ReadInConfig() + if err != nil { + panic(fmt.Errorf("fatal error config file: %s", err)) + } + + return v +} diff --git a/agent-go/register/NacosInitalization.go b/agent-go/register/NacosInitalization.go index 455ebf1..2aa11b3 100644 --- a/agent-go/register/NacosInitalization.go +++ b/agent-go/register/NacosInitalization.go @@ -1,171 +1,172 @@ package register -import ( - "bytes" - "fmt" - "github.com/nacos-group/nacos-sdk-go/v2/clients" - "github.com/nacos-group/nacos-sdk-go/v2/clients/config_client" - "github.com/nacos-group/nacos-sdk-go/v2/common/constant" - "github.com/nacos-group/nacos-sdk-go/v2/vo" - "github.com/spf13/viper" - "go.uber.org/zap" - "strconv" - "strings" -) - -var group = "" - -func InitNacos(configFileName string) *viper.Viper { - - v := parseAgentConfigFile(configFileName, nil) - group = v.GetString("spring.cloud.nacos.config.group") - - // build the nacos connection - configClient := startNacosConnection(v) - - // get all needed nacos config and merge - allNacosConfig := getAllNacosConfig(v, group, configClient) - - for _, nacosConfigContent := range allNacosConfig { - log.Debug(fmt.Sprintf("nacos config conetent is %s", nacosConfigContent)) - - parseNacosConfigContend(nacosConfigContent, v) - } - - log.Info(fmt.Sprintf("%s config read result are %v", configFileName, v.AllSettings())) - - return v -} - -func parseAgentConfigFile(configFileName string, v *viper.Viper) *viper.Viper { - - // 使用Viper框架读取 - if v == nil { - v = viper.New() - } - - // 设置配置文件路径和名称 - v.SetConfigName(configFileName) - v.AddConfigPath(".") - v.SetConfigType("yaml") - - // 读取默认的总配置文件 - err := v.ReadInConfig() - if err != nil { - panic(fmt.Errorf("fatal error config file: %s", err)) - } - - return v -} - -func parseNacosConfigContend(configContent string, v *viper.Viper) *viper.Viper { - - v.SetConfigType("yaml") - - // use merge - - err := v.MergeConfig(bytes.NewBuffer([]byte(configContent))) - if err != nil { - log.Error("nacos config contend read error !", zap.Error(err)) - } - - return v -} -func startNacosConnection(v *viper.Viper) config_client.IConfigClient { - - serverAddr := v.GetString("spring.cloud.nacos.config.server-addr") - - clientConfig := constant.ClientConfig{ - //Endpoint: serverAddr, - NamespaceId: "", - TimeoutMs: v.GetUint64("spring.cloud.nacos.config.timeout"), - NotLoadCacheAtStart: true, - AppendToStdout: true, - UpdateCacheWhenEmpty: true, - //LogDir: "/tmp/nacos/log", - //CacheDir: "/tmp/nacos/cache", - Username: "nacos", - 
Password: "Superwmm.23", - } - - split := strings.Split(serverAddr, ":") - if len(split) != 2 { - log.Error("nacos server addr error!") - } - - port, _ := strconv.ParseUint(split[1], 10, 64) - serverConfigs := []constant.ServerConfig{ - { - IpAddr: split[0], - Port: port, - GrpcPort: port + 1000, - }, - } - - // Another way of create config client for dynamic configuration (recommend) - configClient, err := clients.NewConfigClient( - vo.NacosClientParam{ - ClientConfig: &clientConfig, - ServerConfigs: serverConfigs, - }, - ) - if err != nil { - panic(err) - } - - return configClient -} - -func getAllNacosConfig(v *viper.Viper, group string, configClient config_client.IConfigClient) []string { - - result := make([]string, 0) - - // main nacos configs - mainNacosConfigFileName := v.GetString("spring.application.name") + "-" + v.GetString("spring.profiles.active") + "." + v.GetString("spring.cloud.nacos.config.file-extension") - - log.Debug(fmt.Sprintf("main nacos config file name is %s", mainNacosConfigFileName)) - configContent := getConfig(mainNacosConfigFileName, group, configClient) - result = append(result, configContent) - - // additional nacos config - additionalNacosConfig := v.Get("spring.cloud.nacos.config.extension-configs") - // 增加断言,判定map的类型 - m, ok := additionalNacosConfig.([]interface{}) - if !ok { - fmt.Println("additionalNacosConfig is not a slice") - return nil - } - - for _, addConfigMap := range m { - - realMap, _ := addConfigMap.(map[string]interface{}) - - // 拿到配置的Key - dataId := realMap["data-id"].(string) - group := realMap["group"].(string) - - // 查询 - config := getConfig(dataId, group, configClient) - result = append(result, config) - } - - return result -} - -// getConfig 从Nacos中获取相应的 -func getConfig(dataId string, group string, configClient config_client.IConfigClient) string { - - log.Debug(fmt.Sprintf("nacos config get method dataID is %s, group is %s", dataId, group)) - - content, err := configClient.GetConfig(vo.ConfigParam{ - DataId: dataId, - Group: group, - }) - if err != nil { - log.Error("nacos config get error !", zap.Error(err)) - } - - log.Debug(fmt.Sprintf("dataId %s , group %s, nacos config content is %s", dataId, group, content)) - - return content -} +// +//import ( +// "bytes" +// "fmt" +// "github.com/nacos-group/nacos-sdk-go/v2/clients" +// "github.com/nacos-group/nacos-sdk-go/v2/clients/config_client" +// "github.com/nacos-group/nacos-sdk-go/v2/common/constant" +// "github.com/nacos-group/nacos-sdk-go/v2/vo" +// "github.com/spf13/viper" +// "go.uber.org/zap" +// "strconv" +// "strings" +//) +// +//var group = "" +// +//func InitNacos(configFileName string) *viper.Viper { +// +// v := parseAgentConfigFile(configFileName, nil) +// group = v.GetString("spring.cloud.nacos.config.group") +// +// // build the nacos connection +// configClient := startNacosConnection(v) +// +// // get all needed nacos config and merge +// allNacosConfig := getAllNacosConfig(v, group, configClient) +// +// for _, nacosConfigContent := range allNacosConfig { +// log.Debug(fmt.Sprintf("nacos config conetent is %s", nacosConfigContent)) +// +// parseNacosConfigContend(nacosConfigContent, v) +// } +// +// log.Info(fmt.Sprintf("%s config read result are %v", configFileName, v.AllSettings())) +// +// return v +//} +// +//func parseAgentConfigFile(configFileName string, v *viper.Viper) *viper.Viper { +// +// // 使用Viper框架读取 +// if v == nil { +// v = viper.New() +// } +// +// // 设置配置文件路径和名称 +// v.SetConfigName(configFileName) +// v.AddConfigPath(".") +// v.SetConfigType("yaml") 
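+//	// assuming standard viper semantics: SetConfigName, AddConfigPath and
+//	// SetConfigType only declare where and how to search for the file;
+//	// nothing is read from disk until the ReadInConfig call below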
+// +// // 读取默认的总配置文件 +// err := v.ReadInConfig() +// if err != nil { +// panic(fmt.Errorf("fatal error config file: %s", err)) +// } +// +// return v +//} +// +//func parseNacosConfigContend(configContent string, v *viper.Viper) *viper.Viper { +// +// v.SetConfigType("yaml") +// +// // use merge +// +// err := v.MergeConfig(bytes.NewBuffer([]byte(configContent))) +// if err != nil { +// log.Error("nacos config contend read error !", zap.Error(err)) +// } +// +// return v +//} +//func startNacosConnection(v *viper.Viper) config_client.IConfigClient { +// +// serverAddr := v.GetString("spring.cloud.nacos.config.server-addr") +// +// clientConfig := constant.ClientConfig{ +// //Endpoint: serverAddr, +// NamespaceId: "", +// TimeoutMs: v.GetUint64("spring.cloud.nacos.config.timeout"), +// NotLoadCacheAtStart: true, +// AppendToStdout: true, +// UpdateCacheWhenEmpty: true, +// //LogDir: "/tmp/nacos/log", +// //CacheDir: "/tmp/nacos/cache", +// Username: "nacos", +// Password: "Superwmm.23", +// } +// +// split := strings.Split(serverAddr, ":") +// if len(split) != 2 { +// log.Error("nacos server addr error!") +// } +// +// port, _ := strconv.ParseUint(split[1], 10, 64) +// serverConfigs := []constant.ServerConfig{ +// { +// IpAddr: split[0], +// Port: port, +// GrpcPort: port + 1000, +// }, +// } +// +// // Another way of create config client for dynamic configuration (recommend) +// configClient, err := clients.NewConfigClient( +// vo.NacosClientParam{ +// ClientConfig: &clientConfig, +// ServerConfigs: serverConfigs, +// }, +// ) +// if err != nil { +// panic(err) +// } +// +// return configClient +//} +// +//func getAllNacosConfig(v *viper.Viper, group string, configClient config_client.IConfigClient) []string { +// +// result := make([]string, 0) +// +// // main nacos configs +// mainNacosConfigFileName := v.GetString("spring.application.name") + "-" + v.GetString("spring.profiles.active") + "." 
+ v.GetString("spring.cloud.nacos.config.file-extension") +// +// log.Debug(fmt.Sprintf("main nacos config file name is %s", mainNacosConfigFileName)) +// configContent := getConfig(mainNacosConfigFileName, group, configClient) +// result = append(result, configContent) +// +// // additional nacos config +// additionalNacosConfig := v.Get("spring.cloud.nacos.config.extension-configs") +// // 增加断言,判定map的类型 +// m, ok := additionalNacosConfig.([]interface{}) +// if !ok { +// fmt.Println("additionalNacosConfig is not a slice") +// return nil +// } +// +// for _, addConfigMap := range m { +// +// realMap, _ := addConfigMap.(map[string]interface{}) +// +// // 拿到配置的Key +// dataId := realMap["data-id"].(string) +// group := realMap["group"].(string) +// +// // 查询 +// config := getConfig(dataId, group, configClient) +// result = append(result, config) +// } +// +// return result +//} +// +//// getConfig 从Nacos中获取相应的 +//func getConfig(dataId string, group string, configClient config_client.IConfigClient) string { +// +// log.Debug(fmt.Sprintf("nacos config get method dataID is %s, group is %s", dataId, group)) +// +// content, err := configClient.GetConfig(vo.ConfigParam{ +// DataId: dataId, +// Group: group, +// }) +// if err != nil { +// log.Error("nacos config get error !", zap.Error(err)) +// } +// +// log.Debug(fmt.Sprintf("dataId %s , group %s, nacos config content is %s", dataId, group, content)) +// +// return content +//} From 92a2a16aea5913ba711581409c5b6a3e400ec294 Mon Sep 17 00:00:00 2001 From: IceDerce Date: Fri, 16 Jun 2023 16:12:31 +0800 Subject: [PATCH 31/45] [agent] 111 --- agent-go/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-go/main.go b/agent-go/main.go index 544b72b..2784f0e 100644 --- a/agent-go/main.go +++ b/agent-go/main.go @@ -18,7 +18,7 @@ func main() { flag.Parse() // 读取对应版本的配置文件 filename := fmt.Sprintf("octopus-agent-%s.yaml", version) - println(filename) + println("config file name is => " + filename) // 初始化Nacos的连接配置 g.G.AgentConfig = register.ParseConfiguration(filename) From a96d3e51ac323246faab1bf672fe3d69d057d2a0 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Tue, 20 Jun 2023 10:02:49 +0800 Subject: [PATCH 32/45] [ Server ] accomplish alive status precedure --- .../Root/Default Group/directory.json | 9 +++ .fastRequest/collections/Root/directory.json | 9 +++ .../server/ExecutionController/directory.json | 8 +++ .../patchCommandToAgentAll.rapi | 37 +++++++++++++ .../createScriptScheduler.rapi | 37 +++++++++++++ .../server/SchedulerController/directory.json | 8 +++ .../queryAllQuartzJob.rapi | 37 +++++++++++++ .../SchedulerController/queryAllTriggers.rapi | 37 +++++++++++++ .../queryRunQuartzJob.rapi | 37 +++++++++++++ .../GetHealthyStatusAgentList.rapi | 37 +++++++++++++ .../ManualUpdateAgentStatus.rapi | 37 +++++++++++++ .../server/StatusController/directory.json | 8 +++ .../collections/Root/server/directory.json | 8 +++ .../fastRequestCurrentProjectConfig.json | 37 +++++++++++++ .run/RunServerToRemote.run.xml | 32 ----------- .run/Server-dev.run.xml | 16 ------ .run/SkipTest-Package.run.xml | 31 ----------- agent-go/logger/logger.go | 2 +- agent-go/octopus-agent-dev.yaml | 2 +- agent-go/rabbitmq/OMsgConnector.go | 1 - agent-go/rabbitmq/OctopusMessage.go | 20 ++++--- agent-go/register/AgentInitialization.go | 48 +++++++++------- agent-go/register/AgentServerInfo.go | 2 +- agent-go/server-env.yaml | 4 +- agent-go/status/Status.go | 13 ++--- agent-go/tmp/init-from-server-message.json | 5 ++ agent-go/tmp/init-to-server-message.json | 8 
+++ agent-go/utils/TimeUtils.go | 11 +++- .../io/wdd/rpc/init/AcceptAgentInitInfo.java | 55 ++++++++++--------- .../message/sender/OMessageToAgentSender.java | 32 ++++++++--- .../service/BuildStatusScheduleTask.java | 6 +- .../AgentAliveStatusMonitorService.java | 29 +++++++--- server/src/main/resources/application.yml | 4 +- 33 files changed, 497 insertions(+), 170 deletions(-) create mode 100644 .fastRequest/collections/Root/Default Group/directory.json create mode 100644 .fastRequest/collections/Root/directory.json create mode 100644 .fastRequest/collections/Root/server/ExecutionController/directory.json create mode 100644 .fastRequest/collections/Root/server/ExecutionController/patchCommandToAgentAll.rapi create mode 100644 .fastRequest/collections/Root/server/SchedulerController/createScriptScheduler.rapi create mode 100644 .fastRequest/collections/Root/server/SchedulerController/directory.json create mode 100644 .fastRequest/collections/Root/server/SchedulerController/queryAllQuartzJob.rapi create mode 100644 .fastRequest/collections/Root/server/SchedulerController/queryAllTriggers.rapi create mode 100644 .fastRequest/collections/Root/server/SchedulerController/queryRunQuartzJob.rapi create mode 100644 .fastRequest/collections/Root/server/StatusController/GetHealthyStatusAgentList.rapi create mode 100644 .fastRequest/collections/Root/server/StatusController/ManualUpdateAgentStatus.rapi create mode 100644 .fastRequest/collections/Root/server/StatusController/directory.json create mode 100644 .fastRequest/collections/Root/server/directory.json create mode 100644 .fastRequest/config/fastRequestCurrentProjectConfig.json delete mode 100644 .run/RunServerToRemote.run.xml delete mode 100644 .run/Server-dev.run.xml delete mode 100644 .run/SkipTest-Package.run.xml create mode 100644 agent-go/tmp/init-from-server-message.json create mode 100644 agent-go/tmp/init-to-server-message.json diff --git a/.fastRequest/collections/Root/Default Group/directory.json b/.fastRequest/collections/Root/Default Group/directory.json new file mode 100644 index 0000000..da8abdb --- /dev/null +++ b/.fastRequest/collections/Root/Default Group/directory.json @@ -0,0 +1,9 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~Default Group", + "filePath": "~.fastRequest~collections~Root~Default Group~", + "groupId": "1", + "id": "1", + "name": "Default Group", + "type": 1 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/directory.json b/.fastRequest/collections/Root/directory.json new file mode 100644 index 0000000..218c7cd --- /dev/null +++ b/.fastRequest/collections/Root/directory.json @@ -0,0 +1,9 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root", + "filePath": "~.fastRequest~collections~Root~", + "groupId": "-1", + "id": "0", + "name": "Root", + "type": 1 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/ExecutionController/directory.json b/.fastRequest/collections/Root/server/ExecutionController/directory.json new file mode 100644 index 0000000..36f435a --- /dev/null +++ b/.fastRequest/collections/Root/server/ExecutionController/directory.json @@ -0,0 +1,8 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~server~ExecutionController", + "filePath": "~.fastRequest~collections~Root~server~ExecutionController~", + "id": "20230222093108443", + "name": "ExecutionController", + "type": 3 +} \ No newline at end of file diff --git 
a/.fastRequest/collections/Root/server/ExecutionController/patchCommandToAgentAll.rapi b/.fastRequest/collections/Root/server/ExecutionController/patchCommandToAgentAll.rapi new file mode 100644 index 0000000..443ffbd --- /dev/null +++ b/.fastRequest/collections/Root/server/ExecutionController/patchCommandToAgentAll.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~ExecutionController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~ExecutionController~patchCommandToAgentAll.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.ExecutionController.patchCommandToAgentAll", + "name": "[命令]- 发送命令至所有的主机", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.ExecutionController", + "jsonDocument": "", + "method": "patchCommandToAgentAll", + "methodDescription": "[命令]- 发送命令至所有的主机", + "methodType": "POST", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/executor/command/all", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/executor/command/all", + "urlEncodedKeyValueListJson": "[{\"comment\":\"命令行\",\"customFlag\":2,\"enabled\":true,\"key\":\"commandList\",\"type\":\"String\",\"value\":\"apt-get,update\"},{\"comment\":\"\",\"customFlag\":2,\"enabled\":true,\"key\":\"type\",\"type\":\"String\",\"value\":\"\"}]", + "urlEncodedKeyValueListText": "commandList=apt-get,update\n&type=", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.ExecutionController.patchCommandToAgentAll", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/createScriptScheduler.rapi b/.fastRequest/collections/Root/server/SchedulerController/createScriptScheduler.rapi new file mode 100644 index 0000000..f702cd0 --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/createScriptScheduler.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~createScriptScheduler.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.SchedulerController.createScriptScheduler", + "name": "新增一个定时脚本任务", + "paramGroup": { + "bodyKeyValueListJson": "{\n \"schedulerUuid\": \"\",\n \"name\": \"测试任务\",\n \"cronExpress\": \"30 */30 * * * ? 
*\",\n \"description\": \"这是注释内容\",\n \"scriptContent\": \"echo yes \\napt-get update \\necho no \\napt-get install nginx -y\",\n \"targetMachine\": \"Chengdu-amd64-98-98066f\",\n \"lastExecutionId\": null,\n \"lastExecutionResultKey\": \"\",\n \"lastExecutionStatus\": null ,\n \"createTime\": \"\",\n \"updateTime\": \"\",\n \"nextScheduleTime\": \"\",\n \"lastScheduleTime\": \"\"\n}", + "className": "io.wdd.rpc.controller.SchedulerController", + "jsonDocument": "{\n \"schedulerUuid\": \"No comment,Value =schedulerUuid_9dr3w\",\n \"name\": \"No comment,Value =name_ucmeh\",\n \"cronExpress\": \"No comment,Value =cronExpress_qbwqm\",\n \"description\": \"No comment,Value =description_drj0c\",\n \"scriptContent\": \"脚本任务的内容\",\n \"targetMachine\": \"执行目标机器agent_topic_name列表,使用, 分隔\",\n \"lastExecutionId\": \"与 execution_log表的主键对应,方便查询执行日志\",\n \"lastExecutionResultKey\": \"与 execution_log表的 result_key 对应,方便查询执行日志\",\n \"lastExecutionStatus\": \"任务上次执行状态\",\n \"createTime\": \"定时脚本任务创建时间\",\n \"updateTime\": \"上次更新时间\",\n \"nextScheduleTime\": \"任务下次计划执行时间\",\n \"lastScheduleTime\": \"任务上次计划执行时间\"\n}", + "method": "createScriptScheduler", + "methodDescription": "新增一个定时脚本任务", + "methodType": "POST", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/scheduler/script/create", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/scheduler/script/create", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.SchedulerController.createScriptScheduler", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/directory.json b/.fastRequest/collections/Root/server/SchedulerController/directory.json new file mode 100644 index 0000000..7cc508f --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/directory.json @@ -0,0 +1,8 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~", + "id": "20230116173428298", + "name": "SchedulerController", + "type": 3 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/queryAllQuartzJob.rapi b/.fastRequest/collections/Root/server/SchedulerController/queryAllQuartzJob.rapi new file mode 100644 index 0000000..80338e0 --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/queryAllQuartzJob.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~queryAllQuartzJob.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.SchedulerController.queryAllQuartzJob", + "name": "查询所有job", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.SchedulerController", + "jsonDocument": "", + "method": "queryAllQuartzJob", + "methodDescription": "查询所有job", + "methodType": "GET", + "multipartKeyValueListJson": "[]", + 
"originUrl": "/octopus/server/scheduler/queryAllJob", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/scheduler/queryAllJob", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.SchedulerController.queryAllQuartzJob", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/queryAllTriggers.rapi b/.fastRequest/collections/Root/server/SchedulerController/queryAllTriggers.rapi new file mode 100644 index 0000000..6858ff6 --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/queryAllTriggers.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~queryAllTriggers.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.SchedulerController.queryAllTriggers", + "name": "查询所有的触发器Trigger", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.SchedulerController", + "jsonDocument": "", + "method": "queryAllTriggers", + "methodDescription": "查询所有的触发器Trigger", + "methodType": "GET", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/scheduler/allTriggers", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/scheduler/allTriggers", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.SchedulerController.queryAllTriggers", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/queryRunQuartzJob.rapi b/.fastRequest/collections/Root/server/SchedulerController/queryRunQuartzJob.rapi new file mode 100644 index 0000000..3a2d4ec --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/queryRunQuartzJob.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~queryRunQuartzJob.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.SchedulerController.queryRunQuartzJob", + "name": "查询所有运行job", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.SchedulerController", + "jsonDocument": "", + "method": "queryRunQuartzJob", + "methodDescription": "查询所有运行job", + "methodType": "POST", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/scheduler/queryRunJob", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + 
"returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/scheduler/queryRunJob", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.SchedulerController.queryRunQuartzJob", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/StatusController/GetHealthyStatusAgentList.rapi b/.fastRequest/collections/Root/server/StatusController/GetHealthyStatusAgentList.rapi new file mode 100644 index 0000000..0d673c6 --- /dev/null +++ b/.fastRequest/collections/Root/server/StatusController/GetHealthyStatusAgentList.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~StatusController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~StatusController~GetHealthyStatusAgentList.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.StatusController.GetHealthyStatusAgentList", + "name": "[ 状态-Agent ] Map", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.StatusController", + "jsonDocument": "", + "method": "GetHealthyStatusAgentList", + "methodDescription": "[ 状态-Agent ] Map", + "methodType": "GET", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/status/status/agent", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/status/status/agent", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.StatusController.GetHealthyStatusAgentList", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/StatusController/ManualUpdateAgentStatus.rapi b/.fastRequest/collections/Root/server/StatusController/ManualUpdateAgentStatus.rapi new file mode 100644 index 0000000..97f1b45 --- /dev/null +++ b/.fastRequest/collections/Root/server/StatusController/ManualUpdateAgentStatus.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~StatusController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~StatusController~ManualUpdateAgentStatus.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.StatusController.ManualUpdateAgentStatus", + "name": "手动更新Agent的状态", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.StatusController", + "jsonDocument": "", + "method": "ManualUpdateAgentStatus", + "methodDescription": "手动更新Agent的状态", + "methodType": "POST", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/status/agent/status/update", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type 
=String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/status/agent/status/update", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.StatusController.ManualUpdateAgentStatus", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/StatusController/directory.json b/.fastRequest/collections/Root/server/StatusController/directory.json new file mode 100644 index 0000000..29f0ea3 --- /dev/null +++ b/.fastRequest/collections/Root/server/StatusController/directory.json @@ -0,0 +1,8 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~server~StatusController", + "filePath": "~.fastRequest~collections~Root~server~StatusController~", + "id": "20230222092955736", + "name": "StatusController", + "type": 3 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/directory.json b/.fastRequest/collections/Root/server/directory.json new file mode 100644 index 0000000..ee09b3f --- /dev/null +++ b/.fastRequest/collections/Root/server/directory.json @@ -0,0 +1,8 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~server", + "filePath": "~.fastRequest~collections~Root~server~", + "id": "20221230113234995", + "name": "server", + "type": 4 +} \ No newline at end of file diff --git a/.fastRequest/config/fastRequestCurrentProjectConfig.json b/.fastRequest/config/fastRequestCurrentProjectConfig.json new file mode 100644 index 0000000..7d39a50 --- /dev/null +++ b/.fastRequest/config/fastRequestCurrentProjectConfig.json @@ -0,0 +1,37 @@ +{ + "dataList": [ + { + "hostGroup": [ + { + "env": "local", + "url": "localhost:9090" + } + ], + "name": "OctpusGO" + } + ], + "envList": [ + "local" + ], + "headerList": [], + "postScript": "", + "preScript": "", + "projectList": [ + "OctpusGO" + ], + "syncModel": { + "branch": "master", + "domain": "https://github.com", + "enabled": false, + "namingPolicy": "byDoc", + "owner": "", + "repo": "", + "repoUrl": "", + "syncAfterRun": false, + "token": "", + "type": "github" + }, + "urlEncodedKeyValueList": [], + "urlParamsKeyValueList": [], + "urlSuffix": "" +} \ No newline at end of file diff --git a/.run/RunServerToRemote.run.xml b/.run/RunServerToRemote.run.xml deleted file mode 100644 index 1005446..0000000 --- a/.run/RunServerToRemote.run.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - - - \ No newline at end of file diff --git a/.run/Server-dev.run.xml b/.run/Server-dev.run.xml deleted file mode 100644 index 15e77dc..0000000 --- a/.run/Server-dev.run.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - \ No newline at end of file diff --git a/.run/SkipTest-Package.run.xml b/.run/SkipTest-Package.run.xml deleted file mode 100644 index ebfa812..0000000 --- a/.run/SkipTest-Package.run.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/agent-go/logger/logger.go b/agent-go/logger/logger.go index 8a0acc0..2c3ecb9 100644 --- a/agent-go/logger/logger.go +++ b/agent-go/logger/logger.go @@ -25,7 +25,7 @@ func NewLogger() (*Logger, error) { LevelKey: "level", TimeKey: "time", //CallerKey: "caller", - EncodeLevel: zapcore.CapitalColorLevelEncoder, + EncodeLevel: zapcore.CapitalLevelEncoder, EncodeTime: zapcore.RFC3339TimeEncoder, //EncodeCaller: zapcore.FullCallerEncoder, }, diff --git a/agent-go/octopus-agent-dev.yaml b/agent-go/octopus-agent-dev.yaml index 5179bdf..12c3a19 100644 --- 
a/agent-go/octopus-agent-dev.yaml +++ b/agent-go/octopus-agent-dev.yaml @@ -55,7 +55,7 @@ spring: port: 20672 username: boge password: boge8tingH - virtual-host: / + virtual-host: /wdd listener: simple: retry: diff --git a/agent-go/rabbitmq/OMsgConnector.go b/agent-go/rabbitmq/OMsgConnector.go index d02bc2b..4cd4c85 100644 --- a/agent-go/rabbitmq/OMsgConnector.go +++ b/agent-go/rabbitmq/OMsgConnector.go @@ -30,7 +30,6 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { // 建立 业务消息 返回队列 // 统一为 OctopusToServer - octopusToServerQueueName := agentConfig.GetString("octopus.message.octopus_to_server") octopusToServerProp := &ConnectProperty{ diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go index 0b7eda7..69a0c1f 100644 --- a/agent-go/rabbitmq/OctopusMessage.go +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "strings" - "time" ) var P = g.G.P @@ -33,11 +32,11 @@ type OctopusMsgBuilder interface { type OctopusMessage struct { UUID string `json:"uuid"` - InitTime time.Time `json:"init_time" format:"2023-03-21 16:38:30"` + InitTime string `json:"init_time" format:"2023-03-21 16:38:30"` Type string `json:"type"` Content interface{} `json:"content"` Result interface{} `json:"result"` - ACTime time.Time `json:"ac_time" format:"2023-03-21 16:38:30"` + ACTime string `json:"ac_time" format:"2023-03-21 16:38:30"` } func (om *OctopusMessage) Handle() { @@ -52,7 +51,7 @@ func (om *OctopusMessage) Send(rabbitQueue *RabbitQueue, msg []byte) { func (om *OctopusMessage) Build(omType string, content interface{}) *OctopusMessage { // 当前时间 - curTimeString := utils.CurTimeString() + curTimeString := utils.ParseDateTimeTime() // must write to string format, otherwise it's very hard to deserialize bytes, err := json.Marshal(content) @@ -62,11 +61,11 @@ func (om *OctopusMessage) Build(omType string, content interface{}) *OctopusMess return &OctopusMessage{ UUID: curTimeString, - InitTime: time.Now(), + InitTime: curTimeString, Type: omType, Content: string(bytes), Result: nil, - ACTime: time.Time{}, + ACTime: curTimeString, } } @@ -134,7 +133,7 @@ func statusOMHandler(octopusMessage *OctopusMessage) { } var statusRes string - if strings.HasPrefix(statusMessage.Type, "p") { + if strings.HasPrefix(statusMessage.StatusType, "P") { // ping info statusRes = status.Ping() } else { @@ -144,7 +143,14 @@ func statusOMHandler(octopusMessage *OctopusMessage) { } // 返回消息 + // 组装消息 + octopusMessage.ACTime = utils.ParseDateTimeTime() + octopusMessage.Result = statusRes + // 发送回去 + statusOctopusReplayMessage, _ := json.Marshal(octopusMessage) + OctopusToServerQueue.Send(statusOctopusReplayMessage) + // 输出日志 log.InfoF("接收到查询Agent状态的请求,结果为 => %s", statusRes) } diff --git a/agent-go/register/AgentInitialization.go b/agent-go/register/AgentInitialization.go index 62e72bf..f0a02a0 100644 --- a/agent-go/register/AgentInitialization.go +++ b/agent-go/register/AgentInitialization.go @@ -22,20 +22,20 @@ func INIT() *AgentServerInfo { // 获取系统的环境变量 agentServerInfo := parseAgentServerInfo() - nacosConfig := g.G.AgentConfig + agentConfig := g.G.AgentConfig initToServerProp := &rabbitmq.ConnectProperty{ - ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), - QueueName: nacosConfig.GetString("octopus.message.init_to_server"), + ExchangeName: agentConfig.GetString("octopus.message.init_exchange"), + QueueName: agentConfig.GetString("octopus.message.init_to_server"), ExchangeType: g.QueueDirect, - TopicKey: 
nacosConfig.GetString("octopus.message.init_to_server_key"), + TopicKey: agentConfig.GetString("octopus.message.init_to_server_key"), } initFromServerProp := &rabbitmq.ConnectProperty{ - ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), - QueueName: nacosConfig.GetString("octopus.message.init_from_server"), + ExchangeName: agentConfig.GetString("octopus.message.init_exchange"), + QueueName: agentConfig.GetString("octopus.message.init_from_server"), ExchangeType: g.QueueDirect, - TopicKey: nacosConfig.GetString("octopus.message.init_from_server_key"), + TopicKey: agentConfig.GetString("octopus.message.init_from_server_key"), } // 建立RabbitMQ的连接 @@ -96,6 +96,9 @@ func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToSe initOctopusMessageDeliveries := initFromServerQueue.Read(false) + // 2023年6月19日 修复注册信息一直没有完全消费的问题 + findRealAgentTopicName := "" + // 同步很多抢占注册的情况 for delivery := range initOctopusMessageDeliveries { @@ -128,29 +131,34 @@ func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToSe // 修改系统参数 g.G.AgentHasRegister = true - // 建立 运行时 RabbitMQ连接 - agentTopicName := initOctopusMsg.Result.(string) - rabbitmq.BuildOMsgRuntimeConnectorQueue(agentTopicName) + // 保存真实的AgentTopicName + findRealAgentTopicName = serverInfo.TopicName // 手动关闭 注册队列的连接 shutdownRegisterQueueConnection(initFromServerQueue, initToServerQueue) - return + + } else { + // 不是自身的 注册回复信息 -- 拒绝 2023年6月19日 此处存在错误! 会死循环Nack 导致异常 + log.Warn(fmt.Sprintf("OctopusMessage INIT from server not this agent ! => %v, ==>%s", initOctopusMsg, delivery.Body)) + delivery.Ack(false) + + // 需要休眠等待不再获取相应的信息 + time.Sleep(5 * time.Second) } - - // 不是自身的 注册回复信息 -- 拒绝 - log.Warn(fmt.Sprintf("OctopusMessage INIT from server not this agent ! => %v, ==>%s", initOctopusMsg, delivery.Body)) - delivery.Nack(false, true) - - // 需要休眠等待不再获取相应的信息 - time.Sleep(5 * time.Second) - } + // 建立 运行时 RabbitMQ连接 + rabbitmq.BuildOMsgRuntimeConnectorQueue(findRealAgentTopicName) + } // shutdownRegisterQueueConnection 关闭初始化连接的两个队列 func shutdownRegisterQueueConnection(initFromServerQueue *rabbitmq.RabbitQueue, initToServerQueue *rabbitmq.RabbitQueue) { - log.InfoF("Shutdown register queue connection !") + + initFromServerQueue.Close() + initToServerQueue.Close() + + log.InfoF("Pretend to Shutdown register queue connection !") } func parseAgentServerInfo() *AgentServerInfo { diff --git a/agent-go/register/AgentServerInfo.go b/agent-go/register/AgentServerInfo.go index 3026134..b39825e 100644 --- a/agent-go/register/AgentServerInfo.go +++ b/agent-go/register/AgentServerInfo.go @@ -22,5 +22,5 @@ type AgentServerInfo struct { Comment string `json:"comment" yaml:"comment"` MachineID string `json:"machineId" yaml:"machineId"` AgentVersion string `json:"agentVersion" yaml:"agentVersion"` - AgentTopicName string `json:"agentTopicName" yaml:"agentTopicName"` + TopicName string `json:"topicName" yaml:"topicName"` } diff --git a/agent-go/server-env.yaml b/agent-go/server-env.yaml index 4bf3f5d..dd5ab75 100644 --- a/agent-go/server-env.yaml +++ b/agent-go/server-env.yaml @@ -17,6 +17,6 @@ osKernelInfo: "5.4.0-135-generic" tcpControl: "cubic" virtualization: "Dedicated" ioSpeed: "150 MB/s" -machineId: "" +machineId: "fakemachinid123" agentVersion: "" -agentTopicName: "" +topicName: "" diff --git a/agent-go/status/Status.go b/agent-go/status/Status.go index 63d65db..8ab5626 100644 --- a/agent-go/status/Status.go +++ b/agent-go/status/Status.go @@ -11,14 +11,13 @@ var log = logger2.Log type StatusMessage struct { /** * 
which kind of status should be return - * metric => short time message - * all => all agent status message - * healthy => check for healthy + "PING"; + * METRIC => short time message + * ALL => all agent status message * */ - Type string `json:"type,omitempty"` - AgentTopicName string `json:"agent_topic_name,omitempty"` - MetricRepeatCount int `json:"metric_repeat_count,omitempty"` - metricRepeatPinch int `json:"metric_repeat_pinch,omitempty"` + StatusType string `json:"statusType,omitempty"` + MetricRepeatCount int `json:"metricRepeatCount,omitempty"` + metricRepeatPinch int `json:"metricRepeatPinch,omitempty"` } type AgentStatus struct { diff --git a/agent-go/tmp/init-from-server-message.json b/agent-go/tmp/init-from-server-message.json new file mode 100644 index 0000000..6660562 --- /dev/null +++ b/agent-go/tmp/init-from-server-message.json @@ -0,0 +1,5 @@ +{ + "uuid\":\"Chengdu-amd64-98-fakema\",\"init_time\":\"2023-06-19 15:21:02\",\"type\":\"INIT\",\"content\":\"{\\\"serverName\\\":\\\"Chengdu-amd64-98\\\",\\\"serverIpPbV4\\\":\\\"183.220.149.17\\\",\\\"serverIpInV4\\\":\\\"\\\",\\\"serverIpPbV6\\\":\\\"\\\",\\\"serverIpInV6\\\":\\\"\\\",\\\"registerTime\\\":null,\\\"expireTime\\\":null,\\\"createTime\\\":null,\\\"updateTime\\\":null,\\\"proxyType\\\":null,\\\"location\\\":\\\"Chengdu Sichuan CN\\\",\\\"provider\\\":\\\"AS139080 The Internet Data Center of Sichuan Mobile Communication Company Limited\\\",\\\"managePort\\ + \ + ":\\\"22\\\",\\\"cpuBrand\\\":\\\"Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz\\\",\\\"cpuCore\\\":\\\"12 @ 4299.998 MHz\\\",\\\"memoryTotal\\\":\\\"7.6 GB\\\",\\\"diskTotal\\\":\\\"914.9 GB\\\",\\\"diskUsage\\\":\\\"12.3 GB\\\",\\\"ioSpeed\\\":\\\"150 MB/s\\\",\\\"tcpControl\\\":\\\"cubic\\\",\\\"virtualization\\\":\\\"Dedicated\\\",\\\"osInfo\\\":\\\"Ubuntu 20.04.5 LTS\\\",\\\"osKernelInfo\\\":\\\"5.4.0-135-generic\\\",\\\"machineId\\\":\\\"fakemachinid123\\\",\\\"topicName\\\":\\\"Chengdu-amd64-98-fakema\\\",\\\"comment\\\":\\\"\\\",\\\"agentVersion\\\":\\\"\\\"}\",\"result\":null,\"ac_time\":null}" +} \ No newline at end of file diff --git a/agent-go/tmp/init-to-server-message.json b/agent-go/tmp/init-to-server-message.json new file mode 100644 index 0000000..24de275 --- /dev/null +++ b/agent-go/tmp/init-to-server-message.json @@ -0,0 +1,8 @@ +{ + "uuid": "2023-06-19 14:29:20", + "init_time": "2023-06-19 14:29:20", + "type": "INIT", + "content": "{\"serverName\":\"Chengdu-amd64-98\",\"serverIpPbV4\":\"183.220.149.17\",\"serverIpInV4\":\"\",\"serverIpPbV6\":\"\",\"serverIpInV6\":\"\",\"location\":\"Chengdu Sichuan CN\",\"provider\":\"AS139080 The Internet Data Center of Sichuan Mobile Communication Company Limited\",\"managePort\":\"22\",\"cpuCore\":\"12 @ 4299.998 MHz\",\"cpuBrand\":\"Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz\",\"osInfo\":\"Ubuntu 20.04.5 LTS\",\"osKernelInfo\":\"5.4.0-135-generic\",\"tcpControl\":\"cubic\",\"virtualization\":\"Dedicated\",\"ioSpeed\":\"150 MB/s\",\"memoryTotal\":\"7.6 GB\",\"diskTotal\":\"914.9 GB\",\"diskUsage\":\"12.3 GB\",\"comment\":\"\",\"machineId\":\"\",\"agentVersion\":\"\",\"agentTopicName\":\"\"}", + "result": null, + "ac_time": "2023-06-19 14:29:20" +} \ No newline at end of file diff --git a/agent-go/utils/TimeUtils.go b/agent-go/utils/TimeUtils.go index 0981dca..be6d10a 100644 --- a/agent-go/utils/TimeUtils.go +++ b/agent-go/utils/TimeUtils.go @@ -4,13 +4,18 @@ import ( "time" ) -// CurTimeString 输出系统时间的格式为"2006-01-02 15:04:05"形式的时间字符串 -func CurTimeString() string { +// ParseDateTimeTime 输出系统时间的格式为"2006-01-02 
15:04:05"形式的时间字符串 +func ParseDateTimeTime() string { now := time.Now() /*loc := time.FixedZone("UTC+8", 8*60*60) // 创建东八区时区对象 localTime := now.In(loc) // 转换为东八区时间*/ - return now.Format("2006-01-02 15:04:05") + return now.Format(time.DateTime) +} + +func ParseISOLocalDateTime() string { + now := time.Now() + return now.Format(time.RFC3339) } diff --git a/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java b/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java index 8bdbe49..501e4b7 100644 --- a/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java +++ b/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java @@ -1,9 +1,11 @@ package io.wdd.rpc.init; +import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.rabbitmq.client.Channel; import io.wdd.common.handler.MyRuntimeException; +import io.wdd.common.utils.TimeUtils; import io.wdd.rpc.message.OctopusMessage; import io.wdd.rpc.message.OctopusMessageType; import io.wdd.rpc.message.sender.OMessageToAgentSender; @@ -22,10 +24,11 @@ import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.io.IOException; -import java.time.LocalDateTime; import java.util.*; import java.util.concurrent.TimeUnit; +import static io.wdd.common.utils.OctopusObjectMapperConfig.OctopusObjectMapper; + /** * The type Accept boot up info message. */ @@ -108,8 +111,13 @@ public class AcceptAgentInitInfo { try { - serverInfoVO = objectMapper.readValue( + OctopusMessage initOctopusMessageFromAgent = OctopusObjectMapper.readValue( message.getBody(), + OctopusMessage.class + ); + + serverInfoVO = OctopusObjectMapper.readValue( + (String) initOctopusMessageFromAgent.getContent(), ServerInfoVO.class ); @@ -126,6 +134,7 @@ public class AcceptAgentInitInfo { // if (!checkAgentAlreadyRegister(agentQueueTopic)) { // log.info("[AGENT INIT] - agent not exist ! start to register !"); // } + // whether agent is registered already // save or update the octopus agent server info // 3. save the agent info into database @@ -135,7 +144,7 @@ public class AcceptAgentInitInfo { } // 4. generate the Octopus Agent Status Redis Stream Key & Consumer-Group - generateAgentStatusRedisStreamConsumerGroup(serverInfoVO.getTopicName()); + //generateAgentStatusRedisStreamConsumerGroup(serverInfoVO.getTopicName()); // 5. 
send InitMessage to agent sendInitMessageToAgent(serverInfoVO); @@ -169,7 +178,7 @@ public class AcceptAgentInitInfo { */ - throw new MyRuntimeException(" Octopus Server Initialization Error, please check !"); + throw new MyRuntimeException("Octopus Server Initialization Error, please check !"); } /** @@ -208,18 +217,6 @@ public class AcceptAgentInitInfo { ); } - // check for octopus-server consumer group - /*if (redisTemplate.opsForStream().groups(statusStreamKey) - .stream() - .filter( - group -> group.groupName().startsWith("Octopus") - ).collect(Collectors.toSet()).contains(Boolean.FALSE)) { - - - - redisTemplate.opsForStream().createGroup(statusStreamKey, "OctopusServer"); - }*/ - log.debug( "octopus agent [ {} ] status report stream key [ {} ] has been created !", agentTopicName, @@ -240,16 +237,24 @@ public class AcceptAgentInitInfo { private boolean sendInitMessageToAgent(ServerInfoVO serverInfoVO) { - OctopusMessage octopusMessage = OctopusMessage - .builder() - .type(OctopusMessageType.INIT) - // should be the OctopusExchange Name - .content(String.valueOf(initRabbitMQConfig.OCTOPUS_EXCHANGE)) - .init_time(LocalDateTime.now()) - .uuid(serverInfoVO.getTopicName()) - .build(); + try { + String serverInfoContent = OctopusObjectMapper.writeValueAsString(serverInfoVO); + + OctopusMessage octopusMessage = OctopusMessage + .builder() + .type(OctopusMessageType.INIT) + // should be the OctopusExchange Name + .content(serverInfoContent) + .init_time(TimeUtils.currentFormatTime()) + .uuid(serverInfoVO.getTopicName()) + .build(); + + oMessageToAgentSender.sendINIT(octopusMessage); + + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } - oMessageToAgentSender.sendINIT(octopusMessage); return true; } diff --git a/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java b/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java index e4e5fbc..56dd6e6 100644 --- a/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java +++ b/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java @@ -44,32 +44,46 @@ public class OMessageToAgentSender { } // send to Queue -- InitFromServer - log.info("send INIT OrderCommand to Agent = {}", message); + log.info( + "send INIT OrderCommand to Agent = {}", + message + ); - rabbitTemplate.convertAndSend(initRabbitMQConfig.INIT_EXCHANGE, initRabbitMQConfig.INIT_FROM_SERVER_KEY, writeData(message)); + rabbitTemplate.convertAndSend( + initRabbitMQConfig.INIT_EXCHANGE, + initRabbitMQConfig.INIT_FROM_SERVER_KEY, + writeData(message) + ); } public void send(OctopusMessage octopusMessage) { - log.debug("OctopusMessage {} send to agent {}", octopusMessage, octopusMessage.getUuid()); + log.debug( + "OctopusMessage {} send to agent {}", + octopusMessage, + octopusMessage.getUuid() + ); rabbitTemplate.convertAndSend( initRabbitMQConfig.OCTOPUS_EXCHANGE, octopusMessage.getUuid() + "*", - writeData(octopusMessage)); + writeData(octopusMessage) + ); } public void send(List octopusMessageList) { - octopusMessageList.stream().forEach( - octopusMessage -> { - this.send(octopusMessage); - } - ); + octopusMessageList + .stream() + .forEach( + octopusMessage -> { + this.send(octopusMessage); + } + ); } diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java b/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java index 009238d..ae2353d 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java +++ 
b/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java
@@ -43,7 +43,7 @@ public class BuildStatusScheduleTask {
     private void buildAll() {
 
         // Agent存活健康状态检查
-        buildMonitorAllAgentStatusScheduleTask();
+        buildMonitorAllAgentAliveStatusScheduleTask();
 
 
         // Agent运行信息检查 Metric
@@ -99,12 +99,12 @@ public class BuildStatusScheduleTask {
      * 延迟触发时间 healthyCheckStartDelaySeconds
      * 定时任务间隔 healthyCronTimeExpress
      */
-    private void buildMonitorAllAgentStatusScheduleTask() {
+    private void buildMonitorAllAgentAliveStatusScheduleTask() {
 
         // build the Job
         octopusQuartzService.addMission(
                 AgentAliveStatusMonitorJob.class,
-                "monitorAllAgentStatusJob",
+                "monitorAllAgentAliveStatusJob",
                 JOB_GROUP_NAME,
                 healthyCheckStartDelaySeconds,
                 healthyCronTimeExpress,
diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java
index 22854f4..fa423df 100644
--- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java
+++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java
@@ -49,8 +49,7 @@ public class AgentAliveStatusMonitorService {
     @Resource
     AsyncStatusService asyncStatusService;
 
-
-    private HashMap<String, String> AGENT_HEALTHY_INIT_MAP;
+    private HashMap<String, Boolean> AGENT_HEALTHY_INIT_MAP;
 
     public void go() {
 
@@ -77,29 +76,27 @@ public class AgentAliveStatusMonitorService {
         updateAllAgentHealthyStatus(agentAliveStatusMap);
     }
 
+    /**
+     * 初始化Agent存活状态的Redis缓存的信息,全部设置为False,然后等待存活状态检测
+     */
     private void checkOrCreateRedisHealthyKey() {
 
         // 检查开始的时候 需要手动将所有Agent的状态置为0
         // Agent如果存活,那么就可以将其自身状态修改为1
 
         // build the redis all agent healthy map struct
-        HashMap<String, String> initMap = new HashMap<>(32);
+        HashMap<String, Boolean> initMap = new HashMap<>(32);
         ALL_AGENT_TOPIC_NAME_LIST
                 .stream()
                 .forEach(
                         agentTopicName -> {
                             initMap.put(
                                     agentTopicName,
-                                    "0"
+                                    Boolean.FALSE
                             );
                         }
                 );
 
-        initMap.put(
-                "updateTime",
-                TimeUtils.currentTimeString()
-        );
-
         // cache this map struct
         AGENT_HEALTHY_INIT_MAP = initMap;
 
@@ -111,6 +108,14 @@ public class AgentAliveStatusMonitorService {
                 initMap
         );
 
+        redisTemplate
+                .opsForHash()
+                .put(
+                        ALL_AGENT_STATUS_REDIS_KEY,
+                        "initTime",
+                        TimeUtils.currentTimeString()
+                );
+
     }
 
     public void updateAllAgentHealthyStatus(Map<String, Boolean> agentAliveStatusMap) {
@@ -123,6 +128,12 @@ public class AgentAliveStatusMonitorService {
         // 执行Metric上报定时任务
         // buildStatusScheduleTask.buildAgentMetricScheduleTask();
 
+        log.debug(
+                "[存活状态] - 当前时间为 [ {} ] , 所有的Agent存活状态为=> {}",
+                currentTimeString,
+                agentAliveStatusMap
+        );
+
         // 这里仅仅是更新时间
         redisTemplate
                 .opsForHash()
diff --git a/server/src/main/resources/application.yml b/server/src/main/resources/application.yml
index e1c8ba9..4cdbd16 100644
--- a/server/src/main/resources/application.yml
+++ b/server/src/main/resources/application.yml
@@ -10,7 +10,7 @@ spring:
     port: 20672
     username: boge
     password: boge8tingH
-    virtual-host: /
+    virtual-host: /wdd
   listener:
     simple:
       retry:
@@ -118,7 +118,7 @@ octopus:
       name: octopus-agent
   healthy:
     type: cron
-    cron: 10 */1 * * * ? *
+    cron: 10 * * * * ?
* start-delay: 30 metric: pinch: 20 From 6f655a772d35b96490e3d12b6d75c0f2fde9a9b7 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Tue, 20 Jun 2023 14:22:24 +0800 Subject: [PATCH 33/45] [ Executor ] exec function - 1 --- agent-go/executor/BaseFunction.go | 252 ++++++++++++++++++ agent-go/executor/BaseFunction_test.go | 21 ++ agent-go/executor/function/BaseFunction.go | 136 ---------- agent-go/executor/script/shutdownFirewall.txt | 2 + 4 files changed, 275 insertions(+), 136 deletions(-) create mode 100644 agent-go/executor/BaseFunction.go create mode 100644 agent-go/executor/BaseFunction_test.go delete mode 100644 agent-go/executor/function/BaseFunction.go create mode 100644 agent-go/executor/script/shutdownFirewall.txt diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go new file mode 100644 index 0000000..83755b7 --- /dev/null +++ b/agent-go/executor/BaseFunction.go @@ -0,0 +1,252 @@ +package executor + +import "strings" + +type BaseFunc interface { + Exec(baseFuncName string, funcArgs ...string) string +} + +type AgentOsOperator struct { + InstallCommandPrefix string `json:"install_command_prefix",comment:"apt-get install or yum install"` + + RemoveCommandPrefix string `json:"remove_command_prefix",comment:"apt-get remove or yum remove"` + + CanAccessInternet bool `json:"can_access_internet",comment:"是否可以访问公网"` + + IsOsTypeUbuntu bool `json:"is_os_type_ubuntu",comment:"主机操作系统是否为ubuntu系列"` + + IsAgentInnerWall bool `json:"is_agent_inner_wall", comment:"主机是否身处国内"` +} + +// Exec 执行基础功能函数 +func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string { + + var multiLineCommand [][]string + + switch baseFuncName { + + case "shutdownFirewall": + multiLineCommand = op.shutdownFirewall() + break + case "modifyHostname": + multiLineCommand = op.modifyHostname(funcArgs) + break + case "disableSwap": + multiLineCommand = op.disableSwap() + break + case "installDocker": + multiLineCommand = op.installDocker(funcArgs) + break + case "removeDocker": + multiLineCommand = op.removeDocker() + break + case "installDockerCompose": + multiLineCommand = op.installDockerCompose() + break + case "modifyDockerConfig": + multiLineCommand = op.modifyDockerConfig() + break + case "installHelm": + multiLineCommand = op.installHelm() + break + case "installHarbor": + multiLineCommand = op.installHarbor(funcArgs) + break + case "chronyToPublicNTP": + multiLineCommand = op.chronyToPublicNTP() + break + case "chronyToMaster": + multiLineCommand = op.chronyToMaster(funcArgs) + break + case "installZSH": + multiLineCommand = op.installZSH() + break + case "modifySshPort": + multiLineCommand = op.modifySshPort(funcArgs) + break + case "openBBR": + multiLineCommand = op.openBBR() + break + default: + multiLineCommand = op.ok(funcArgs) + + } + + // exec the command here + result, _ := MultiLineCommandExecutor(multiLineCommand) + + // 归一化处理 + return strings.Join(result, "") +} + +func (op *AgentOsOperator) shutdownFirewall() [][]string { + + shutdownFunc := [][]string{ + {"systemctl", "stop", "firewalld"}, + {"systemctl", "disable", "firewalld"}, + } + + if !op.IsOsTypeUbuntu { + shutdownFunc = append(shutdownFunc, + []string{ + "sed", + "-i", + "s/SELINUX=enforcing/SELINUX=disabled/g", + "/etc/selinux/config", + }, + ) + } + + return shutdownFunc +} + +func (op *AgentOsOperator) modifyHostname(args []string) [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) disableSwap() [][]string { + + disableSwapFunc := [][]string{ + { + "swapoff", + "-a", + }, + { + "cp", 
+ "-f", + "/etc/fstab", + "/etc/fstab_bak", + }, + { + "cat", + "/etc/fstab_bak", + "|", + "grep", + "-v", + "swap", + ">", + "/etc/fstab", + }, + } + + return disableSwapFunc +} + +func (op *AgentOsOperator) removeDocker() [][]string { + + removeDockerFunc := [][]string{ + { + op.RemoveCommandPrefix, + "docker", + "docker-client", + "docker-client-latest", + "docker-ce-cli", + "docker-common", + "docker-latest", + "docker-latest-logrotate", + "docker-logrotate", + "docker-selinux", + "docker-engine-selinux", + "docker-engine", + "kubelet", + "kubeadm", + "kubectl", + }, + } + + return removeDockerFunc +} + +func (op *AgentOsOperator) installDocker(args []string) [][]string { + + // remove docker all staff + installDockerFunc := op.removeDocker() + + if op.IsOsTypeUbuntu { + installDockerFunc = append(installDockerFunc, [][]string{ + { + op.InstallCommandPrefix, + "apt-transport-https ca-certificates curl gnupg-agent software-properties-common", + }, + { + "apt-key", + "add", + "-", + "$(curl -fsSL https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg)", + }, + }...) + } else { + installDockerFunc = append(installDockerFunc, [][]string{ + { + op.InstallCommandPrefix, + "yum-utils device-mapper-persistent-data lvm2", + }, + { + "yum-config-manager", + "--add-repo", + "https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo", + }, + { + "sed ", + "-i ", + "'s/download.docker.com/mirrors.ustc.edu.cn\\/docker-ce/g' ", + "/etc/yum.repos.d/docker-ce.repo", + }, + {}, + }...) + } + + return installDockerFunc +} + +func (op *AgentOsOperator) installDockerCompose() [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) installHelm() [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) modifyDockerConfig() [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) installHarbor(args []string) [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) chronyToPublicNTP() [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) chronyToMaster(args []string) [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) installZSH() [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) modifySshPort(args []string) [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) openBBR() [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) ok(args []string) [][]string { + return [][]string{ + {"base function is ok , args are => " + strings.Join(args, " ")}, + } +} diff --git a/agent-go/executor/BaseFunction_test.go b/agent-go/executor/BaseFunction_test.go new file mode 100644 index 0000000..6a860b9 --- /dev/null +++ b/agent-go/executor/BaseFunction_test.go @@ -0,0 +1,21 @@ +package executor + +import "testing" + +var agentOP = AgentOsOperator{ + InstallCommandPrefix: "apt-get install", + RemoveCommandPrefix: "apt-get remove", + CanAccessInternet: true, + IsOsTypeUbuntu: true, + IsAgentInnerWall: true, +} + +func TestBaseFunc(t *testing.T) { + + agentOP.Exec("shutdownFirewall") + agentOP.Exec("modifyHostname") + agentOP.Exec("disableSwap") + agentOP.Exec("installDocker") + agentOP.Exec("removeDocker") + +} diff --git a/agent-go/executor/function/BaseFunction.go b/agent-go/executor/function/BaseFunction.go deleted file mode 100644 index 348ff45..0000000 --- a/agent-go/executor/function/BaseFunction.go +++ /dev/null @@ -1,136 +0,0 @@ -package function - -import "strings" - -type BaseFunc interface { - Exec(baseFuncName string, funcArgs ...string) string -} - -type AgentOsOperator 
struct { - execCommandPrefix string `json:"exec_command_prefix",comment:"apt-get or yum or zapper"` - - canAccessInternet bool `json:"can_access_internet",comment:"是否可以访问公网"` -} - -// Exec 执行基础功能函数 -func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string { - - result := "" - - switch baseFuncName { - - case "shutdownFirewall": - result = op.shutdownFirewall() - break - case "modifyHostname": - result = op.modifyHostname(funcArgs) - break - case "disableSwap": - result = op.disableSwap() - break - case "installDocker": - result = op.installDocker(funcArgs) - break - case "installDockerCompose": - result = op.installDockerCompose() - break - case "modifyDockerConfig": - result = op.modifyDockerConfig() - break - case "installHelm": - result = op.installHelm() - break - case "installHarbor": - result = op.installHarbor(funcArgs) - break - case "chronyToPublicNTP": - result = op.chronyToPublicNTP() - break - case "chronyToMaster": - result = op.chronyToMaster(funcArgs) - break - case "installZSH": - result = op.installZSH() - break - case "modifySshPort": - result = op.modifySshPort(funcArgs) - break - case "openBBR": - result = op.openBBR() - break - default: - result = op.ok(funcArgs) - - } - - return result -} - -func (op *AgentOsOperator) shutdownFirewall() string { - - return "" -} - -func (op *AgentOsOperator) modifyHostname(args []string) string { - - return "" -} - -func (op *AgentOsOperator) disableSwap() string { - - return "" -} - -func (op *AgentOsOperator) installDocker(args []string) string { - - return "" -} - -func (op *AgentOsOperator) installDockerCompose() string { - - return "" -} - -func (op *AgentOsOperator) installHelm() string { - - return "" -} - -func (op *AgentOsOperator) modifyDockerConfig() string { - - return "" -} - -func (op *AgentOsOperator) installHarbor(args []string) string { - - return "" -} - -func (op *AgentOsOperator) chronyToPublicNTP() string { - - return "" -} - -func (op *AgentOsOperator) chronyToMaster(args []string) string { - - return "" -} - -func (op *AgentOsOperator) installZSH() string { - - return "" -} - -func (op *AgentOsOperator) modifySshPort(args []string) string { - - return "" -} - -func (op *AgentOsOperator) openBBR() string { - - return "" -} - -func (op *AgentOsOperator) ok(args []string) string { - return "base function is ok , args are => " + strings.Join(args, " ") -} diff --git a/agent-go/executor/script/shutdownFirewall.txt b/agent-go/executor/script/shutdownFirewall.txt new file mode 100644 index 0000000..f311be2 --- /dev/null +++ b/agent-go/executor/script/shutdownFirewall.txt @@ -0,0 +1,2 @@ +systemctl stop firewalld +systemctl disable firewalld \ No newline at end of file From 4bdd97ca7394c9c17e7b32108055950f5974dbc9 Mon Sep 17 00:00:00 2001 From: IceDerce Date: Tue, 20 Jun 2023 16:43:33 +0800 Subject: [PATCH 34/45] [Excution] - base function start -2 --- agent-go/executor/BaseFunction.go | 182 ++++++++++++------ agent-go/executor/BaseFunction_test.go | 21 +- agent-go/executor/CommandExecutor.go | 1 - agent-go/executor/RealTimeExecutor.go | 8 +- .../main/java/io/wdd/ServerApplication.java | 1 + 5 files changed, 141 insertions(+), 72 deletions(-) diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go index 83755b7..e523071 100644 --- a/agent-go/executor/BaseFunction.go +++ b/agent-go/executor/BaseFunction.go @@ -7,9 +7,9 @@ type BaseFunc interface { } type AgentOsOperator struct { - InstallCommandPrefix string `json:"install_command_prefix",comment:"apt-get install or yum 
install"` + InstallCommandPrefix []string `json:"install_command_prefix",comment:"apt-get install or yum install"` - RemoveCommandPrefix string `json:"remove_command_prefix",comment:"apt-get remove or yum remove"` + RemoveCommandPrefix []string `json:"remove_command_prefix",comment:"apt-get remove or yum remove"` CanAccessInternet bool `json:"can_access_internet",comment:"是否可以访问公网"` @@ -31,6 +31,9 @@ func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string case "modifyHostname": multiLineCommand = op.modifyHostname(funcArgs) break + case "enableSwap": + multiLineCommand = op.enableSwap() + break case "disableSwap": multiLineCommand = op.disableSwap() break @@ -72,11 +75,15 @@ func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string } + log.DebugF("multiLineCommand are => %v", multiLineCommand) + // exec the command here - result, _ := MultiLineCommandExecutor(multiLineCommand) + for _, singleLineCommand := range multiLineCommand { + ReadTimeCommandExecutor(singleLineCommand) + } // 归一化处理 - return strings.Join(result, "") + return strings.Join([]string{}, "") } func (op *AgentOsOperator) shutdownFirewall() [][]string { @@ -105,6 +112,24 @@ func (op *AgentOsOperator) modifyHostname(args []string) [][]string { return [][]string{} } +func (op *AgentOsOperator) enableSwap() [][]string { + + enableSwapFunc := [][]string{ + { + "cp", + "-f", + "/etc/fstab_back", + "/etc/fstab", + }, + { + "cat", + "/etc/fstab", + }, + } + + return enableSwapFunc +} + func (op *AgentOsOperator) disableSwap() [][]string { disableSwapFunc := [][]string{ @@ -116,16 +141,12 @@ func (op *AgentOsOperator) disableSwap() [][]string { "cp", "-f", "/etc/fstab", - "/etc/fstab_bak", + "/etc/fstab_back", }, { - "cat", - "/etc/fstab_bak", - "|", - "grep", - "-v", - "swap", - ">", + "sed", + "-i", + "/swap/d", "/etc/fstab", }, } @@ -135,24 +156,25 @@ func (op *AgentOsOperator) disableSwap() [][]string { func (op *AgentOsOperator) removeDocker() [][]string { + removeDockerLine := append(op.RemoveCommandPrefix, []string{ + "docker", + "docker-client", + "docker-client-latest", + "docker-ce-cli", + "docker-common", + "docker-latest", + "docker-latest-logrotate", + "docker-logrotate", + "docker-selinux", + "docker-engine-selinux", + "docker-engine", + "kubelet", + "kubeadm", + "kubectl", + }...) + removeDockerFunc := [][]string{ - { - op.RemoveCommandPrefix, - "docker", - "docker-client", - "docker-client-latest", - "docker-ce-cli", - "docker-common", - "docker-latest", - "docker-latest-logrotate", - "docker-logrotate", - "docker-selinux", - "docker-engine-selinux", - "docker-engine", - "kubelet", - "kubeadm", - "kubectl", - }, + removeDockerLine, } return removeDockerFunc @@ -164,37 +186,81 @@ func (op *AgentOsOperator) installDocker(args []string) [][]string { installDockerFunc := op.removeDocker() if op.IsOsTypeUbuntu { - installDockerFunc = append(installDockerFunc, [][]string{ - { - op.InstallCommandPrefix, - "apt-transport-https ca-certificates curl gnupg-agent software-properties-common", - }, - { - "apt-key", - "add", - "-", - "$(curl -fsSL https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg)", - }, + // + installFirstLine := append(op.InstallCommandPrefix, []string{ + "apt-transport-https", + "ca-certificates", + "curl", + "gnupg-agent", + "software-properties-common", }...) 
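		// Illustrative note (a sketch, not part of the patch): with the assumed
		// prefix from the test fixture, []string{"apt-get", "install", "-y"},
		// the append above evaluates to:
		//
		//   installFirstLine == []string{
		//       "apt-get", "install", "-y",
		//       "apt-transport-https", "ca-certificates", "curl",
		//       "gnupg-agent", "software-properties-common",
		//   }
		//
		// Since append may reuse the prefix's backing array when it has spare
		// capacity, a defensive copy such as
		// append([]string(nil), op.InstallCommandPrefix...) before appending
		// would keep repeated calls from sharing state.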
+ + if op.IsAgentInnerWall { + // inner gfw + installDockerFunc = append(installDockerFunc, [][]string{ + installFirstLine, + { + "curl", + "-o", + "/etc/docker/docker-utsc.gpg", + "https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg", + }, + { + "apt-key", + "add", + "/etc/docker/docker-utsc.gpg", + }, + }...) + } else { + // outside world + installDockerFunc = append(installDockerFunc, [][]string{ + installFirstLine, + { + "curl", + "-o", + "/etc/docker/docker-utsc.gpg", + "https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg", + }, + { + "apt-key", + "add", + "/etc/docker/docker-utsc.gpg", + }, + }...) + } + + // look for specific docker-version to install + } else { - installDockerFunc = append(installDockerFunc, [][]string{ - { - op.InstallCommandPrefix, - "yum-utils device-mapper-persistent-data lvm2", - }, - { - "yum-config-manager", - "--add-repo", - "https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo", - }, - { - "sed ", - "-i ", - "'s/download.docker.com/mirrors.ustc.edu.cn\\/docker-ce/g' ", - "/etc/yum.repos.d/docker-ce.repo", - }, - {}, - }...) + installFirstLine := append(op.InstallCommandPrefix, + []string{ + "yum-utils", + "device-mapper-persistent-data", + "lvm2", + }..., + ) + + if op.IsAgentInnerWall { + // inner gfw + installDockerFunc = append(installDockerFunc, [][]string{ + installFirstLine, + { + "yum-config-manager", + "--add-repo", + "https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo", + }, + { + "sed ", + "-i ", + "'s/download.docker.com/mirrors.ustc.edu.cn\\/docker-ce/g' ", + "/etc/yum.repos.d/docker-ce.repo", + }, + {}, + }...) + } else { + // outside world + } + } return installDockerFunc diff --git a/agent-go/executor/BaseFunction_test.go b/agent-go/executor/BaseFunction_test.go index 6a860b9..37920f3 100644 --- a/agent-go/executor/BaseFunction_test.go +++ b/agent-go/executor/BaseFunction_test.go @@ -3,19 +3,22 @@ package executor import "testing" var agentOP = AgentOsOperator{ - InstallCommandPrefix: "apt-get install", - RemoveCommandPrefix: "apt-get remove", - CanAccessInternet: true, - IsOsTypeUbuntu: true, - IsAgentInnerWall: true, + InstallCommandPrefix: []string{ + "apt-get", "install", "-y", + }, + RemoveCommandPrefix: []string{"/usr/bin/apt", "remove", "-y"}, + CanAccessInternet: true, + IsOsTypeUbuntu: true, + IsAgentInnerWall: true, } func TestBaseFunc(t *testing.T) { - agentOP.Exec("shutdownFirewall") - agentOP.Exec("modifyHostname") - agentOP.Exec("disableSwap") + //agentOP.Exec("shutdownFirewall") + //agentOP.Exec("modifyHostname") + //agentOP.Exec("disableSwap") + //agentOP.Exec("enableSwap") + //agentOP.Exec("removeDocker") agentOP.Exec("installDocker") - agentOP.Exec("removeDocker") } diff --git a/agent-go/executor/CommandExecutor.go b/agent-go/executor/CommandExecutor.go index 3c65b4d..1a16a89 100644 --- a/agent-go/executor/CommandExecutor.go +++ b/agent-go/executor/CommandExecutor.go @@ -102,7 +102,6 @@ func SingleLineCommandExecutor(singleLineCommand []string) ([]string, error) { var result []string for scanner.Scan() { result = append(result, scanner.Text()) - } if err != nil { diff --git a/agent-go/executor/RealTimeExecutor.go b/agent-go/executor/RealTimeExecutor.go index ff16f8b..b56c642 100644 --- a/agent-go/executor/RealTimeExecutor.go +++ b/agent-go/executor/RealTimeExecutor.go @@ -12,22 +12,22 @@ func ReadTimeCommandExecutor(singleLineCommand []string) { cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...) 
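	// Sketch of a defensive guard (an assumption, not in the original patch):
	// exec.Command above indexes singleLineCommand[0], which panics on an empty
	// slice, so a check at the top of this function could look like:
	//
	//   if len(singleLineCommand) == 0 {
	//       log.ErrorF("empty command, nothing to execute")
	//       return
	//   }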
stdout, err := cmd.StdoutPipe() if err != nil { - panic(err) + log.ErrorF("command %v stdout error => %v", singleLineCommand, err) } stderr, err := cmd.StderrPipe() if err != nil { - panic(err) + log.ErrorF("command %v stderr error => %v", singleLineCommand, err) } if err := cmd.Start(); err != nil { - panic(err) + log.ErrorF("command %v runtime error => %v", singleLineCommand, err) } go copyOutput(stdout) go copyOutput(stderr) if err := cmd.Wait(); err != nil { - panic(err) + log.ErrorF("command %v result error => %v", singleLineCommand, err) } } diff --git a/server/src/main/java/io/wdd/ServerApplication.java b/server/src/main/java/io/wdd/ServerApplication.java index c3effd6..07d8464 100644 --- a/server/src/main/java/io/wdd/ServerApplication.java +++ b/server/src/main/java/io/wdd/ServerApplication.java @@ -8,6 +8,7 @@ import org.springframework.boot.autoconfigure.SpringBootApplication; @MapperScan("io.wdd.server.mapper") public class ServerApplication { + public static void main(String[] args) { SpringApplication.run(ServerApplication.class, args); } From 9cb89be88eac0e02e0ca7cb75b15af3e21e3a095 Mon Sep 17 00:00:00 2001 From: zeaslity Date: Tue, 20 Jun 2023 17:09:44 +0800 Subject: [PATCH 35/45] [ Executor ] install docker ubuntu --- agent-go/executor/BaseFunction.go | 37 ++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go index e523071..1737231 100644 --- a/agent-go/executor/BaseFunction.go +++ b/agent-go/executor/BaseFunction.go @@ -16,6 +16,10 @@ type AgentOsOperator struct { IsOsTypeUbuntu bool `json:"is_os_type_ubuntu",comment:"主机操作系统是否为ubuntu系列"` IsAgentInnerWall bool `json:"is_agent_inner_wall", comment:"主机是否身处国内"` + + AgentArch string `json:"agent_arch",comment:"主机的CPU架构,可选为amd64 arm64"` + + AgentOSReleaseCode string `json:"agent_os_release_code",comment:"主机操作系统的发行版代号, focal之类的"` } // Exec 执行基础功能函数 @@ -210,6 +214,10 @@ func (op *AgentOsOperator) installDocker(args []string) [][]string { "add", "/etc/docker/docker-utsc.gpg", }, + { + "add-apt-repository", + "deb [arch=" + op.AgentArch + "] https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu " + op.AgentOSReleaseCode + " stable", + }, }...) } else { // outside world @@ -218,18 +226,41 @@ func (op *AgentOsOperator) installDocker(args []string) [][]string { { "curl", "-o", - "/etc/docker/docker-utsc.gpg", - "https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg", + "/etc/docker/docker.gpg", + "https://download.docker.com/linux/ubuntu/gpg ", }, { "apt-key", "add", - "/etc/docker/docker-utsc.gpg", + "/etc/docker/docker.gpg", + }, + { + "add-apt-repository", + "deb [arch=" + op.AgentArch + "] https://download.docker.com/linux/ubuntu " + op.AgentOSReleaseCode + " stable", }, }...) 
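	// Worked example (assumed values, not read from a real host): with
	// AgentArch = "amd64" and AgentOSReleaseCode = "focal", the
	// add-apt-repository argument built above expands to
	//
	//   deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable
	//
	// and, in the inner-wall branch, to the mirrors.ustc.edu.cn equivalent.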
} // look for specific docker-version to install + installDockerFunc = append(installDockerFunc, []string{"apt-get", "update"}) + + var specificDockerVersion string + // hard code here 5:20.10.10~3-0~ubuntu-focal + if strings.HasPrefix(args[0], "19") { + specificDockerVersion = "5:19.03.15~3-0~ubuntu-focal" + } else { + specificDockerVersion = "5:20.10.10~3-0~ubuntu-focal" + } + + installDockerFunc = append(installDockerFunc, + append( + op.InstallCommandPrefix, + "docker-ce="+specificDockerVersion, + "docker-ce-cli="+specificDockerVersion, + "containerd.io", + "docker-compose-plugin", + ), + ) } else { installFirstLine := append(op.InstallCommandPrefix, From 102f043204f82e20cdac5a2de956ae2b3b7c70bf Mon Sep 17 00:00:00 2001 From: zeaslity Date: Wed, 21 Jun 2023 09:25:08 +0800 Subject: [PATCH 36/45] [ Executor ] add base sehll script --- agent-go/executor/script/1_node_important.sh | 2016 ++++++++++++++++++ 1 file changed, 2016 insertions(+) create mode 100644 agent-go/executor/script/1_node_important.sh diff --git a/agent-go/executor/script/1_node_important.sh b/agent-go/executor/script/1_node_important.sh new file mode 100644 index 0000000..3d879b2 --- /dev/null +++ b/agent-go/executor/script/1_node_important.sh @@ -0,0 +1,2016 @@ +#!/bin/bash +# bashsupport disable=BP2001 + +# 本脚本可以一键初始化Linux服务器的环境 +# 为rancher的自动部署k8s做准备 + +# 确定在哪个节点上运行此脚本,请按照如下的的顺序运行 +# HarborServer +# WorkerServer +# MinioServer +# GlusterServer (Storage-2上执行) +# HeketiServer (Storage-1上执行) + +### 需要修改以下的内容 ### +### 需要修改以下的内容 ### +### 需要修改以下的内容 ### + +WhichNodeRun=main +DOCKER_VERSION=20.10.15 # docker的版本,一般不修改 +HarborHostName=192.168.0.8 # 需要确保此IP能够被内网服务器访问,一般为公网服务器的外网IP +HarborHostPort=8033 # harbor服务器的端口,一般不修改 +HarborAdminPass=V2ryStr@ngPss # harbor服务器管理员密码,一般不修改 +PublicServerIPs=(192.168.0.8) # 需要修改为能够访问公网的服务器IP, 内网IP地址 +PrivateServerIPs=(192.168.0.65 192.168.0.45 192.168.0.7) # 内网服务器的IP地址,不包括可以访问公网IP的服务器 +StorageServerIPs=(192.168.0.2) # 存储服务器的IP地址,默认在第一台上安装heketi,默认第一台作为NFS服务端 +NfsPath=/nfsdata # nfs路径,需要事先创建并进行文件夹赋权 +MinioStorageType=volume # 选项:volume 或者 pv Minio集群底层的存储方式,使用4PV还是使用 1PV-4Volume的形式 +OSSPublicURL=https://oss2.demo.uavcmlc.com:18000/wangdada/ # 可以不配置,会从DockerHub拉取所有镜像(3GB) + +### 需要修改以上的内容 ### +### 需要修改以上的内容 ### +### 需要修改以上的内容 ### + +CMD_INSTALL="" +CMD_UPDATE="" +CMD_REMOVE="" +SOFTWARE_UPDATED=0 +LinuxReleaseVersion="" +DockerRegisterDomain=${HarborHostName}:${HarborHostPort} +ProxyOfflineFile=${OSSPublicURL}"proxy-offline.tar.gz" +HarborOfflineFile=${OSSPublicURL}"harbor-offline-installer-v2.1.0.tgz" # 可以不配置,会从GitHub拉取Harbor离线安装包(550MB) +DockerComposeFile=${OSSPublicURL}"docker-compose-Linux-x86_64" +HelmOfflineFile=${OSSPublicURL}"helm-v3.4.0-linux-amd64.tar.gz" +NginxOfflineFile=${OSSPublicURL}"nginx-1.20.1-1.el7.ngx.x86_64.rpm" +HeketiOfficeFile=${OSSPublicURL}"heketi-v9.0.0.linux.amd64.tar.gz" +HeketiConfigOfflineFile=${OSSPublicURL}"heketi-config.tar.gz" + +RED="31m" ## 姨妈红 +GREEN="32m" ## 水鸭青 +YELLOW="33m" ## 鸭屎黄 +PURPLE="35m" ## 基佬紫 +BLUE="36m" ## 天依蓝 + +######## 颜色函数方法很精妙 ############ +######## 颜色函数方法很精妙 ############ +colorEcho() { + # shellcheck disable=SC2145 + echo -e "\033[${1}${@:2}\033[0m" 1>&2 +} + +check_root() { + if [[ $EUID != 0 ]]; then + colorEcho ${RED} "当前非root账号(或没有root权限),无法继续操作,请更换root账号!" 
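        # Illustration (assumed invocation, not from the original script): $EUID
        # is 0 only for root, so a non-root user can re-run this script as
        #
        #   sudo bash 1_node_important.sh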
+ colorEcho ${YELLOW} "使用sudo -命令获取临时root权限(执行后可能会提示输入root密码)" + exit 1 + fi +} + +# 判断命令是否存在 +command_exists() { + command -v "$@" >/dev/null 2>&1 +} + +####### 获取系统版本及64位或32位信息 +check_sys() { + ## 判定Linux的发行版本 + if [ -f /etc/redhat-release ]; then + LinuxReleaseVersion="centos" + elif cat /etc/issue | grep -Eqi "debian"; then + LinuxReleaseVersion="debian" + elif cat /etc/issue | grep -Eqi "ubuntu"; then + LinuxReleaseVersion="ubuntu" + elif cat /etc/issue | grep -Eqi "centos|red hat|redhat"; then + LinuxReleaseVersion="centos" + elif cat /proc/version | grep -Eqi "debian"; then + LinuxReleaseVersion="debian" + elif cat /proc/version | grep -Eqi "ubuntu"; then + LinuxReleaseVersion="ubuntu" + elif cat /proc/version | grep -Eqi "centos|red hat|redhat"; then + LinuxReleaseVersion="centos" + else + LinuxReleaseVersion="" + fi + + # 判断系统的包管理工具 apt, yum, or zypper + getPackageManageTool() { + if [[ -n $(command -v apt-get) ]]; then + CMD_INSTALL="apt-get -y -qq install" + CMD_UPDATE="apt-get -qq update" + CMD_REMOVE="apt-get -y remove" + elif [[ -n $(command -v yum) ]]; then + CMD_INSTALL="yum -y -q install" + CMD_UPDATE="yum -q makecache" + CMD_REMOVE="yum -y remove" + elif [[ -n $(command -v zypper) ]]; then + CMD_INSTALL="zypper -y install" + CMD_UPDATE="zypper ref" + CMD_REMOVE="zypper -y remove" + else + return 1 + fi + return 0 + } + + # 检查系统包管理方式,更新包 + getPackageManageTool + if [[ $? -eq 1 ]]; then + colorEcho ${RED} "系统的包管理不是 APT or YUM, 请手动安装所需要的软件." + return 1 + fi + + ### 更新程序引索 + if [[ $SOFTWARE_UPDATED -eq 0 ]]; then + colorEcho ${BLUE} "正在更新软件包管理..." + $CMD_UPDATE + SOFTWARE_UPDATED=1 + fi + return 0 +} + +## 安装所需要的程序,及依赖程序 +installDemandSoftwares() { + for software in "$@"; do + ## 安装该软件 + if [[ -n $(command -v "${software}") ]]; then + colorEcho ${GREEN} "${software}已经安装了...跳过..." + echo "" + else + colorEcho ${BLUE} "正在安装 ${software}..." + $CMD_INSTALL "${software}" + ## 判断该软件是否安装成功 + if [[ $? -ne 0 ]]; then + colorEcho ${RED} "安装 ${software} 失败。" + colorEcho ${RED} "如果是重要软件,本脚本会自动终止!!" + colorEcho ${PURPLE} "一般软件,本脚本会忽略错误并继续运行,请之后手动安装该程序。" + return 1 + else + colorEcho ${GREEN} "已经成功安装 ${software}." + colorEcho ${GREEN} "-----------------------------------------------------------" + echo "" + fi + fi + done + return 0 +} + +shutdownFirewall() { + ## 关闭防火墙、SElinux、Swap + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始关闭系统的防火墙…………" + systemctl stop firewalld + systemctl disable firewalld + echo "" + + if [ "${LinuxReleaseVersion}" = "centos" ]; then + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${BLUE} "开始关闭SELinux……" + setenforce 0 + sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config + colorEcho ${GREEN} " SELinux关闭完成 " + else + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" 
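        # Hedged aside (general Debian/Ubuntu knowledge, not from this script):
        # Ubuntu hosts usually ship ufw rather than firewalld, so an equivalent
        # disable step there would look like:
        #
        #   systemctl stop ufw && systemctl disable ufw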
+ colorEcho ${GREEN} "无需关闭SELinux,现在 跳过" + fi + echo "" +} + +disableSwap() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始关闭系统的虚拟内存…………" + swapoff -a + colorEcho ${GREEN} " 关闭完成 " + echo "" + colorEcho ${BLUE} "正在备份系统的文件系统表……" + cp -f /etc/fstab /etc/fstab_bak + colorEcho ${GREEN} " 备份完成 " + echo "" + colorEcho ${BLUE} "正在修改文件系统表,去除虚拟内存的部分……" + cat /etc/fstab_bak | grep -v swap >/etc/fstab + colorEcho ${GREEN} " 修改完成 " + echo "" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" +} + +## 安装docker时,修改系统的配置文件 +modifySystemConfig_Docker() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始修改系统内核参数…………" + ## 配置内核参数 + cat >/etc/sysctl.d/k8s.conf <>~/.ssh/authorized_keys + chmod 600 ~/.ssh/authorized_keys + colorEcho ${GREEN} "--------------------------------------------------------------" + colorEcho ${GREEN} "-----------本机配置完成!-------------" + echo "" + + # bashsupport disable=BP2001 + AllInnerServer=("${PrivateServerIPs[@]}" "${StorageServerIPs[@]}") + + for ip in "${AllInnerServer[@]}"; do + colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行" + colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行" + colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行" + colorEcho ${BLUE} "-----------------------------------------------" + echo "" + echo "" + colorEcho ${RED} " 请以 root 角色 运行!!! " + colorEcho ${RED} " 请以 root 角色 运行!!! " + colorEcho ${RED} " 请以 root 角色 运行!!! " + echo "" + colorEcho ${YELLOW} 'sed -i -e "/PermitRootLogin no/ d" -e "$ a PermitRootLogin yes" /etc/ssh/sshd_config && systemctl restart sshd' + echo "" + colorEcho ${YELLOW} "ssh-keygen -t rsa -P \"\" -f ~/.ssh/id_rsa && echo \"$(cat ~/.ssh/id_rsa.pub)\" >> ~/.ssh/authorized_keys && echo \"\" && cat ~/.ssh/authorized_keys" + echo "" + echo "" + while true; do + colorEcho ${RED} "请确保您已经将上述的命令在主机${ip}上执行了!!" + read -r -p "请输入yes进行确认,脚本才可继续运行!!" input + case $input in + yes) + colorEcho ${GREEN} "您已确认在主机${ip}上添加了私有的ssh key!" + echo "" + break + ;; + *) + echo "" + colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认" + colorEcho ${RED} "请在主机${ip}上执行上述命令!!!" + colorEcho ${RED} "否则本脚本的功能会失效!!" + colorEcho ${RED} "-----------------------------------------------------" + echo "" + ;; + esac + done + + colorEcho ${GREEN} "----------------------------------------------------------" + done + echo "" +} + +downloadDocker() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "准备下载Docker的离线安装包…………" + colorEcho ${GREEN} "--------------------------------------------------------------" + colorEcho ${BLUE} "您选择安装的docker版本为:${DOCKER_VERSION}" + echo "" + + ## 清理docker环境 + colorEcho ${BLUE} "开始清理docker环境,卸载先前的相关安装内容!!" + $CMD_REMOVE docker docker-client docker-client-latest docker-ce-cli \ + docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux \ + docker-engine kubelet kubeadm kubectl + colorEcho ${GREEN} "----------------- docker环境清理完成 -----------------" + echo "" + + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" 
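    # Worked example of the major-version filter used below: with
    # DOCKER_VERSION=20.10.15,
    #
    #   echo ${DOCKER_VERSION} | cut -d"." -f1    # prints "20"
    #
    # so the yum/apt package listings are grepped down to the 20.x series.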
+ echo "" + if [ "$LinuxReleaseVersion" = "centos" ]; then + ## 安装docker的依赖 + colorEcho ${BLUE} "正在下载安装docker的所需要的依赖" + yum install -y -q --downloadonly --downloaddir=/tmp/docker-${DOCKER_VERSION}/depends yum-utils device-mapper-persistent-data lvm2 + colorEcho ${GREEN} " 下载完成 " + colorEcho ${GREEN} "查看已经下载的相关依赖安装包……" + ls /tmp/docker-${DOCKER_VERSION}/depends/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + ## 添加docker的yum源 + colorEcho ${BLUE} "正在添加docker的yum源…………" + yum-config-manager --add-repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo + if [[ -f /etc/yum.repos.d/docker-ce.repo ]]; then + sed -i 's/download.docker.com/mirrors.ustc.edu.cn\/docker-ce/g' /etc/yum.repos.d/docker-ce.repo + colorEcho ${BLUE} "已成功添加中科大的docker-ce的yum源!" + echo "" + colorEcho ${BLUE} "可以安装的docker-ce的 $(echo ${DOCKER_VERSION} | cut -d"." -f1) 版本为:" + colorEcho ${GREEN} "--------------------------------------------------------------" + yum list docker-ce --showduplicates | grep $(echo ${DOCKER_VERSION} | cut -d"." -f1) | awk '{print$2}' | cut -d ":" -f2 | sort -n -t - -k 1.7 + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + yum install -y -q --downloadonly --downloaddir=/tmp/docker-${DOCKER_VERSION} docker-ce-${DOCKER_VERSION} docker-ce-cli-${DOCKER_VERSION} + colorEcho ${GREEN} " 下载完成 " + echo "" + colorEcho ${GREEN} "查看已经下载的Docker安装包……" + ls /tmp/docker-${DOCKER_VERSION}/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + else + colorEcho ${RED} "docker的yum源添加失败,请手动添加" + exit 1 + fi + else + colorEcho ${BLUE} "开始安装相关的Docker基础组件" + installDemandSoftwares apt-transport-https ca-certificates curl gnupg-agent software-properties-common + + colorEcho ${BLUE} "开始添加中科大的docker源的apt-key" + curl -fsSL https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg | sudo apt-key add - + + colorEcho ${BLUE} "开始添加中科大的docker源的apt源" + add-apt-repository \ + "deb [arch=amd64] https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu \ + $(lsb_release -cs) \ + stable" + + # 国外的情况 + # colorEcho ${BLUE} "开始添加中科大的docker源的apt-key" + # curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + # + # colorEcho ${BLUE} "开始添加中科大的docker源的apt源" + # echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + # $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + + colorEcho ${BLUE} "正在执行更新操作!!" + apt-get update + + colorEcho ${BLUE} "查看特定版本的Docker镜像!" + colorEcho ${BLUE} "可以安装的docker-ce的 $(echo ${DOCKER_VERSION} | cut -d"." -f1) 版本为:" + colorEcho ${GREEN} "--------------------------------------------------------------" + apt-cache madison docker-ce | grep $(echo ${DOCKER_VERSION} | cut -d"." -f1) | awk '{print$3}' + + colorEcho ${BLUE} "开始下载 ${DOCKER_VERSION} 版本的离线安装包!" + dockerSpecific=$(apt-cache madison docker-ce | grep $(echo ${DOCKER_VERSION} | cut -d"." -f1) | awk '{print$3}' | grep ${DOCKER_VERSION}) + + # 需要获取其所依赖包的包 + colorEcho $BLUE "开始解析依赖!" + export DOCKER_VERSION=20.10.15 + export dockerSpecific=$(apt-cache madison docker-ce | grep $(echo ${DOCKER_VERSION} | cut -d"." 
-f1) | awk '{print$3}' | grep ${DOCKER_VERSION} | head -1) + + apt-get install "docker-ce=${dockerSpecific}" "docker-ce-cli=${dockerSpecific}" "containerd.io" "docker-compose-plugin" + + mkdir -p /tmp/docker-${DOCKER_VERSION} + cd /tmp/docker-${DOCKER_VERSION} + colorEcho $BLUE "开始下载所有的依赖!" + for staff in "${dockerStaff[@]}"; do + colorEcho ${BLUE} "开始下载 ${staff} 的依赖!" + apt download $(apt-rdepends ${staff} | grep -v "^ ") + colorEcho ${GREEN} "下载完成!" + done + + # apt-get download $libs + colorEcho ${GREEN} " 下载完成 " + echo "" + colorEcho ${GREEN} "查看已经下载的Docker安装包……" + ls /tmp/docker-${DOCKER_VERSION}/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + fi + echo "" +} + +distributeDocker() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始分发Docker的离线安装包…………" + echo "" + + for ip in "${PrivateServerIPs[@]}"; do + colorEcho ${BLUE} "正在将Docker的离线安装包分发至主机 ${ip} 上……" + echo "yes + yes + " | scp -r /tmp/docker-${DOCKER_VERSION} root@${ip}:/tmp/docker-${DOCKER_VERSION} + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} "Docker离线安装包已经分发完成!" + colorEcho ${GREEN} "----------------------------------------------------------" + else + colorEcho ${RED} "ERROR:Docker离线安装包 没有正常分发!!" + colorEcho ${RED} "----------------------------------------------------------" + fi + done + echo "" +} + +# 能够联网的机器上 +downloadGlusterFSHeketi() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始下载存储服务器相关的组件…………" + + colorEcho ${BLUE} "正在安装 gluster 源!" + yum install centos-release-gluster -y + + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "glusterfs的源添加完成…………" + echo "" + GlusterFSVersion=$(yum list glusterfs | grep "gluster" | awk '{print$2}') + HeketiVersion=$(yum list heketi | grep "heketi" | awk '{print$2}') + + colorEcho ${BLUE} "开始下载 glusterFS的离线安装包!" + colorEcho ${BLUE} " glusterFS的版本为: ${GlusterFSVersion}!!" + colorEcho ${BLUE} " glusterFS的版本为: ${GlusterFSVersion}!!" + yum install -y -q --downloadonly --downloaddir=/tmp/storage/ glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel + colorEcho ${GREEN} " 下载完成 " + + colorEcho ${BLUE} "开始下载 heketi 的离线安装包!" + colorEcho ${BLUE} " heketi 的版本为 ${HeketiVersion}!!" + colorEcho ${BLUE} " heketi 的版本为 ${HeketiVersion}!!" + yum install -y -q --downloadonly --downloaddir=/tmp/storage/ hekeit heketi-client + colorEcho ${GREEN} " 下载完成 " + + colorEcho ${GREEN} "查看已经下载的相关依赖安装包……" + ls /tmp/storage/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + else + colorEcho ${RED} "glusterfs的源添加失败!! 无法继续进行存储服务器的初始化" + colorEcho ${RED} "glusterfs的源添加失败!! 无法继续进行存储服务器的初始化" + colorEcho ${RED} "glusterfs的源添加失败!! 无法继续进行存储服务器的初始化" + return 1 + fi + +} + +distributeGlusterFSHeketiRPMs() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始分发GlusterFSHeketi的离线安装包…………" + echo "" + + for ip in "${StorageServerIPs[@]}"; do + colorEcho ${BLUE} "正在将GlusterFS-Heketi的离线安装包分发至主机 ${ip} 上……" + echo "yes + yes + " | scp -r /tmp/storage/ root@${ip}:/tmp/storage/ + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} "GlusterFS-Heketi 的离线安装包已经分发完成!" + colorEcho ${GREEN} "----------------------------------------------------------" + else + colorEcho ${RED} "ERROR:GlusterFS-Heketi 离线安装包 没有正常分发!!" 
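            # Note on the echo "yes"-pipe pattern above (a sketch, not the
            # script's original approach): scp reads host-key prompts from the
            # tty, so piping "yes" is unreliable; a non-interactive form is
            #
            #   scp -o StrictHostKeyChecking=no -r /tmp/storage/ root@${ip}:/tmp/storage/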
+ colorEcho ${RED} "----------------------------------------------------------" + fi + done + echo "" +} + +installGlusterFS() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装 存储服务 GlusterFS 的基础组件 的相关服务…………" + echo "" + + if [ -d /tmp/storage ]; then + ls /tmp/storage | grep -q gluster + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + yum install -y -q /tmp/storage/*.rpm + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "GlusterFS安装完成,开始启动服务!" + systemctl start glusterd.service + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status glusterd.service + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl enable glusterd.service + echo "" + echo "" + colorEcho ${BLUE} "开始配置 GlusterFS,连接对方主机!" + StorageNodeNum=$(echo ${WhichNodeRun} | cut -d"-" -f2) + if [ "${StorageNodeNum}" -eq "1" ]; then + colorEcho ${BLUE} "检测到当前主机为 Storage-1,请确保是否正确!" + colorEcho ${BLUE} "检测到当前主机为 Storage-1,请确保是否正确!" + colorEcho ${BLUE} "检测到当前主机为 Storage-1,请确保是否正确!" + gluster peer probe storage-2 + echo "" + colorEcho ${BLUE} "glusterFS的节点状态为:" + gluster peer status + elif [ "${StorageNodeNum}" -eq "2" ]; then + colorEcho ${BLUE} "检测到当前主机为 Storage-2,请确保是否正确!" + colorEcho ${BLUE} "检测到当前主机为 Storage-2,请确保是否正确!" + colorEcho ${BLUE} "检测到当前主机为 Storage-2,请确保是否正确!" + gluster peer probe storage-1 + echo "" + colorEcho ${BLUE} "GlusterFS的节点状态为:" + gluster peer status + fi + fi + else + colorEcho ${RED} "未检测到GlusterFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到GlusterFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到GlusterFS的基础安装包,程序将终止!!" + return 1 + fi + fi +} + +installHeketi() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装 存储服务 Heketi 的相关服务…………" + echo "" + ls /tmp/storage | grep heketi | grep -v heketi-client + if [ "$?" -ne "0" ]; then + colorEcho ${RED} "未检测到heketi的离线安装rpm包!!!" + colorEcho ${RED} "未检测到heketi的离线安装rpm包!!!" + echo "" + colorEcho ${BLUE} "开始进行heketi的离线下载,安装过程!!!!" + wget ${HeketiOfficeFile} -O heketi9-offline.tar.gz + tar -zvxf heketi9-offline.tar.gz + cd heketi + pwd + mv ./heketi /usr/bin/heketi + mv ./heketi-cli /usr/bin/heketi-cli + cd .. + pwd + echo "" + colorEcho ${BLUE} "请检查heketi的命令是否输出正常!!" + heketi --version + colorEcho ${BLUE} "----------------------------------------------------" + heketi-cli --version + echo "" + else + colorEcho ${BLUE} "检测到heketi-server的离线安装包!" + echo "" + colorEcho ${BLUE} "按照道理目前的 heketi已经安装!!" + colorEcho ${BLUE} "开始检测…………" + echo "" + fi + + if heketi --version &>/dev/null && heketi-cli --version &>/dev/null; then + colorEcho ${GREEN} "检查到heketi已经安装成功!! 开始进行相关的配置。" + echo "" + colorEcho ${BLUE} "开始为heketi-server添加系统用户 heketi!" + sudo groupadd --system heketi + sudo useradd -s /sbin/nologin --system -g heketi heketi + echo "" + colorEcho ${BLUE} "开始创建 heketi-server 的配置、工作、日志目录" + sudo mkdir -p /var/lib/heketi /etc/heketi /var/log/heketi + echo "" + colorEcho ${BLUE} "开始下载heketi的配置文件压缩包,heketi-config.tar.gz…" + wget "${HeketiConfigOfflineFile}" -O /etc/heketi/heketi-config.tar.gz + echo "" + + if [ -s /etc/heketi/heketi-config.tar.gz ]; then + colorEcho ${GREEN} "heketi的配置文件压缩包下载成功!!!" 
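            # Assumed archive layout (illustrative): judging from later steps,
            # the tarball should carry at least topology.json and heketi.service;
            # it can be inspected before unpacking with
            #
            #   tar -tzf /etc/heketi/heketi-config.tar.gz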
+            tar -zxvf /etc/heketi/heketi-config.tar.gz -C /etc/heketi
+            echo ""
+        fi
+
+        colorEcho ${BLUE} "开始创建heketi的ssh key文件,使得heketi-server能够访问glusterfs的server"
+        echo "y" | ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''
+
+        chown heketi:heketi /etc/heketi/heketi_key*
+
+        for ip in "${PrivateServerIPs[@]}"; do
+            colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行"
+            colorEcho ${BLUE} "-----------------------------------------------"
+            echo ""
+            echo ""
+            colorEcho ${RED} " 请以 root 角色 运行!!! "
+            colorEcho ${RED} " 请以 root 角色 运行!!! "
+            colorEcho ${RED} " 请以 root 角色 运行!!! "
+            echo ""
+            colorEcho ${YELLOW} 'sed -i -e "/PermitRootLogin no/ d" -e "$ a PermitRootLogin yes" /etc/ssh/sshd_config && systemctl restart sshd'
+            echo ""
+            colorEcho ${YELLOW} "echo \"$(cat /etc/heketi/heketi_key.pub)\" >> ~/.ssh/authorized_keys && echo \"\" && cat ~/.ssh/authorized_keys"
+            echo ""
+            echo ""
+            while true; do
+                colorEcho ${RED} "请确保您已经将上述的命令在主机${ip}上执行了!!"
+                read -r -p "请输入yes进行确认,脚本才可继续运行!!" input
+                case $input in
+                yes)
+                    colorEcho ${GREEN} "您已确认在主机${ip}上添加了私有的ssh key!"
+                    echo ""
+                    break
+                    ;;
+                *)
+                    echo ""
+                    colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认"
+                    colorEcho ${RED} "请在主机${ip}上执行上述命令!!!"
+                    colorEcho ${RED} "否则本脚本的功能会失效!!"
+                    colorEcho ${RED} "-----------------------------------------------------"
+                    echo ""
+                    ;;
+                esac
+            done
+        done
+
+        colorEcho ${GREEN} "heketi的密钥已经分发完毕!!"
+        echo ""
+        colorEcho ${BLUE} "--------------------------------------------------------------"
+        colorEcho ${BLUE} "请根据主机实际的物理磁盘信息,修改相应的 device!"
+        colorEcho ${BLUE} "请根据主机实际的物理磁盘信息,修改相应的 device!"
+        colorEcho ${BLUE} "请根据主机实际的物理磁盘信息,修改相应的 device!"
+        colorEcho ${BLUE} "--------------------------------------------------------------"
+        while true; do
+            colorEcho ${RED} "请确保您已经新开终端界面并修改了topology.json文件!!!"
+            read -r -p "请输入yes进行确认,脚本才可继续运行!!" input
+            case $input in
+            yes)
+                colorEcho ${GREEN} "您已确认新开终端界面并修改了topology.json文件!!!!"
+                echo ""
+                break
+                ;;
+            *)
+                echo ""
+                colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认"
+                colorEcho ${RED} "请确保您已经新开终端界面并修改了topology.json文件!!!"
+                colorEcho ${RED} "否则本脚本的功能会失效!!"
+                colorEcho ${RED} "-----------------------------------------------------"
+                echo ""
+                ;;
+            esac
+        done
+
+        colorEcho ${BLUE} "--------------------------------------------------------------"
+        colorEcho ${BLUE} "修改所有的heketi-server目录的权限"
+        chown -R heketi:heketi /var/lib/heketi /var/log/heketi /etc/heketi
+        echo ""
+        colorEcho ${BLUE} "重新加载Systemd并启动 Heketi service"
+        cp /etc/heketi/heketi.service /usr/lib/systemd/system/heketi.service
+
+        sudo systemctl daemon-reload
+        sudo systemctl enable --now heketi
+        echo ""
+
+        colorEcho ${BLUE} "输出heketi-server的状态!!"
+        systemctl status heketi -l
+    fi
+}
+
+# 能够联网的机器上
+downloadNFS() {
+    colorEcho ${PURPLE} "--------------------------------------------------------------"
+    colorEcho ${BLUE} "开始下载NFS相关的组件…………"
+
+    echo ""
+    # NFSVersion=$(yum list nfs-utils | grep "nfs" | awk '{print$2}')
+
+    colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!"
+    colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!"
+    echo ""
+    if [ "$LinuxReleaseVersion" = "centos" ]; then
+        colorEcho ${BLUE} "开始下载 NFS的离线安装包!"
+        # colorEcho ${BLUE} " NFS的版本为: ${NFSVersion}!!"
+        yum install -y -q --downloadonly --downloaddir=/tmp/storage/nfs/ nfs-utils
+        colorEcho ${GREEN} " 下载完成 "
+
+        colorEcho ${BLUE} "开始下载 NFS-rpcbind的离线安装包!"
+        # colorEcho ${BLUE} " NFS的版本为: ${NFSVersion}!!"
+        yum install -y -q --downloadonly --downloaddir=/tmp/storage/rpc/ rpcbind
+        colorEcho ${GREEN} " 下载完成 "
+    else
+        colorEcho ${BLUE} "开始下载 NFS的离线安装包!"
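        # Reference for the offline-download pattern used in this branch
        # (assumes the apt-rdepends helper package is installed):
        #
        #   apt-get download $(apt-rdepends nfs-common | grep -v "^ ")
        #
        # apt-rdepends prints the package plus its recursive dependencies, and
        # the grep drops the indented "Depends:" lines so only names remain.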
+ apt-cache madison nfs-common | awk '{print$3}' + # installDemandSoftwares nfs-kernel-server + mkdir -p /tmp/storage/nfs/ + mkdir -p /tmp/storage/rpc/ + + echo + colorEcho ${BLUE} "需要下载的依赖为 ${libs}" + + colorEcho ${BLUE} "开始下载 NFS的离线安装包!" + cd /tmp/storage/nfs + colorEcho $BLUE "开始解析依赖!" + + colorEcho ${BLUE} "开始下载 NFS-Client 的离线安装包!" + # ubuntu 20.04 使用如下的几项内容即可 + # apt-get download keyutils libnfsidmap2 libtirpc-common libtirpc3 nfs-common rpcbind + apt-get download $(apt-rdepends nfs-common | grep -v "^ ") + + colorEcho ${GREEN} " 下载完成 " + colorEcho ${BLUE} "开始下载 NFS-Server 的离线安装包!" + cd /tmp/storage/rpc + # ubuntu 20.04 使用如下的几项内容即可 + # apt-get download keyutils libnfsidmap2 libtirpc-common libtirpc3 nfs-common nfs-kernel-server rpcbind + apt-get download $(apt-rdepends nfs-kernel-server | grep -v "^ ") + colorEcho ${GREEN} " 下载完成 " + echo "" + fi + + colorEcho ${GREEN} "查看已经下载的相关依赖安装包……" + ls /tmp/storage/nfs/ + ls /tmp/storage/rpc/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + +} + +distributeNFS() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始分发NFS的离线安装包…………" + echo "" + + for ip in "${StorageServerIPs[@]}"; do + colorEcho ${BLUE} "正在将NFS的离线安装包分发至主机 ${ip} 上……" + echo "yes + yes + " | scp -r /tmp/storage/ root@${ip}:/tmp/storage/ + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} "NFS的离线安装包已经分发完成!" + colorEcho ${GREEN} "----------------------------------------------------------" + else + colorEcho ${RED} "ERROR:NFS 离线安装包 没有正常分发!!" + colorEcho ${RED} "----------------------------------------------------------" + fi + done + echo "" +} + +installNFS() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装 存储服务 NFS 的基础组件 的相关服务…………" + echo "" + + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + echo "" + if [ "$LinuxReleaseVersion" = "centos" ]; then + if [ -d /tmp/storage/nfs/ ]; then + ls /tmp/storage/nfs/ | grep -q nfs-utils + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + yum install -y -q /tmp/storage/nfs/*.rpm + echo "" + else + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + return 1 + fi + fi + else + ls /tmp/storage/nfs | grep -q "nfs-common" + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + dpkg -i /tmp/storage/nfs/*.deb + echo "" + else + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + return 1 + fi + fi + + colorEcho ${GREEN} "NFS安装完成,开始启动服务!" + systemctl start nfs nfs-client nfs-common + systemctl enable nfs nfs-client nfs-common + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status nfs nfs-client nfs-common -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Client 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + +} + +#nfs 服务端 +installNFSServer() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装 存储服务 NFS 的基础组件 的相关服务…………" + echo "" + + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" 
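    # Caveat (general systemd knowledge, not from this script): client-side NFS
    # unit names differ per distro; CentOS 7 exposes "nfs"/"rpcbind" while
    # Ubuntu has no "nfs-client" unit, so a portable liveness check is closer to
    #
    #   systemctl status rpcbind 2>/dev/null | grep "active (running)"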
+ echo "" + if [ "$LinuxReleaseVersion" = "centos" ]; then + if [ -d /tmp/storage/nfs/ ]; then + ls /tmp/storage/nfs/ | grep -q nfs-utils + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + yum install -y -q /tmp/storage/nfs/*.rpm + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "NFS安装完成,开始启动服务!" + systemctl enable nfs + systemctl start nfs + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status nfs -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Client 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + fi + else + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + return 1 + fi + fi + if [ -d /tmp/storage/rpc/ ]; then + ls /tmp/storage/rpc/ | grep -q rpcbind + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + yum install -y -q /tmp/storage/rpc/*.rpm + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "NFS-RPCBIND 安装完成,开始启动服务!" + systemctl start rpcbind + systemctl enable rpcbind + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status rpcbind -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Server 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + + fi + else + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + return 1 + fi + fi + else + colorEcho ${GREEN} "执行发行版为-- ${LinuxReleaseVersion} 的NFS-Server安装进程!" + if [ -d /tmp/storage/nfs/ ]; then + ls /tmp/storage/nfs/ | grep -q nfs-common + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + dpkg -i /tmp/storage/nfs/*.deb + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "NFS安装完成,开始启动服务!" + systemctl start nfs + systemctl enable nfs + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status nfs -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Client 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + fi + else + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + return 1 + fi + fi + if [ -d /tmp/storage/rpc/ ]; then + ls /tmp/storage/rpc/ | grep -q server + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + dpkg -i /tmp/storage/rpc/*.deb + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "NFS-Server 安装完成,开始启动服务!" + systemctl start nfs-server + systemctl enable nfs-server + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status nfs-server -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Server 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + + fi + else + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + return 1 + fi + fi + fi + + echo "" + colorEcho ${BLUE} "开始创建NFS共享目录!" + + export NfsPath=/nfsdata + mkdir -p ${NfsPath} + chmod 777 ${NfsPath} + cat /etc/exports | grep ${NfsPath} -q + if [[ $? -ne 0 ]]; then + # exports文件中没有相关的额配置,才添加! + echo "nfs config not exist !" 
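        # The line appended just below exports NfsPath to every host (*) with
        # read-write access and root squashing disabled; with the assumed
        # default NfsPath=/nfsdata the resulting /etc/exports entry reads:
        #
        #   /nfsdata *(rw,no_root_squash,no_all_squash,sync)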
+ echo "${NfsPath} *(rw,no_root_squash,no_all_squash,sync)" >>/etc/exports + fi + echo "" + + colorEcho ${BLUE} "开始重启nfs服务!" + + if [ "$LinuxReleaseVersion" = "centos" ]; then + systemctl restart rpcbind && systemctl restart nfs + else + systemctl restart nfs-server && systemctl restart nfs + fi + echo "" + echo "" + colorEcho ${BLUE} "检查NFS的运行状况:" + rpcinfo -p localhost + colorEcho ${YELLOW} "------------------------------------------------------------" + echo "" +} + +installProxyServer() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装网络代理(v2ray)的相关服务…………" + echo "" + colorEcho ${BLUE} "开始从西云的对象存储下载离线安装包…………" + colorEcho ${BLUE} "当前目录为 $(pwd),创建代理服务器的临时安装目录:" + mkdir proxy_offline_install && cd ./proxy_offline_install + wget "${ProxyOfflineFile}" -O proxy-offline.tar.gz + if [ ! -s proxy-offline.tar.gz ]; then + echo "" + colorEcho ${RED} "代理服务器安装包下载失败!!!" + colorEcho ${RED} "代理服务器安装包下载失败!!!" + colorEcho ${RED} "代理服务器安装包下载失败!!!" + return 1 + fi + colorEcho ${GREEN} " 下载完成 " + tar -zxvf proxy-offline.tar.gz + colorEcho ${GREEN} " 代理服务器离线安装包解压缩完成 " + pwd + chmod +x v2ray-install.sh + colorEcho ${BLUE} "开始离线安装 网络代理(v2ray)服务器……" + echo " + " | ./v2ray-install.sh --local v2ray-linux-64_v4.32.1.zip + echo "" + colorEcho ${GREEN} "网络代理(v2ray v4.32.1)服务器已经安装成功!" + colorEcho ${GREEN} "---------------------------------------------" + echo "" + colorEcho ${BLUE} "开始配置代理服务器的相关设置…………" + sed -i "s/User=nobody/User=root/g" /etc/systemd/system/v2ray.service + rm /usr/local/etc/v2ray/config.json + cp ./config.json /usr/local/etc/v2ray/config.json + colorEcho ${GREEN} " 配置完成 " + cd .. + pwd + echo "" + colorEcho ${BLUE} "正在开启代理服务器v2ray的服务程序……" + systemctl daemon-reload && systemctl start v2ray + colorEcho ${GREEN} " 服务启动配置完成 " + echo "" + colorEcho ${BLUE} "查看代理服务器v2ray的程序运行状态……" + systemctl status v2ray -l | grep "Active: active (running)" + if [ $? -ne 0 ]; then + echo "" + colorEcho ${RED} "代理服务器启动失败!!" + colorEcho ${RED} "代理服务器启动失败!!" + colorEcho ${RED} "代理服务器启动失败!!" + colorEcho ${RED} "代理服务器启动失败!!" + return 1 + fi + colorEcho ${BLUE} "正在设置v2ray的开机自启动……" + systemctl enable v2ray + colorEcho ${GREEN} " 开机自启动配置完成 " + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "------------下面是代理服务器的使用方法说明------------" + colorEcho ${BLUE} "------------下面是代理服务器的使用方法说明------------" + colorEcho ${YELLOW} "------------------------------------------------------------" + colorEcho ${BLUE} "请确保当前主机能够访问公网!!!!" + colorEcho ${BLUE} "在需要使用代理上网的服务器上输入如下的命令:" + echo "" + echo "" + colorEcho ${YELLOW} " export http_proxy=http://${PublicServerIPs}:12333 && export https_proxy=http://${PublicServerIPs}:12333 " + echo "" + echo "" + colorEcho ${YELLOW} "------------------------------------------------------------" + colorEcho ${BLUE} "------------上面是代理服务器的使用方法说明------------" + colorEcho ${BLUE} "------------上面是代理服务器的使用方法说明------------" + echo "" + echo "" +} + +modifySystemNetworkProxy() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始配置系统网络代理…………" + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + + addYumProxy + + addShellProxy + + # addDockerProxy +} + +addYumProxy() { + colorEcho ${BLUE} "开始配置yum包管理工具的网络代理…………" + + cat /etc/yum.conf | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? 
-eq 0 ]; then + colorEcho ${BLUE} "检测到yum的代理已经添加,跳过!" + return 1 + fi + sed -i "$ a http_proxy="http://"${PublicServerIPs}":12333"" /etc/yum.conf + sed -i "$ a https_proxy="http://"${PublicServerIPs}":12333"" /etc/yum.conf + + if [ $? -eq 0 ]; then + colorEcho ${GREEN} "yum源代理配置修改完成! 目前yum命令可以通过master 节点代理上网" + echo "" + fi +} + +addShellProxy() { + colorEcho ${BLUE} "开始配置shell终端的网络代理…………" + + export http_proxy=http://${PublicServerIPs}:12333 && export https_proxy=http://${PublicServerIPs}:12333 + + cat /etc/profile | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? -eq 0 ]; then + colorEcho ${BLUE} "检测到shell的代理已经添加,跳过!" + return 1 + fi + + sed -i "$ a export http_proxy="http://"${PublicServerIPs}":12333"" /etc/profile + sed -i "$ a export https_proxy="http://"${PublicServerIPs}":12333"" /etc/profile + + if [ -a ~/.bashrc ]; then + colorEcho ${BLUE} "检测到bash shell存在,开始配置其代理。。" + cat ~/.bashrc | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? -eq 0 ]; then + colorEcho ${BLUE} "检测到shell的代理已经添加,跳过!" + return 1 + fi + sed -i "$ a export http_proxy="http://"${PublicServerIPs}":12333"" ~/.bashrc + sed -i "$ a export https_proxy="http://"${PublicServerIPs}":12333"" ~/.bashrc + fi + + if [ -a ~/.profile ]; then + colorEcho ${BLUE} "检测到~/.profile存在,开始配置其代理。。" + cat ~/.profile | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? -eq 0 ]; then + colorEcho ${BLUE} "检测到shell的代理已经添加,跳过!" + return 1 + fi + sed -i "$ a export http_proxy="http://"${PublicServerIPs}":12333"" ~/.profile + sed -i "$ a export https_proxy="http://"${PublicServerIPs}":12333"" ~/.profile + fi + + if [ -a ~/.zshrc ]; then + colorEcho ${BLUE} "检测到zsh shell存在,开始配置其代理。。" + cat ~/.zshrc | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? -eq 0 ]; then + colorEcho ${BLUE} "检测到shell的代理已经添加,跳过!" + return 1 + fi + sed -i "$ a export http_proxy="http://"${PublicServerIPs}":12333"" ~/.zshrc + sed -i "$ a export https_proxy="http://"${PublicServerIPs}":12333"" ~/.zshrc + fi + + if [ $? -eq 0 ]; then + colorEcho ${GREEN} "shell的代理配置修改完成! 目前curl wget等命令可以通过master节点代理上网" + echo "" + fi +} + +installDocker() { + ### 国内的环境 ### + ### 依赖colorEcho + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装Docker的相关服务…………" + colorEcho ${GREEN} "--------------------------------------------------------------" + colorEcho ${BLUE} "您选择安装的docker版本为:${DOCKER_VERSION}" + echo "" + + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" 
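+ # Offline package layout assumed by the install commands below:
+ #   /tmp/docker-${DOCKER_VERSION}/depends/*.rpm   CentOS-only dependency rpms
+ #   /tmp/docker-${DOCKER_VERSION}/*.rpm | *.deb   the docker-ce packages themselves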
+ echo "" + if [ "$LinuxReleaseVersion" = "centos" ]; then + colorEcho ${BLUE} "正在安装安装docker的所需要的依赖…………" + colorEcho ${YELLOW} "----------------------------------------------------------------------" + ls /tmp/docker-${DOCKER_VERSION}/depends/ + colorEcho ${YELLOW} "----------------------------------------------------------------------" + echo "" + yum install -y -q /tmp/docker-${DOCKER_VERSION}/depends/*.rpm + colorEcho ${GREEN} " 安装完成 " + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + + colorEcho ${GREEN} "开始安装docker-ce,版本为${DOCKER_VERSION}" + colorEcho ${YELLOW} "----------------------------------------------------------------------" + ls /tmp/docker-${DOCKER_VERSION}/ + colorEcho ${YELLOW} "----------------------------------------------------------------------" + echo "" + yum install -y -q /tmp/docker-${DOCKER_VERSION}/*.rpm + else + + colorEcho ${GREEN} "开始安装docker-ce,版本为${DOCKER_VERSION}" + colorEcho ${YELLOW} "----------------------------------------------------------------------" + ls /tmp/docker-${DOCKER_VERSION}/ + colorEcho ${YELLOW} "----------------------------------------------------------------------" + echo "" + dpkg -i /tmp/docker-${DOCKER_VERSION}/*.deb + echo "" + fi + + colorEcho ${GREEN} " Docker安装完成 " + colorEcho ${GREEN} " Docker安装完成 " + colorEcho ${GREEN} " Docker安装完成 " + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + + colorEcho ${BLUE} "正在启动docker的服务进程…………" + systemctl enable docker.service + systemctl start docker.service + colorEcho ${BLUE} "等待docker的服务进程启动…………" + sleep 3 + colorEcho ${BLUE} "查看docker的服务进程运行状态…………" + + systemctl status docker.service -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " Docker 启动完成 " + fi + echo "" +} +## 安装docker时,修改系统的配置文件 +modifySystemConfig() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始修改系统内核参数…………" + ## 配置内核参数 + cat >/etc/sysctl.d/k8s.conf </dev/null + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} "docker-compose安装成功!!版本为$(docker-compose --version | cut -d" " -f3)尽情享用" + else + ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose + fi + else + colorEcho ${RED} "docker-compose文件下载失败!! 无法访问github的资源。。" + colorEcho ${RED} "请手动下载docker-compose的安装文件!" + fi +} + +downloadKubectlMinio() { + echo "" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} " 本部分应该在master节点上执行!………" + colorEcho ${BLUE} " 开始下载minio集群的安装初始化工具 ………" + + sudo wget https://github.com/minio/operator/releases/download/v4.4.13/kubectl-minio_4.4.13_linux_amd64 \ + -O /usr/bin/kubectl-minio + + sudo chmod +x /usr/bin/kubectl-minio + + colorEcho ${BLUE} "请确保在需要安装 minio的服务器上创建好了目录!" + colorEcho ${BLUE} "请确保在需要安装 minio的服务器上创建好了目录!" + + # 2. 初始化 minio 部署工具 + kubectl minio init + +} + +buildDirectoryForMinio() { + echo "" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} " 本部分应该在 安装minio集群的 节点上执行!………" + + colorEcho ${BLUE} "开始 为MINIO服务器创建目录…………" + + while true; do + colorEcho ${BLUE} "运行到此处,说明,您选择了一个pv挂载4目录的形式!" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${RED} "请确保您已经 手动初始化磁盘pv,并将其挂在至 /data 目录!" + read -r -p "请输入yes进行确认,脚本才可继续运行!!" input + case $input in + yes) + + colorEcho ${GREEN} "您已确认 手动初始化磁盘pv,并将其挂在至 /data 目录!" + echo "" + break + ;; + *) + echo "" + colorEcho ${RED} "输入有误!!! 
请输入 >> yes << 进行确认" + colorEcho ${RED} "请确保您已经 手动初始化磁盘pv,并将其挂在至 /data 目录!" + colorEcho ${RED} "否则本脚本的功能会失效!!" + colorEcho ${RED} "-----------------------------------------------------" + echo "" + ;; + esac + done + + # 向下兼容 适应8pod或者4pod的情况 + for i in {1..8}; do + mkdir -p /data/minio-pv/pv${i} + if [ -d "/data/minio-pv/pv${i}" ]; then + echo "yes" + else + return 1 + fi + echo "" + done + + colorEcho ${GREEN} "Minio的目录均已创建完成!!" + +} + +buildPVForMinio() { + echo "" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始 为MINIO服务器 初始化磁盘pv,创建整PV的存储…………" + + echo "此部分功能暂时掠过!" +} + +installZSH() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装宇宙第一shell的相关服务…………" + installDemandSoftwares zsh git || return $? + # 脚本会自动更换默认的shell + # echo y | sh -c "$(curl -fsSL https://cdn.jsdelivr.net/gh/robbyrussell/oh-my-zsh@master/tools/install.sh)" + wget https://cdn.jsdelivr.net/gh/robbyrussell/oh-my-zsh@master/tools/install.sh -O zsh-install.sh + # sed -i "s/github.com/github.com.cnpmjs.org/g" zsh-install.sh + # if [ $? -eq 0 ]; then + # colorEcho ${GREEN} "zsh仓库地址替换完成,已更换为国内的下载加速镜像" + # fi + chmod +x zsh-install.sh + colorEcho ${BLUE} "开始执行zsh的安装过程!!" + echo y | sh -c "./zsh-install.sh" + if [ "$(ls -A /root/.oh-my-zsh | wc -w)" -eq "0" ]; then + echo "" + colorEcho ${RED} "zsh下载失败!!跳过安装步骤!" + echo "" + return 1 + fi + echo "" +} + +modifyZSH() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------" + colorEcho ${GREEN} "zsh应该已经安装成功!!!" + colorEcho ${BLUE} "开始修改zsh的相关配置信息,使其更加好用…………" + echo "" + cat >~/oh-my-zsh-plugins-list.txt <自动提示< 插件…………" + git clone https://github.com.cnpmjs.org/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/plugins/zsh-autosuggestions + echo "" + colorEcho ${BLUE} "开始从GitHub下载 >命令高亮< 插件…………" + git clone https://github.com.cnpmjs.org/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/plugins/zsh-syntax-highlighting + + echo "" + colorEcho ${BLUE} "开始从JSDeliver下载另外一些插件…………" + wget -c -i ~/oh-my-zsh-plugins-list.txt -P ~/.oh-my-zsh/plugins/ + echo "" + colorEcho ${PURPLE} "---------------------------------------------------------------------------" + colorEcho ${GREEN} "插件已经下载完毕,现在开始修改zsh的配置文件…………" + echo "" + + sed -i "s/robbyrussell/agnoster/g" ~/.zshrc + sed -i 's/^# DISABLE_AUTO_UPDATE="true"/DISABLE_AUTO_UPDATE="true"/g' ~/.zshrc + sed -i 's/plugins=(git)/plugins=(git zsh-autosuggestions zsh-syntax-highlighting command-not-found z themes)/g' ~/.zshrc + colorEcho ${GREEN} "请检查当前zsh的插件开启情况:" + colorEcho ${GREEN} "------------------------------------------" + cat ~/.zshrc | grep "plugins=" | grep -v "\#" + cat ~/.zshrc | grep "plugins=" | grep -v "\#" + cat ~/.zshrc | grep "plugins=" | grep -v "\#" + colorEcho ${GREEN} "------------------------------------------" + + echo "" + echo "----------------------------------------------------" + echo "这里的错误输出无需在意" + source /root/.zshrc + echo "这里的错误输出无需在意" + echo "----------------------------------------------------" + + if [[ $? -eq 0 ]]; then + colorEcho ${BLUE} "开始修改默认shell为zsh……" + for i in {6..1..-1}; do + colorEcho ${BLUE} "倒计时开始 ->> $i 秒 <<-,准备切换shell,上文的日志输出将会消失!!" 
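+ # Note: each countdown tick sleeps 2 seconds, so the "6 秒" banner actually
+ # spans about 12 seconds before chsh switches the login shell.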
+ sleep 2 + done + chsh -s /bin/zsh + zsh + else + colorEcho ${RED} "zsh 安装失败,大概率是已经安装!!小概率是无法连接GitHub服务器~~" + fi + + colorEcho ${GREEN} "zsh 安装成功,已更换主题,禁止更新,尽情享用~~~" + colorEcho ${GREEN} "-----------------------------------------------------------------------------" + colorEcho ${PURPLE} "宇宙第一shell的zsh已经安装成功了!!!" + colorEcho ${GREEN} "宇宙第一shell的zsh已经安装成功了!!!" + colorEcho ${BLUE} "宇宙第一shell的zsh已经安装成功了!!!" + colorEcho ${GREEN} "-----------------------------------------------------------------------------" + echo "" +} +# 修改docker的国内加速镜像 +changeDockerRegisterMirror() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始配置docker的(加速+私有)镜像…………" + echo "" + if [[ -e /etc/docker/daemon.json ]]; then + colorEcho ${BLUE} "已经存在docker的daemon文件。。" + mv /etc/docker/daemon.json /etc/docker/daemon.backup.json + colorEcho ${GREEN} "已经将daemon文件备份" + fi + colorEcho ${BLUE} "正在写入docker的daemon配置文件……" + cat >>/etc/docker/daemon.json <>/etc/systemd/system/docker.service.d/http-proxy.conf <>~/.docker/config.json </dev/null + + colorEcho ${BLUE} "开始添加Nginx的apt源!" + echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ +http://nginx.org/packages/ubuntu $(lsb_release -cs) nginx" \ + | sudo tee /etc/apt/sources.list.d/nginx.list + + colorEcho ${BLUE} "开始更新apt源" + sudo apt update + echo "" + + colorEcho ${BLUE} "查看所有可以安装的nginx版本" + apt-cache madison nginx | awk '{print$3}' + echo "" + echo "" + colorEcho ${BLUE} "开始安装最新版本的nginx" + sudo apt install "nginx=$(apt-cache madison nginx | awk '{print$3}' | head -1)" + + fi + + systemctl status nginx + + systemctl start nginx + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "nginx安装完成!已成功运行!" + fi + +} + +modifyNginx() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始安装Nginx作为对面暴露面…………" + echo "" + colorEcho ${BLUE} "开始配置nginx的总配置文件!!" + cat >/etc/nginx/nginx.conf </etc/nginx/conf.d/real-public-nginx.conf </dev/null + if [[ $? -ne 0 ]]; then + colorEcho ${RED} "检测到docker-compose未安装!! Harbor服务器的安装过程将中断!!" + colorEcho ${RED} "检测到docker-compose未安装!! Harbor服务器的安装过程将中断!!" + colorEcho ${RED} "检测到docker-compose未安装!! Harbor服务器的安装过程将中断!!" + return 1 + fi + echo "" + if [[ $OSSPublicURL == "" ]]; then + colorEcho ${BLUE} "未指定harbor镜像仓库的离线安装包下载地址!!" + colorEcho ${BLUE} "开始从GitHub下载 harbor的离线安装包!!" + echo "" + wget --no-check-certificate https://github.com/goharbor/harbor/releases/download/v2.1.0/harbor-offline-installer-v2.1.0.tgz + else + colorEcho ${BLUE} "已经指定harbor镜像仓库的离线安装包下载地址!!" + wget --no-check-certificate "${HarborOfflineFile}" -O harbor-offline-installer-v2.1.0.tgz + fi + if [ ! -s harbor-offline-installer-v2.1.0.tgz ]; then + colorEcho ${RED} "harbor离线安装包下载失败! 跳过Harbor安装过程!" + return 1 + fi + colorEcho ${GREEN} "---------------离线安装包下载完成!!----------------" + echo "" + colorEcho ${BLUE} "开始解压缩harbor的离线安装包!!" + tar xvf harbor-offline-installer-v2.1.0.tgz + colorEcho ${GREEN} "---------------解压缩完成!!---------------" + echo "" + colorEcho ${BLUE} "开始配置harbor仓库的相关设置!!" + rm ./harbor/harbor.yml + cat >>./harbor/harbor.yml < ${RKESystemImages} <上下载RKE系统镜像!!" 
+ echo "" + + fi + +} + +#downloadChrony(){ +# colorEcho ${PURPLE} "--------------------------------------------------------------" +# colorEcho ${BLUE} "准备下载 Chrony 的离线安装包…………" +# colorEcho ${GREEN} "--------------------------------------------------------------" +# echo "" +# +# +# mkdir /tmp/chrony +# cd /tmp/chrony +# +# command_exists apt-rdepends +# if [ "$?" -eq "0" ]; then +# let staff=chrony +# colorEcho ${BLUE} "开始下载 ${staff} 的依赖!" +# apt download $(apt-rdepends ${staff} | grep -v "^ ") +# colorEcho ${GREEN} "下载完成!" +# else +# colorEcho ${RED} "依赖检测工具不存在!" +# apt-get download libnss-systemd libpam-systemd libsystemd0 systemd systemd-sysv chrony +# fi +# +#} + +# 使用chrony进行NTP时间同步 +TimeSyncToNTPByChrony() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始使用 chrony 工具进行时间同步…………" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" + colorEcho ${BLUE} "开始安装chrony工具……" + installDemandSoftwares chrony || return $? + colorEcho ${GREEN} " 安装完成 " + # 这里使用的是 默认的NTP源,又不是不能用,为啥要换啊。 + colorEcho ${BLUE} "开始启动并开启开机启动chrony……" + systemctl enable chronyd && systemctl start chronyd + colorEcho ${BLUE} "开始配置chrony……" + + suffixIP=$(echo ${PublicServerIPs[0]} | cut -d "." -f1-2) + + internalCIDR=$(echo "${suffixIP}.0.0/16") + + if [[ ${LinuxReleaseVersion} == "centos" ]]; then + local chronyFile=/etc/chrony.conf + else + local chronyFile=/etc/chrony/chrony.conf + fi + + # sed -i "/^#allow 192.168.0.0\/16/ a allow ${internalCIDR}" ${chronyFile} + sed -i "$ a allow ${internalCIDR}" ${chronyFile} + + sed -i "s/server 0.centos.pool.ntp.org iburst/server ntp2.aliyun.com iburst/g" ${chronyFile} + + colorEcho ${BLUE} "开始重启chrony server服务!" + systemctl restart chronyd + echo "" + + systemctl status chronyd -l | grep "active (running)" -q + if [[ $? -eq 0 ]]; then + chronyc -n sources -v + chronyc tracking + + colorEcho ${GREEN} "时间同步配置完成,已与阿里云进行时间同步!!" + colorEcho ${GREEN} "NTP同步时间完成。现在时间为:" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${PURPLE} "$(date -R)" + colorEcho ${GREEN} "--------------------------------------------------" + else + colorEcho ${RED} "时间同步服务器启动失败!!" + colorEcho ${RED} "时间同步服务器启动失败!!" + colorEcho ${RED} "时间同步服务器启动失败!!" + return 1 + fi + + changeTimeZoneAndNTP +} + +modifyChronySyncToMaster() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始配置 chrony 时间同步至master节点…………" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" + colorEcho ${BLUE} "开始安装chrony工具……" + installDemandSoftwares chrony || return $? + colorEcho ${GREEN} " 安装完成 " + colorEcho ${BLUE} "开始启动并开启开机启动chrony……" + systemctl enable chronyd && systemctl start chronyd + colorEcho ${BLUE} "开始配置chrony……" + + sed -i "s/server 0.centos.pool.ntp.org iburst/server ${PublicServerIPs} minpoll 4 maxpoll 10 iburst/g" /etc/chrony.conf + systemctl restart chronyd + + systemctl status chronyd -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + chronyc -n sources -v + chronyc tracking + + colorEcho ${GREEN} "时间同步配置完成,已与Master节点 ${PublicServerIPs} 进行时间同步!!" 
+ colorEcho ${GREEN} "NTP同步时间完成。现在时间为:" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${PURPLE} "$(date -R)" + colorEcho ${GREEN} "--------------------------------------------------" + else + colorEcho ${RED} "时间同步服务器启动失败!!" + colorEcho ${RED} "时间同步服务器启动失败!!" + colorEcho ${RED} "时间同步服务器启动失败!!" + return 1 + fi + + changeTimeZoneAndNTP +} + +modifyTimeSyncdToMasterUbuntu() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始配置 timesyncd 时间同步至master节点…………" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" + + colorEcho ${BLUE} "开始修改配置文件,时间同步到 Master节点!" + + sed -i "$ a NTP=${PublicServerIPs}" /etc/systemd/timesyncd.conf + systemctl daemon-reload + + systemctl restart systemd-timesyncd.service + systemctl status systemd-timesyncd.service -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho $GREEN "时间同步客户端正在正常运行!" + colorEcho ${YELLOW} "------------------------------------------------" + timedatectl show-timesync --all + echo "" + colorEcho ${YELLOW} "------------------------------------------------" + timedatectl status + echo "" + colorEcho ${YELLOW} "------------------------------------------------" + + else + colorEcho ${RED} "时间同步服务器安装失败! 请检查原因" + return 23 + fi +} + +changeTimeZoneAndNTP() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始使用 timedatectl 工具进行时间同步…………" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" + if [[ -n $(command -v timedatectl) ]]; then + colorEcho ${BLUE} "检测到工具存在,正在设置时间和时区为 上海(UTC+8)时间" + timedatectl set-timezone Asia/Shanghai && timedatectl set-ntp true + colorEcho ${GREEN} "同步时间完成。现在时间为:" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${PURPLE} "$(date -R)" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${BLUE} "开始重启系统日志服务,使得系统日志的时间戳也立即生效" + systemctl restart rsyslog + colorEcho ${GREEN} " 重启完成 " + else + colorEcho ${RED} "timedatectl 工具不存在,时间同步失败!! 请手动更换时间!" + fi + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" +} + +## 为了本脚本能够满足Ubuntu系统,做出设当的更改 +commonToolInstall() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始进行Linux常用工具的安装过程…………" + colorEcho ${GREEN} "--------------------------------------------------" + echo "" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion} !!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion} !!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion} !!" + echo "" + if [[ ${LinuxReleaseVersion} == "centos" ]]; then + centosCommonTool=(deltarpm net-tools iputils bind-utils lsof curl wget vim mtr htop screen unzip git openssl iftop) + installDemandSoftwares "${centosCommonTool[@]}" + elif [[ ${LinuxReleaseVersion} == "ubuntu" ]] || [[ ${LinuxReleaseVersion} == "debian" ]]; then + ubuntuCommonTool=(iputils-ping net-tools dnsutils lsof curl wget mtr-tiny vim htop screen git apt-rdepends nethogs iftop) + installDemandSoftwares "${ubuntuCommonTool[@]}" + fi +} + +main() { + + installHarbor || return $? + + installNginx + modifyNginx + + installZSH || return $? + modifyZSH || return $? 
+ +} + +HarborServer() { + # 当harbor位于k8s-master节点相同时 + check_root + check_sys + + disableSwap + + shutdownFirewall + modifySystemConfig + commonToolInstall + installHelm + + TimeSyncToNTPByChrony || return $? + changeTimeZoneAndNTP || return $? + + # installProxyServer || return $? + + generateSSHKey || return $? + + downloadDocker || return $? + distributeDocker || return $? + + installDocker || return $? + + installDockerCompose || return $? + + downloadNFS || return $? + distributeNFS || return $? + installNFSServer || return $? + + installHarbor || return $? + + installNginx + modifyNginx + + installZSH || return $? + modifyZSH || return $? +} + +WorkerServer() { + # check_root + # + # check_sys + # disableSwap + # shutdownFirewall + # modifySystemConfig + # + # modifyTimeSyncdToMasterUbuntu + ## changeTimeZoneAndNTP || return $? + # installDocker || return $? + changeDockerRegisterMirror + +} + +MinioServer() { + check_root + addYumProxy + addShellProxy + check_sys + disableSwap + shutdownFirewall + modifySystemConfig + + # changeTimeZoneAndNTP || return $? + modifyChronySyncToMaster + installDocker || return $? + changeDockerRegisterMirror + addDockerProxy + + if [[ ${MinioStorageType} -eq "pv" ]]; then + buildPVForMinio + else + buildDirectoryForMinio + fi + +} + +HeketiServer() { + check_root + addYumProxy + addShellProxy + check_sys + disableSwap + shutdownFirewall + modifySystemConfig + modifyChronySyncToMaster || return $? + + installGlusterFS || return $? + installHeketi || return $? +} +GlusterServer() { + check_root + addYumProxy + addShellProxy + check_sys + disableSwap + shutdownFirewall + modifySystemConfig + modifyChronySyncToMaster || return $? + + installGlusterFS || return $? +} +${WhichNodeRun} From 2b27a5b5fd18e058bd32d452d6f0a56f1368a296 Mon Sep 17 00:00:00 2001 From: IceDerce Date: Wed, 21 Jun 2023 11:36:27 +0800 Subject: [PATCH 37/45] [Excution] - accomplish base Function - 1 --- agent-go/executor/BaseFunction.go | 419 +++++++++++++++++-- agent-go/executor/BaseFunction_test.go | 7 +- agent-go/executor/script/1_node_important.sh | 2 +- 3 files changed, 394 insertions(+), 34 deletions(-) diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go index 1737231..a8d8250 100644 --- a/agent-go/executor/BaseFunction.go +++ b/agent-go/executor/BaseFunction.go @@ -1,6 +1,9 @@ package executor -import "strings" +import ( + "agent-go/register" + "strings" +) type BaseFunc interface { Exec(baseFuncName string, funcArgs ...string) string @@ -15,11 +18,13 @@ type AgentOsOperator struct { IsOsTypeUbuntu bool `json:"is_os_type_ubuntu",comment:"主机操作系统是否为ubuntu系列"` - IsAgentInnerWall bool `json:"is_agent_inner_wall", comment:"主机是否身处国内"` + IsAgentInnerWall bool `json:"is_agent_inner_wall",comment:"主机是否身处国内"` AgentArch string `json:"agent_arch",comment:"主机的CPU架构,可选为amd64 arm64"` AgentOSReleaseCode string `json:"agent_os_release_code",comment:"主机操作系统的发行版代号, focal之类的"` + + AgentServerInfo register.AgentServerInfo `json:"agent_server_info"` } // Exec 执行基础功能函数 @@ -51,7 +56,7 @@ func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string multiLineCommand = op.installDockerCompose() break case "modifyDockerConfig": - multiLineCommand = op.modifyDockerConfig() + multiLineCommand = op.modifyDockerConfig(funcArgs) break case "installHelm": multiLineCommand = op.installHelm() @@ -161,20 +166,22 @@ func (op *AgentOsOperator) disableSwap() [][]string { func (op *AgentOsOperator) removeDocker() [][]string { removeDockerLine := 
append(op.RemoveCommandPrefix, []string{ - "docker", - "docker-client", - "docker-client-latest", + "docker-ce", + "docker.io", "docker-ce-cli", - "docker-common", - "docker-latest", - "docker-latest-logrotate", - "docker-logrotate", - "docker-selinux", - "docker-engine-selinux", - "docker-engine", - "kubelet", - "kubeadm", - "kubectl", + //"docker", + //"docker-common", + //"docker-latest", + //"docker-latest-logrotate", + //"docker-logrotate", + //"docker-selinux", + //"docker-engine-selinux", + //"docker-engine", + //"kubelet", + //"kubeadm", + //"kubectl", + //"docker-client", + //"docker-client-latest", }...) removeDockerFunc := [][]string{ @@ -206,13 +213,13 @@ func (op *AgentOsOperator) installDocker(args []string) [][]string { { "curl", "-o", - "/etc/docker/docker-utsc.gpg", + "/usr/share/keyrings/docker-utsc.gpg", "https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg", }, { "apt-key", "add", - "/etc/docker/docker-utsc.gpg", + "/usr/share/keyrings/docker-utsc.gpg", }, { "add-apt-repository", @@ -226,13 +233,13 @@ func (op *AgentOsOperator) installDocker(args []string) [][]string { { "curl", "-o", - "/etc/docker/docker.gpg", + "/usr/share/keyrings/docker.gpg", "https://download.docker.com/linux/ubuntu/gpg ", }, { "apt-key", "add", - "/etc/docker/docker.gpg", + "/usr/share/keyrings/docker.gpg", }, { "add-apt-repository", @@ -247,9 +254,9 @@ func (op *AgentOsOperator) installDocker(args []string) [][]string { var specificDockerVersion string // hard code here 5:20.10.10~3-0~ubuntu-focal if strings.HasPrefix(args[0], "19") { - specificDockerVersion = "5:19.03.15~3-0~ubuntu-focal" + specificDockerVersion = "5:19.03.15~3-0~ubuntu-" + op.AgentOSReleaseCode } else { - specificDockerVersion = "5:20.10.10~3-0~ubuntu-focal" + specificDockerVersion = "5:20.10.10~3-0~ubuntu-" + op.AgentOSReleaseCode } installDockerFunc = append(installDockerFunc, @@ -297,39 +304,388 @@ func (op *AgentOsOperator) installDocker(args []string) [][]string { return installDockerFunc } +func (op *AgentOsOperator) removeDockerCompose() [][]string { + + installDockerComposeFunc := [][]string{ + append( + op.RemoveCommandPrefix, + "docker-compose", + ), + } + + return installDockerComposeFunc +} + func (op *AgentOsOperator) installDockerCompose() [][]string { - return [][]string{} + installDockerComposeFunc := [][]string{ + append( + op.InstallCommandPrefix, + "docker-compose", + ), + } + + return installDockerComposeFunc } func (op *AgentOsOperator) installHelm() [][]string { + var installHelmFunc [][]string - return [][]string{} + if op.IsOsTypeUbuntu { + installHelmFunc = [][]string{ + + { + "curl", + "-o", + "/usr/share/keyrings/helm.gpg", + "https://baltocdn.com/helm/signing.asc", + }, + { + "apt-key", + "add", + "/usr/share/keyrings/helm.gpg", + }, + { + "add-apt-repository", + "https://baltocdn.com/helm/stable/debian/ all main", + }, + { + "apt-get", + "update", + }, + append(op.InstallCommandPrefix, "helm"), + } + } else { + log.ErrorF("Operation OS is CentOS, Helm not installed!") + } + + return installHelmFunc } -func (op *AgentOsOperator) modifyDockerConfig() [][]string { +func (op *AgentOsOperator) modifyDockerConfig(args []string) [][]string { - return [][]string{} + harborIPAddr := args[0] + ":8033" + + modifyDockerConfigFunc := [][]string{ + { + "mv", + "/etc/docker/daemon.json", + "/etc/docker/daemon.backup.json", + }, + { + "wget", + "https://oss2.demo.uavcmlc.com:18000/wangdada/daemon-config.json", + "-O", + "/etc/docker/daemon.json", + }, + { + "sed", + "-i", + "s/$DockerRegisterDomain/" + 
harborIPAddr + "/g", + "/etc/docker/daemon.json", + }, + { + "systemctl", + "restart", + "docker.service", + }, + } + + return modifyDockerConfigFunc } func (op *AgentOsOperator) installHarbor(args []string) [][]string { - return [][]string{} + installHarborFunc := [][]string{ + { + "wget", + "--no-check-certificate", + "https://oss2.demo.uavcmlc.com:18000/wangdada/harbor-offline-installer-v2.1.0.tgz", + "-O", + "harbor-offline-installer-v2.1.0.tgz", + }, + { + "tar", + "-zvxf", + "harbor-offline-installer-v2.1.0.tgz", + }, + { + "rm", + "-rf", + "./harbor/harbor.yml", + }, + { + "wget", + "--no-check-certificate", + "https://oss2.demo.uavcmlc.com:18000/wangdada/harbor.yml", + "-O", + "./harbor/harbor.yml", + }, + { + "sed", + "-i", + "s/$HarborHostName/" + op.AgentServerInfo.ServerIPInV4 + "/g", + "./harbor/harbor.yml", + }, + { + "sed", + "-i", + "s/$HarborHostPort/8033/g", + "./harbor/harbor.yml", + }, + { + "sed", + "-i", + "s/$HarborHostPort/V2ryStr@ngPss/g", + "./harbor/harbor.yml", + }, + { + "./harbor/install.sh", + "--with-chartmuseum", + }, + } + + return installHarborFunc } func (op *AgentOsOperator) chronyToPublicNTP() [][]string { - return [][]string{} + serverIPInV4 := op.AgentServerInfo.ServerIPInV4 + internalIPCIDR := strings.Join(strings.Split(serverIPInV4, ".")[:2], ".") + ".0.0/16" + + chronyToPublicNTPFunc := [][]string{ + append( + op.InstallCommandPrefix, + "chrony", + ), + { + "systemctl", + "enable", + "chronyd", + }, + { + "systemctl", + "start", + "chronyd", + }, + } + + var chronyFile string + if op.IsOsTypeUbuntu { + chronyFile = "/etc/chrony/chrony.conf" + } else { + chronyFile = "/etc/chrony.conf" + } + + chronyToPublicNTPFunc = append(chronyToPublicNTPFunc, + [][]string{ + { + "sed", + "-i", + "$ a allow " + internalIPCIDR, + chronyFile, + }, + { + "sed", + "-i", + "s/server 0.centos.pool.ntp.org iburst/server ntp2.aliyun.com iburst/g", + chronyFile, + }, + { + "systemctl", + "restart", + "chronyd", + }, + { + "sleep", + "2", + }, + { + "chronyc", + "-n", + "sources", + "-v", + "chronyc", + "tracking", + }, + { + "timedatectl", + "set-timezone", + "Asia/Shanghai", + }, + { + "timedatectl", + "set-ntp", + "true", + }, + { + "systemctl", + "restart", + "rsyslog", + }, + }..., + ) + + return chronyToPublicNTPFunc } func (op *AgentOsOperator) chronyToMaster(args []string) [][]string { + masterInnerIP := args[0] - return [][]string{} + chronyToMasterFunc := [][]string{ + { + "sed", + "-i", + "$ a NTP=" + masterInnerIP, + "/etc/systemd/timesyncd.conf", + }, + { + "systemctl", + "daemon-reload", + }, + { + "systemctl", + "restart", + "systemd-timesyncd.service", + }, + { + "sleep", + "3", + }, + { + "timedatectl", + "show-timesync", + "--all", + }, + { + "timedatectl", + "status", + }, + } + + return chronyToMasterFunc } func (op *AgentOsOperator) installZSH() [][]string { - return [][]string{} + installZSHFunc := [][]string{ + append( + op.InstallCommandPrefix, + "zsh", + "git", + ), + } + + if op.IsAgentInnerWall { + installZSHFunc = append( + installZSHFunc, + [][]string{ + { + "wget", + "https://cdn.jsdelivr.net/gh/robbyrussell/oh-my-zsh@master/tools/install.sh", + "-O", + "zsh-install.sh", + }, + }..., + ) + + } else { + installZSHFunc = append( + installZSHFunc, + [][]string{ + { + "wget", + "https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh", + "-O", + "zsh-install.sh", + }, + }..., + ) + } + + // install + installZSHFunc = append( + installZSHFunc, + [][]string{ + { + "chmod", + "+x", + "zsh-install.sh", + }, + { + "sh", + "-c", + 
"./zsh-install.sh", + }, + }..., + ) + + // modify ZSH + if !op.IsAgentInnerWall { + installZSHFunc = append( + installZSHFunc, + [][]string{ + { + "git", + "clone", + "https://github.com.cnpmjs.org/zsh-users/zsh-autosuggestions", + "~/.oh-my-zsh/plugins/zsh-autosuggestions", + }, + { + "git", + "clone", + "https://github.com.cnpmjs.org/zsh-users/zsh-syntax-highlighting.git", + "~/.oh-my-zsh/plugins/zsh-syntax-highlighting", + }, + { + "wget", + "https://b2.107421.xyz/oh-my-zsh-plugins-list.txt", + "-O", + "oh-my-zsh-plugins-list.txt", + }, + { + "wget", + "-c", + "-i", + "./oh-my-zsh-plugins-list.txt", + "-P", + "~/.oh-my-zsh/plugins/", + }, + { + "sed", + "-i", + "s/robbyrussell/agnoster/g", + "~/.zshrc", + }, + { + "sed", + "-i", + "s/^# DISABLE_AUTO_UPDATE=\"true\"/DISABLE_AUTO_UPDATE=\"true\"/g", + "~/.zshrc", + }, + { + "sed", + "-i", + "s/plugins=(git)/plugins=(git zsh-autosuggestions zsh-syntax-highlighting command-not-found z themes)/g", + "~/.zshrc", + }, + { + "source", + "~/.zshrc", + }, + { + "chsh", + "-s", + "/bin/zsh", + }, + { + "zsh", + }, + }..., + ) + } + + return installZSHFunc } func (op *AgentOsOperator) modifySshPort(args []string) [][]string { @@ -343,7 +699,8 @@ func (op *AgentOsOperator) openBBR() [][]string { } func (op *AgentOsOperator) ok(args []string) [][]string { + log.InfoF("base function is ok , args are => " + strings.Join(args, " ")) return [][]string{ - {"base function is ok , args are => " + strings.Join(args, " ")}, + {"ifconfig"}, } } diff --git a/agent-go/executor/BaseFunction_test.go b/agent-go/executor/BaseFunction_test.go index 37920f3..77de6e1 100644 --- a/agent-go/executor/BaseFunction_test.go +++ b/agent-go/executor/BaseFunction_test.go @@ -6,10 +6,12 @@ var agentOP = AgentOsOperator{ InstallCommandPrefix: []string{ "apt-get", "install", "-y", }, - RemoveCommandPrefix: []string{"/usr/bin/apt", "remove", "-y"}, + RemoveCommandPrefix: []string{"apt", "remove", "-y"}, CanAccessInternet: true, IsOsTypeUbuntu: true, IsAgentInnerWall: true, + AgentArch: "amd64", + AgentOSReleaseCode: "focal", } func TestBaseFunc(t *testing.T) { @@ -19,6 +21,7 @@ func TestBaseFunc(t *testing.T) { //agentOP.Exec("disableSwap") //agentOP.Exec("enableSwap") //agentOP.Exec("removeDocker") - agentOP.Exec("installDocker") + //agentOP.Exec("installDocker", "20") + agentOP.Exec("installDockerCompose") } diff --git a/agent-go/executor/script/1_node_important.sh b/agent-go/executor/script/1_node_important.sh index 3d879b2..47335f6 100644 --- a/agent-go/executor/script/1_node_important.sh +++ b/agent-go/executor/script/1_node_important.sh @@ -1290,7 +1290,7 @@ modifyZSH() { colorEcho ${GREEN} "zsh应该已经安装成功!!!" 
colorEcho ${BLUE} "开始修改zsh的相关配置信息,使其更加好用…………" echo "" - cat >~/oh-my-zsh-plugins-list.txt <oh-my-zsh-plugins-list.txt < Date: Sun, 25 Jun 2023 09:49:25 +0800 Subject: [PATCH 38/45] [ Executor ] add base sehll script --- agent-go/executor/BaseFunction_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/agent-go/executor/BaseFunction_test.go b/agent-go/executor/BaseFunction_test.go index 77de6e1..e2dc4af 100644 --- a/agent-go/executor/BaseFunction_test.go +++ b/agent-go/executor/BaseFunction_test.go @@ -23,5 +23,10 @@ func TestBaseFunc(t *testing.T) { //agentOP.Exec("removeDocker") //agentOP.Exec("installDocker", "20") agentOP.Exec("installDockerCompose") + agentOP.Exec("installHelm") + agentOP.Exec("installHarbor") + agentOP.Exec("chronyToPublicNTP") + agentOP.Exec("chronyToMaster", "192.168.0.8") + agentOP.Exec("installZSH") } From fb38ab8f15fca26308a0ad3f4e0f19af17b9d9e2 Mon Sep 17 00:00:00 2001 From: IceDerce Date: Sun, 25 Jun 2023 11:26:33 +0800 Subject: [PATCH 39/45] [Excution] - base function start - 3 --- .../{register => }/AgentInitialization.go | 21 ++-- agent-go/{rabbitmq => }/OctopusMessage.go | 11 +- agent-go/executor/BaseFunction.go | 101 ++++++++++++++---- agent-go/executor/BaseFunction_test.go | 40 +++++-- agent-go/g/global.go | 2 + agent-go/main.go | 2 +- agent-go/rabbitmq/OMsgConnector.go | 7 +- 7 files changed, 136 insertions(+), 48 deletions(-) rename agent-go/{register => }/AgentInitialization.go (93%) rename agent-go/{rabbitmq => }/OctopusMessage.go (93%) diff --git a/agent-go/register/AgentInitialization.go b/agent-go/AgentInitialization.go similarity index 93% rename from agent-go/register/AgentInitialization.go rename to agent-go/AgentInitialization.go index f0a02a0..40c82e6 100644 --- a/agent-go/register/AgentInitialization.go +++ b/agent-go/AgentInitialization.go @@ -1,9 +1,9 @@ -package register +package main import ( "agent-go/g" - logger2 "agent-go/logger" "agent-go/rabbitmq" + "agent-go/register" "encoding/json" "fmt" "gopkg.in/yaml.v3" @@ -12,12 +12,11 @@ import ( ) var omType = g.InitOmType -var log = logger2.Log var P = g.G.P -var AgentServerInfoCache = &AgentServerInfo{} +var AgentServerInfoCache = ®ister.AgentServerInfo{} -func INIT() *AgentServerInfo { +func INIT() *register.AgentServerInfo { // 获取系统的环境变量 agentServerInfo := parseAgentServerInfo() @@ -48,7 +47,7 @@ func INIT() *AgentServerInfo { initToServerQueue.Connect() // 组装OctopusMessage - var octopusMsg *rabbitmq.OctopusMessage + var octopusMsg *OctopusMessage octopusMsg = octopusMsg.Build( omType, agentServerInfo, @@ -92,7 +91,7 @@ func INIT() *AgentServerInfo { } // handleInitMsgFromServer 处理从Server接收的 注册信息 -func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToServerQueue *rabbitmq.RabbitQueue, agentServerInfo *AgentServerInfo) { +func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToServerQueue *rabbitmq.RabbitQueue, agentServerInfo *register.AgentServerInfo) { initOctopusMessageDeliveries := initFromServerQueue.Read(false) @@ -104,14 +103,14 @@ func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToSe log.Debug(fmt.Sprintf("message received from server is %s", string(delivery.Body))) - var initOctopusMsg *rabbitmq.OctopusMessage + var initOctopusMsg *OctopusMessage err := json.Unmarshal(delivery.Body, &initOctopusMsg) if err != nil { log.Error(fmt.Sprintf("parse init message from server wroong, message is => %s ", string(delivery.Body))) } - var serverInfo AgentServerInfo + var serverInfo register.AgentServerInfo 
s, _ := initOctopusMsg.Content.(string) cc := json.Unmarshal([]byte(s), &serverInfo) @@ -161,11 +160,11 @@ func shutdownRegisterQueueConnection(initFromServerQueue *rabbitmq.RabbitQueue, log.InfoF("Pretend to Shutdown register queue connection !") } -func parseAgentServerInfo() *AgentServerInfo { +func parseAgentServerInfo() *register.AgentServerInfo { // 约定文件地址为 /etc/environment.d/octopus-agent.conf // 目前使用 - var agentServerInfo *AgentServerInfo + var agentServerInfo *register.AgentServerInfo //yamlFile, err := ioutil.ReadFile("C:\\Users\\wdd\\IdeaProjects\\ProjectOctopus\\agent-go\\server-env.yaml") yamlFile, err := ioutil.ReadFile("server-env.yaml") diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/OctopusMessage.go similarity index 93% rename from agent-go/rabbitmq/OctopusMessage.go rename to agent-go/OctopusMessage.go index 69a0c1f..045fa60 100644 --- a/agent-go/rabbitmq/OctopusMessage.go +++ b/agent-go/OctopusMessage.go @@ -1,8 +1,9 @@ -package rabbitmq +package main import ( "agent-go/executor" "agent-go/g" + "agent-go/rabbitmq" "agent-go/status" "agent-go/utils" "encoding/json" @@ -10,8 +11,6 @@ import ( "strings" ) -var P = g.G.P - type IOctopusMessage interface { OctopusMsgHandler OctopusMsgSender @@ -23,7 +22,7 @@ type OctopusMsgHandler interface { } type OctopusMsgSender interface { - Send(rabbitQueue *RabbitQueue, msg []byte) + Send(rabbitQueue *rabbitmq.RabbitQueue, msg []byte) } type OctopusMsgBuilder interface { @@ -44,7 +43,7 @@ func (om *OctopusMessage) Handle() { doHandleOctopusMessage(om) } -func (om *OctopusMessage) Send(rabbitQueue *RabbitQueue, msg []byte) { +func (om *OctopusMessage) Send(rabbitQueue *rabbitmq.RabbitQueue, msg []byte) { rabbitQueue.Send(msg) } @@ -148,7 +147,7 @@ func statusOMHandler(octopusMessage *OctopusMessage) { octopusMessage.Result = statusRes // 发送回去 statusOctopusReplayMessage, _ := json.Marshal(octopusMessage) - OctopusToServerQueue.Send(statusOctopusReplayMessage) + rabbitmq.OctopusToServerQueue.Send(statusOctopusReplayMessage) // 输出日志 log.InfoF("接收到查询Agent状态的请求,结果为 => %s", statusRes) diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go index a8d8250..d0cdf9a 100644 --- a/agent-go/executor/BaseFunction.go +++ b/agent-go/executor/BaseFunction.go @@ -1,6 +1,7 @@ package executor import ( + "agent-go/g" "agent-go/register" "strings" ) @@ -52,6 +53,9 @@ func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string case "removeDocker": multiLineCommand = op.removeDocker() break + case "removeDockerCompose": + multiLineCommand = op.removeDockerCompose() + break case "installDockerCompose": multiLineCommand = op.installDockerCompose() break @@ -329,11 +333,52 @@ func (op *AgentOsOperator) installDockerCompose() [][]string { } func (op *AgentOsOperator) installHelm() [][]string { - var installHelmFunc [][]string + installHelmFunc := [][]string{ + { + "mkdir", + "-p", + "/root/wdd/", + }, + { + "rm", + "-rf", + "/root/wdd/helm-v*", + }, + { + "rm", + "-rf", + "/root/wdd/linux-amd64", + }, + { + "wget", + "--no-check-certificate", + g.BaseFuncOssUrlPrefix + "helm-v3.12.1-linux-amd64.tar.gz", + "-O", + "/root/wdd/helm-v3.12.1-linux-amd64.tar.gz", + }, + { + "tar", + "-zvxf", + "/root/wdd/helm-v3.12.1-linux-amd64.tar.gz", + }, + { + "chmod", + "+x", + "/root/wdd/linux-amd64/helm", + }, + { + "mv", + "/root/wdd/linux-amd64/helm", + "/usr/local/bin/helm", + }, + { + "helm", + "version", + }, + } - if op.IsOsTypeUbuntu { + /*if op.IsOsTypeUbuntu { installHelmFunc = [][]string{ - { "curl", "-o", @@ 
-357,7 +402,7 @@ func (op *AgentOsOperator) installHelm() [][]string { } } else { log.ErrorF("Operation OS is CentOS, Helm not installed!") - } + }*/ return installHelmFunc } @@ -374,7 +419,7 @@ func (op *AgentOsOperator) modifyDockerConfig(args []string) [][]string { }, { "wget", - "https://oss2.demo.uavcmlc.com:18000/wangdada/daemon-config.json", + g.BaseFuncOssUrlPrefix + "daemon-config.json", "-O", "/etc/docker/daemon.json", }, @@ -397,50 +442,62 @@ func (op *AgentOsOperator) modifyDockerConfig(args []string) [][]string { func (op *AgentOsOperator) installHarbor(args []string) [][]string { installHarborFunc := [][]string{ - { - "wget", - "--no-check-certificate", - "https://oss2.demo.uavcmlc.com:18000/wangdada/harbor-offline-installer-v2.1.0.tgz", - "-O", - "harbor-offline-installer-v2.1.0.tgz", - }, + //{ + // "mkdir", + // "-p", + // "/root/wdd/", + //}, + //{ + // "rm", + // "-rf", + // "/root/wdd/harbor-offline-installer-v2.1.0.tgz", + //}, + //{ + // "wget", + // "--no-check-certificate", + // g.BaseFuncOssUrlPrefix + "harbor-offline-installer-v2.1.0.tgz", + // "-O", + // "/root/wdd/harbor-offline-installer-v2.1.0.tgz", + //}, { "tar", "-zvxf", - "harbor-offline-installer-v2.1.0.tgz", + "/root/wdd/harbor-offline-installer-v2.1.0.tgz", + "-C", + "/root/wdd/", }, { "rm", "-rf", - "./harbor/harbor.yml", + "/root/wdd/harbor/harbor.yml", }, { "wget", "--no-check-certificate", - "https://oss2.demo.uavcmlc.com:18000/wangdada/harbor.yml", + g.BaseFuncOssUrlPrefix + "harbor-config-template.yml", "-O", - "./harbor/harbor.yml", + "/root/wdd/harbor/harbor.yml", }, { "sed", "-i", "s/$HarborHostName/" + op.AgentServerInfo.ServerIPInV4 + "/g", - "./harbor/harbor.yml", + "/root/wdd/harbor/harbor.yml", }, { "sed", "-i", "s/$HarborHostPort/8033/g", - "./harbor/harbor.yml", + "/root/wdd/harbor/harbor.yml", }, { "sed", "-i", "s/$HarborHostPort/V2ryStr@ngPss/g", - "./harbor/harbor.yml", + "/root/wdd/harbor/harbor.yml", }, { - "./harbor/install.sh", + "/root/wdd/harbor/install.sh", "--with-chartmuseum", }, } @@ -488,7 +545,7 @@ func (op *AgentOsOperator) chronyToPublicNTP() [][]string { { "sed", "-i", - "s/server 0.centos.pool.ntp.org iburst/server ntp2.aliyun.com iburst/g", + "s/pool ntp.ubuntu.com iburst/server ntp2.aliyun.com iburst/g", chronyFile, }, { @@ -505,6 +562,8 @@ func (op *AgentOsOperator) chronyToPublicNTP() [][]string { "-n", "sources", "-v", + }, + { "chronyc", "tracking", }, diff --git a/agent-go/executor/BaseFunction_test.go b/agent-go/executor/BaseFunction_test.go index e2dc4af..1002a11 100644 --- a/agent-go/executor/BaseFunction_test.go +++ b/agent-go/executor/BaseFunction_test.go @@ -1,6 +1,9 @@ package executor -import "testing" +import ( + "agent-go/register" + "testing" +) var agentOP = AgentOsOperator{ InstallCommandPrefix: []string{ @@ -12,6 +15,30 @@ var agentOP = AgentOsOperator{ IsAgentInnerWall: true, AgentArch: "amd64", AgentOSReleaseCode: "focal", + AgentServerInfo: register.AgentServerInfo{ + ServerName: "", + ServerIPPbV4: "", + ServerIPInV4: "192.168.0.8", + ServerIPPbV6: "", + ServerIPInV6: "", + Location: "", + Provider: "", + ManagePort: "", + CPUCore: "", + CPUBrand: "", + OSInfo: "", + OSKernelInfo: "", + TCPControl: "", + Virtualization: "", + IoSpeed: "", + MemoryTotal: "", + DiskTotal: "", + DiskUsage: "", + Comment: "", + MachineID: "", + AgentVersion: "", + TopicName: "", + }, } func TestBaseFunc(t *testing.T) { @@ -22,11 +49,12 @@ func TestBaseFunc(t *testing.T) { //agentOP.Exec("enableSwap") //agentOP.Exec("removeDocker") //agentOP.Exec("installDocker", 
"20") - agentOP.Exec("installDockerCompose") - agentOP.Exec("installHelm") - agentOP.Exec("installHarbor") - agentOP.Exec("chronyToPublicNTP") - agentOP.Exec("chronyToMaster", "192.168.0.8") + //agentOP.Exec("removeDockerCompose") + //agentOP.Exec("installDockerCompose") + //agentOP.Exec("installHelm") + //agentOP.Exec("installHarbor") + //agentOP.Exec("chronyToPublicNTP") + //agentOP.Exec("chronyToMaster", "192.168.0.8") agentOP.Exec("installZSH") } diff --git a/agent-go/g/global.go b/agent-go/g/global.go index 4c35d06..e070da7 100644 --- a/agent-go/g/global.go +++ b/agent-go/g/global.go @@ -19,6 +19,8 @@ const ( StatusOmType = "STATUS" InitOmType = "INIT" AgentOmType = "AGENT" + + BaseFuncOssUrlPrefix = "https://b2.107421.xyz/" ) var pool, _ = ants.NewPool(100, ants.WithNonblocking(false), ants.WithLogger(logger2.Log), ants.WithMaxBlockingTasks(30), ants.WithDisablePurge(true)) diff --git a/agent-go/main.go b/agent-go/main.go index 2784f0e..47f0ad9 100644 --- a/agent-go/main.go +++ b/agent-go/main.go @@ -24,6 +24,6 @@ func main() { g.G.AgentConfig = register.ParseConfiguration(filename) // 执行初始化之策工作 - register.AgentServerInfoCache = register.INIT() + AgentServerInfoCache = INIT() } diff --git a/agent-go/rabbitmq/OMsgConnector.go b/agent-go/rabbitmq/OMsgConnector.go index 4cd4c85..cd72e29 100644 --- a/agent-go/rabbitmq/OMsgConnector.go +++ b/agent-go/rabbitmq/OMsgConnector.go @@ -1,6 +1,7 @@ package rabbitmq import ( + "agent-go" "agent-go/g" "encoding/json" "fmt" @@ -50,12 +51,12 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { deliveries := octopusMsgQueue.Read(true) forever := make(chan bool) - P.Submit( + main.P.Submit( func() { // 死循环,处理Octopus Message for delivery := range deliveries { - var om *OctopusMessage + var om *main.OctopusMessage err := json.Unmarshal(delivery.Body, &om) if err != nil { log.Error(fmt.Sprintf("octopus message convert to json is wrong! 
msg is => %s", delivery.Body)) @@ -64,7 +65,7 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { } // 策略模式 处理消息 - P.Submit(func() { + main.P.Submit(func() { om.Handle() }) } From 80198930c4d340ca1c566b8483ad0c6d69025cba Mon Sep 17 00:00:00 2001 From: IceDerce Date: Sun, 25 Jun 2023 11:31:27 +0800 Subject: [PATCH 40/45] [Excution] - base function start - 3 --- agent-go/AgentInitialization.go | 4 ++-- agent-go/rabbitmq/OMsgConnector.go | 9 +++++---- agent-go/{ => rabbitmq}/OctopusMessage.go | 9 ++++----- 3 files changed, 11 insertions(+), 11 deletions(-) rename agent-go/{ => rabbitmq}/OctopusMessage.go (93%) diff --git a/agent-go/AgentInitialization.go b/agent-go/AgentInitialization.go index 40c82e6..b92e3a0 100644 --- a/agent-go/AgentInitialization.go +++ b/agent-go/AgentInitialization.go @@ -47,7 +47,7 @@ func INIT() *register.AgentServerInfo { initToServerQueue.Connect() // 组装OctopusMessage - var octopusMsg *OctopusMessage + var octopusMsg *rabbitmq.OctopusMessage octopusMsg = octopusMsg.Build( omType, agentServerInfo, @@ -103,7 +103,7 @@ func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToSe log.Debug(fmt.Sprintf("message received from server is %s", string(delivery.Body))) - var initOctopusMsg *OctopusMessage + var initOctopusMsg *rabbitmq.OctopusMessage err := json.Unmarshal(delivery.Body, &initOctopusMsg) if err != nil { log.Error(fmt.Sprintf("parse init message from server wroong, message is => %s ", diff --git a/agent-go/rabbitmq/OMsgConnector.go b/agent-go/rabbitmq/OMsgConnector.go index cd72e29..89bf16d 100644 --- a/agent-go/rabbitmq/OMsgConnector.go +++ b/agent-go/rabbitmq/OMsgConnector.go @@ -1,7 +1,6 @@ package rabbitmq import ( - "agent-go" "agent-go/g" "encoding/json" "fmt" @@ -9,6 +8,8 @@ import ( var OctopusToServerQueue = &RabbitQueue{} +var P = g.G.P + func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { // 建立 业务消息 接收队列 @@ -51,12 +52,12 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { deliveries := octopusMsgQueue.Read(true) forever := make(chan bool) - main.P.Submit( + P.Submit( func() { // 死循环,处理Octopus Message for delivery := range deliveries { - var om *main.OctopusMessage + var om *OctopusMessage err := json.Unmarshal(delivery.Body, &om) if err != nil { log.Error(fmt.Sprintf("octopus message convert to json is wrong! 
msg is => %s", delivery.Body)) @@ -65,7 +66,7 @@ func BuildOMsgRuntimeConnectorQueue(agentTopicName string) { } // 策略模式 处理消息 - main.P.Submit(func() { + P.Submit(func() { om.Handle() }) } diff --git a/agent-go/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go similarity index 93% rename from agent-go/OctopusMessage.go rename to agent-go/rabbitmq/OctopusMessage.go index 045fa60..51a2f67 100644 --- a/agent-go/OctopusMessage.go +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -1,9 +1,8 @@ -package main +package rabbitmq import ( "agent-go/executor" "agent-go/g" - "agent-go/rabbitmq" "agent-go/status" "agent-go/utils" "encoding/json" @@ -22,7 +21,7 @@ type OctopusMsgHandler interface { } type OctopusMsgSender interface { - Send(rabbitQueue *rabbitmq.RabbitQueue, msg []byte) + Send(rabbitQueue *RabbitQueue, msg []byte) } type OctopusMsgBuilder interface { @@ -43,7 +42,7 @@ func (om *OctopusMessage) Handle() { doHandleOctopusMessage(om) } -func (om *OctopusMessage) Send(rabbitQueue *rabbitmq.RabbitQueue, msg []byte) { +func (om *OctopusMessage) Send(rabbitQueue *RabbitQueue, msg []byte) { rabbitQueue.Send(msg) } @@ -147,7 +146,7 @@ func statusOMHandler(octopusMessage *OctopusMessage) { octopusMessage.Result = statusRes // 发送回去 statusOctopusReplayMessage, _ := json.Marshal(octopusMessage) - rabbitmq.OctopusToServerQueue.Send(statusOctopusReplayMessage) + OctopusToServerQueue.Send(statusOctopusReplayMessage) // 输出日志 log.InfoF("接收到查询Agent状态的请求,结果为 => %s", statusRes) From 97187363ccccd8d5e0f598f0c2420d6b52d254d1 Mon Sep 17 00:00:00 2001 From: IceDerce Date: Mon, 26 Jun 2023 13:50:14 +0800 Subject: [PATCH 41/45] [Excution] - base function accomplish - 4 --- agent-go/AgentInitialization.go | 23 +++++++++++++ agent-go/executor/BaseFunction.go | 32 ++++++++++++----- agent-go/executor/BaseFunction_test.go | 8 ++--- agent-go/executor/CommandExecutor.go | 34 +++++++++++++------ agent-go/executor/RealTimeExecutor.go | 21 ++++++++---- ...ion.go => NacosInitalizationDeprecated.go} | 0 6 files changed, 88 insertions(+), 30 deletions(-) rename agent-go/register/{NacosInitalization.go => NacosInitalizationDeprecated.go} (100%) diff --git a/agent-go/AgentInitialization.go b/agent-go/AgentInitialization.go index b92e3a0..065d57c 100644 --- a/agent-go/AgentInitialization.go +++ b/agent-go/AgentInitialization.go @@ -1,6 +1,7 @@ package main import ( + "agent-go/executor" "agent-go/g" "agent-go/rabbitmq" "agent-go/register" @@ -184,5 +185,27 @@ func parseAgentServerInfo() *register.AgentServerInfo { } log.Info(fmt.Sprintf("agent server info is %v", string(jsonFormat))) + // build a operator cache + BuildAgentOsOperator(agentServerInfo) + return agentServerInfo } + +func BuildAgentOsOperator(agentServerInfo *register.AgentServerInfo) { + + executor.AgentOsOperatorCache = &executor.AgentOsOperator{ + InstallCommandPrefix: nil, + RemoveCommandPrefix: nil, + CanAccessInternet: false, + IsOsTypeUbuntu: false, + IsAgentInnerWall: false, + AgentArch: "", + AgentOSReleaseCode: "", + AgentServerInfo: agentServerInfo, + } + + // debug + marshal, _ := json.Marshal(executor.AgentOsOperatorCache) + log.DebugF("cached agent operator is %s", marshal) + +} diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go index d0cdf9a..bb6249c 100644 --- a/agent-go/executor/BaseFunction.go +++ b/agent-go/executor/BaseFunction.go @@ -3,11 +3,12 @@ package executor import ( "agent-go/g" "agent-go/register" + "fmt" "strings" ) type BaseFunc interface { - Exec(baseFuncName string, funcArgs ...string) string + 
Exec(baseFuncName string, funcArgs ...string) []string } type AgentOsOperator struct { @@ -25,11 +26,11 @@ type AgentOsOperator struct { AgentOSReleaseCode string `json:"agent_os_release_code",comment:"主机操作系统的发行版代号, focal之类的"` - AgentServerInfo register.AgentServerInfo `json:"agent_server_info"` + AgentServerInfo *register.AgentServerInfo `json:"agent_server_info"` } // Exec 执行基础功能函数 -func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string { +func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) []string { var multiLineCommand [][]string @@ -90,13 +91,21 @@ func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) string log.DebugF("multiLineCommand are => %v", multiLineCommand) + var result []string + // exec the command here for _, singleLineCommand := range multiLineCommand { - ReadTimeCommandExecutor(singleLineCommand) + result = append(result, ReadTimeCommandExecutor(singleLineCommand)...) + } + + // debug usage + //log.DebugF("exec result are => %v", result) + for _, logLine := range result { + fmt.Println(logLine) } // 归一化处理 - return strings.Join([]string{}, "") + return result } func (op *AgentOsOperator) shutdownFirewall() [][]string { @@ -628,6 +637,11 @@ func (op *AgentOsOperator) chronyToMaster(args []string) [][]string { func (op *AgentOsOperator) installZSH() [][]string { installZSHFunc := [][]string{ + { + "mkdir", + "-p", + "/root/wdd/", + }, append( op.InstallCommandPrefix, "zsh", @@ -643,7 +657,7 @@ func (op *AgentOsOperator) installZSH() [][]string { "wget", "https://cdn.jsdelivr.net/gh/robbyrussell/oh-my-zsh@master/tools/install.sh", "-O", - "zsh-install.sh", + "/root/wdd/zsh-install.sh", }, }..., ) @@ -656,7 +670,7 @@ func (op *AgentOsOperator) installZSH() [][]string { "wget", "https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh", "-O", - "zsh-install.sh", + "/root/wdd/zsh-install.sh", }, }..., ) @@ -669,12 +683,12 @@ func (op *AgentOsOperator) installZSH() [][]string { { "chmod", "+x", - "zsh-install.sh", + "/root/wdd/zsh-install.sh", }, { "sh", "-c", - "./zsh-install.sh", + "/root/wdd/zsh-install.sh", }, }..., ) diff --git a/agent-go/executor/BaseFunction_test.go b/agent-go/executor/BaseFunction_test.go index 1002a11..63518d7 100644 --- a/agent-go/executor/BaseFunction_test.go +++ b/agent-go/executor/BaseFunction_test.go @@ -5,7 +5,7 @@ import ( "testing" ) -var agentOP = AgentOsOperator{ +var agentOP = &AgentOsOperator{ InstallCommandPrefix: []string{ "apt-get", "install", "-y", }, @@ -15,7 +15,7 @@ var agentOP = AgentOsOperator{ IsAgentInnerWall: true, AgentArch: "amd64", AgentOSReleaseCode: "focal", - AgentServerInfo: register.AgentServerInfo{ + AgentServerInfo: ®ister.AgentServerInfo{ ServerName: "", ServerIPPbV4: "", ServerIPInV4: "192.168.0.8", @@ -47,7 +47,7 @@ func TestBaseFunc(t *testing.T) { //agentOP.Exec("modifyHostname") //agentOP.Exec("disableSwap") //agentOP.Exec("enableSwap") - //agentOP.Exec("removeDocker") + agentOP.Exec("removeDocker") //agentOP.Exec("installDocker", "20") //agentOP.Exec("removeDockerCompose") //agentOP.Exec("installDockerCompose") @@ -55,6 +55,6 @@ func TestBaseFunc(t *testing.T) { //agentOP.Exec("installHarbor") //agentOP.Exec("chronyToPublicNTP") //agentOP.Exec("chronyToMaster", "192.168.0.8") - agentOP.Exec("installZSH") + //agentOP.Exec("installZSH") } diff --git a/agent-go/executor/CommandExecutor.go b/agent-go/executor/CommandExecutor.go index 1a16a89..727198c 100644 --- a/agent-go/executor/CommandExecutor.go +++ 
diff --git a/agent-go/executor/CommandExecutor.go b/agent-go/executor/CommandExecutor.go
index 1a16a89..727198c 100644
--- a/agent-go/executor/CommandExecutor.go
+++ b/agent-go/executor/CommandExecutor.go
@@ -13,6 +13,7 @@ type ExecutionMessage struct {
 	NeedResultReplay  bool       `json:"needResultReplay"`
 	DurationTask      bool       `json:"durationTask,default:false"`
 	Type              string     `json:"type"`
+	BaseFuncContent   []string   `json:"baseFuncContent"`
 	SingleLineCommand []string   `json:"singleLineCommand"`
 	MultiLineCommand  [][]string `json:"multiLineCommand"`
 	PipeLineCommand   [][]string `json:"pipeLineCommand"`
@@ -21,24 +22,35 @@ type ExecutionMessage struct {
 
 var log = logger2.Log
 
+var AgentOsOperatorCache = &AgentOsOperator{}
+
 func Execute(em *ExecutionMessage) ([]string, error) {
 
 	var resultLog []string
 	var err error
 	var realCommand [][]string
 
-	if em.PipeLineCommand != nil && len(em.PipeLineCommand) != 0 {
-		// 管道命令
-		resultLog, err = PipeLineCommandExecutor(em.PipeLineCommand)
-		realCommand = em.PipeLineCommand
-	} else if em.MultiLineCommand != nil && len(em.MultiLineCommand) != 0 {
-		// 多行命令
-		resultLog, err = MultiLineCommandExecutor(em.MultiLineCommand)
-		realCommand = em.MultiLineCommand
+	if strings.HasPrefix(em.Type, "BASE") {
+		// base function
+		resultLog = AgentOsOperatorCache.Exec(em.BaseFuncContent[0], em.BaseFuncContent[1:]...)
+		err = nil
+
 	} else {
-		// 单行命令
-		resultLog, err = SingleLineCommandExecutor(em.SingleLineCommand)
-		realCommand = [][]string{em.SingleLineCommand}
+		// shell command
+
+		if em.PipeLineCommand != nil && len(em.PipeLineCommand) != 0 {
+			// 管道命令
+			resultLog, err = PipeLineCommandExecutor(em.PipeLineCommand)
+			realCommand = em.PipeLineCommand
+		} else if em.MultiLineCommand != nil && len(em.MultiLineCommand) != 0 {
+			// 多行命令
+			resultLog, err = MultiLineCommandExecutor(em.MultiLineCommand)
+			realCommand = em.MultiLineCommand
+		} else {
+			// 单行命令
+			resultLog, err = SingleLineCommandExecutor(em.SingleLineCommand)
+			realCommand = [][]string{em.SingleLineCommand}
+		}
 	}
 
 	// 归一化错误和日志
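The new BASE branch indexes em.BaseFuncContent[0] without checking the slice, so a BASE-typed message with an empty baseFuncContent would panic the agent. A defensive variant of that routing (executeBase is a hypothetical wrapper; the guard is not in the patch):

    package executor

    import (
        "fmt"
        "strings"
    )

    // executeBase rejects a BASE-typed message whose baseFuncContent is empty
    // instead of panicking on em.BaseFuncContent[0].
    func executeBase(em *ExecutionMessage) ([]string, error) {
        if !strings.HasPrefix(em.Type, "BASE") {
            return nil, fmt.Errorf("not a BASE message: %s", em.Type)
        }
        if len(em.BaseFuncContent) == 0 {
            return nil, fmt.Errorf("BASE message carries no baseFuncContent")
        }
        return AgentOsOperatorCache.Exec(em.BaseFuncContent[0], em.BaseFuncContent[1:]...), nil
    }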
diff --git a/agent-go/executor/RealTimeExecutor.go b/agent-go/executor/RealTimeExecutor.go
index b56c642..b6d6de1 100644
--- a/agent-go/executor/RealTimeExecutor.go
+++ b/agent-go/executor/RealTimeExecutor.go
@@ -2,12 +2,11 @@ package executor
 
 import (
 	"bufio"
-	"fmt"
 	"io"
 	"os/exec"
 )
 
-func ReadTimeCommandExecutor(singleLineCommand []string) {
+func ReadTimeCommandExecutor(singleLineCommand []string) []string {
 
 	cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...)
 	stdout, err := cmd.StdoutPipe()
@@ -23,18 +22,28 @@ func ReadTimeCommandExecutor(singleLineCommand []string) {
 		log.ErrorF("command %v runtime error => %v", singleLineCommand, err)
 	}
 
-	go copyOutput(stdout)
-	go copyOutput(stderr)
+	var resultSlice []string
+	resultSlice = copyOutput(stdout, resultSlice)
+	resultSlice = copyOutput(stderr, resultSlice)
 
 	if err := cmd.Wait(); err != nil {
 		log.ErrorF("command %v result error => %v", singleLineCommand, err)
 	}
 
+	//log.DebugF("real time exec result are %v", resultSlice)
+
+	return resultSlice
 }
 
-func copyOutput(r io.Reader) {
+func copyOutput(r io.Reader, resultSlice []string) []string {
 	scanner := bufio.NewScanner(r)
 	for scanner.Scan() {
-		fmt.Println(scanner.Text())
+		resultLine := scanner.Text()
+
+		resultSlice = append(resultSlice, resultLine)
+		// debug usage
+		//fmt.Println(resultLine)
 	}
+
+	return resultSlice
 }
diff --git a/agent-go/register/NacosInitalization.go b/agent-go/register/NacosInitalizationDeprecated.go
similarity index 100%
rename from agent-go/register/NacosInitalization.go
rename to agent-go/register/NacosInitalizationDeprecated.go

From 193c1118f21603b5e9080b85021956dc197f453d Mon Sep 17 00:00:00 2001
From: zeaslity
Date: Mon, 26 Jun 2023 14:52:53 +0800
Subject: [PATCH 42/45] [ Executor ] modify base function part - 1

---
 agent-go/executor/BaseFunction.go   |  4 ++--
 agent-go/rabbitmq/OctopusMessage.go | 13 +++++++------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go
index bb6249c..b02323e 100644
--- a/agent-go/executor/BaseFunction.go
+++ b/agent-go/executor/BaseFunction.go
@@ -67,7 +67,7 @@ func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) []strin
 		multiLineCommand = op.installHelm()
 		break
 	case "installHarbor":
-		multiLineCommand = op.installHarbor(funcArgs)
+		multiLineCommand = op.installHarbor()
 		break
 	case "chronyToPublicNTP":
 		multiLineCommand = op.chronyToPublicNTP()
 		break
@@ -448,7 +448,7 @@ func (op *AgentOsOperator) modifyDockerConfig(args []string) [][]string {
 	return modifyDockerConfigFunc
 }
 
-func (op *AgentOsOperator) installHarbor(args []string) [][]string {
+func (op *AgentOsOperator) installHarbor() [][]string {
 
 	installHarborFunc := [][]string{
 		//{
diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go
index 51a2f67..57ab6ce 100644
--- a/agent-go/rabbitmq/OctopusMessage.go
+++ b/agent-go/rabbitmq/OctopusMessage.go
@@ -29,12 +29,13 @@ type OctopusMsgBuilder interface {
 }
 
 type OctopusMessage struct {
-	UUID     string      `json:"uuid"`
-	InitTime string      `json:"init_time" format:"2023-03-21 16:38:30"`
-	Type     string      `json:"type"`
-	Content  interface{} `json:"content"`
-	Result   interface{} `json:"result"`
-	ACTime   string      `json:"ac_time" format:"2023-03-21 16:38:30"`
+	UUID       string      `json:"uuid"`
+	InitTime   string      `json:"init_time" format:"2023-03-21 16:38:30"`
+	Type       string      `json:"type"`
+	Content    interface{} `json:"content"`
+	Result     interface{} `json:"result"`
+	ResultCode string      `json:"resultCode"`
+	ACTime     string      `json:"ac_time" format:"2023-03-21 16:38:30"`
 }
 
 func (om *OctopusMessage) Handle() {
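Patch 42 widens the OctopusMessage envelope with a ResultCode string. A minimal round-trip sketch of populating and serializing a reply under that shape (the struct mirrors the wire format above; the Type value and field contents are illustrative, and the codes follow the success/failure convention used in patch 43 below):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // OctopusMessage mirrors the envelope after patch 42; Content and Result
    // stay interface{} because the server ships JSON-encoded strings in Content.
    type OctopusMessage struct {
        UUID       string      `json:"uuid"`
        InitTime   string      `json:"init_time"`
        Type       string      `json:"type"`
        Content    interface{} `json:"content"`
        Result     interface{} `json:"result"`
        ResultCode string      `json:"resultCode"`
        ACTime     string      `json:"ac_time"`
    }

    func main() {
        reply := OctopusMessage{
            UUID:       "2023-06-26 14:52:53",
            Type:       "EXECUTOR", // illustrative type value
            Result:     []string{"ok"},
            ResultCode: "200", // "200" = success, "300" = failure
        }
        bytes, _ := json.Marshal(reply)
        fmt.Println(string(bytes))
    }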
From ca255e1b3fdfc7d7d3ea11853cf6418fa63362a1 Mon Sep 17 00:00:00 2001
From: IceDerce
Date: Mon, 26 Jun 2023 16:04:46 +0800
Subject: [PATCH 43/45] [Execution] - base function accomplish - 4

---
 agent-go/executor/BaseFunction.go      | 11 +++++-----
 agent-go/executor/BaseFunction_test.go |  4 ++--
 agent-go/rabbitmq/OctopusMessage.go    | 30 +++++++++++++++++++++++++-
 3 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go
index b02323e..f54f95a 100644
--- a/agent-go/executor/BaseFunction.go
+++ b/agent-go/executor/BaseFunction.go
@@ -96,12 +96,13 @@ func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) []strin
 	// exec the command here
 	for _, singleLineCommand := range multiLineCommand {
 		result = append(result, ReadTimeCommandExecutor(singleLineCommand)...)
-	}
 
-	// debug usage
-	//log.DebugF("exec result are => %v", result)
-	for _, logLine := range result {
-		fmt.Println(logLine)
+		// debug usage
+		log.DebugF("exec result are => %v", result)
+		for _, logLine := range result {
+			fmt.Println(logLine)
+		}
+
 	}
 
 	// 归一化处理
diff --git a/agent-go/executor/BaseFunction_test.go b/agent-go/executor/BaseFunction_test.go
index 63518d7..358cdaa 100644
--- a/agent-go/executor/BaseFunction_test.go
+++ b/agent-go/executor/BaseFunction_test.go
@@ -47,8 +47,8 @@ func TestBaseFunc(t *testing.T) {
 	//agentOP.Exec("modifyHostname")
 	//agentOP.Exec("disableSwap")
 	//agentOP.Exec("enableSwap")
-	agentOP.Exec("removeDocker")
-	//agentOP.Exec("installDocker", "20")
+	//agentOP.Exec("removeDocker")
+	agentOP.Exec("installDocker", "20")
 	//agentOP.Exec("removeDockerCompose")
 	//agentOP.Exec("installDockerCompose")
 	//agentOP.Exec("installHelm")
diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go
index 57ab6ce..9de17b0 100644
--- a/agent-go/rabbitmq/OctopusMessage.go
+++ b/agent-go/rabbitmq/OctopusMessage.go
@@ -22,6 +22,8 @@ type OctopusMsgHandler interface {
 
 type OctopusMsgSender interface {
 	Send(rabbitQueue *RabbitQueue, msg []byte)
+
+	SendToOctopusServer()
 }
 
 type OctopusMsgBuilder interface {
@@ -47,6 +49,20 @@ func (om *OctopusMessage) Send(rabbitQueue *RabbitQueue, msg []byte) {
 	rabbitQueue.Send(msg)
 }
 
+// SendToOctopusServer sends the octopus message back to the octopusToServer queue
+func (om *OctopusMessage) SendToOctopusServer() {
+
+	// write the octopus message to bytes
+	octopusMessageReplayBytes, err := json.Marshal(om)
+	if err != nil {
+		log.ErrorF("replay octopus message write error => %v", err)
+	}
+
+	// Send back the result to queue
+	OctopusToServerQueue.Send(octopusMessageReplayBytes)
+
+}
+
 func (om *OctopusMessage) Build(omType string, content interface{}) *OctopusMessage {
 
 	// 当前时间
@@ -111,7 +127,19 @@ func executorOMHandler(octopusMessage *OctopusMessage) {
 	}
 
 	// 交给后端的实际处理器处理, 再次策略
-	executor.Execute(executionMessage)
+	resultLog, err := executor.Execute(executionMessage)
+	if err != nil {
+		octopusMessage.ResultCode = "300"
+	} else {
+		octopusMessage.ResultCode = "200"
+	}
+
+	// send back the result log
+	octopusMessage.Result = resultLog
+	octopusMessage.ACTime = utils.ParseISOLocalDateTime()
+
+	// Send
+	octopusMessage.SendToOctopusServer()
 }
 
 func statusOMHandler(octopusMessage *OctopusMessage) {
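executorOMHandler now folds the executor outcome back into the envelope before replying: Result carries the log lines, ACTime the completion timestamp, and ResultCode the outcome ("300" on error, "200" on success, taking "200" as the success code). That mapping is small enough to keep in one place; a hypothetical helper (resultCodeFor does not appear in the patches):

    package rabbitmq

    // resultCodeFor maps an executor error to the reply codes used above:
    // "200" for success, "300" for failure.
    func resultCodeFor(err error) string {
        if err != nil {
            return "300"
        }
        return "200"
    }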
From 44ce0959d9cc734988533132d258cd282d8d6f33 Mon Sep 17 00:00:00 2001
From: IceDerce
Date: Tue, 27 Jun 2023 15:13:48 +0800
Subject: [PATCH 44/45] [Execution] - base function accomplish union code - 1

---
 agent-go/AgentInitialization.go | 25 +++++++++++++----------
 agent-go/octopus-agent-dev.yaml |  5 +++--
 agent-go/server-env.yaml        |  2 +-
 3 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/agent-go/AgentInitialization.go b/agent-go/AgentInitialization.go
index 065d57c..316590c 100644
--- a/agent-go/AgentInitialization.go
+++ b/agent-go/AgentInitialization.go
@@ -80,7 +80,7 @@ func INIT() *register.AgentServerInfo {
 	initFromServerQueue := &rabbitmq.RabbitQueue{
 		RabbitProp: initFromServerProp,
 	}
-	defer initFromServerQueue.Close()
+	//defer initFromServerQueue.Close()
 
 	// 建立连接
 	initFromServerQueue.Connect()
@@ -113,7 +113,10 @@ func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToSe
 
 	var serverInfo register.AgentServerInfo
 
-	s, _ := initOctopusMsg.Content.(string)
+	s, ok := initOctopusMsg.Content.(string)
+	if !ok {
+		log.ErrorF("convert content to string error! content => %v", initOctopusMsg.Content)
+	}
 	cc := json.Unmarshal([]byte(s), &serverInfo)
 	if cc != nil {
 		log.Error(fmt.Sprintf("parse init message from server wroong, message is => %v ", cc))
@@ -194,14 +197,16 @@ func parseAgentServerInfo() *register.AgentServerInfo {
 func BuildAgentOsOperator(agentServerInfo *register.AgentServerInfo) {
 
 	executor.AgentOsOperatorCache = &executor.AgentOsOperator{
-		InstallCommandPrefix: nil,
-		RemoveCommandPrefix:  nil,
-		CanAccessInternet:    false,
-		IsOsTypeUbuntu:       false,
-		IsAgentInnerWall:     false,
-		AgentArch:            "",
-		AgentOSReleaseCode:   "",
-		AgentServerInfo:      agentServerInfo,
+		InstallCommandPrefix: []string{
+			"apt-get", "install", "-y",
+		},
+		RemoveCommandPrefix: []string{"apt", "remove", "-y"},
+		CanAccessInternet:   true,
+		IsOsTypeUbuntu:      true,
+		IsAgentInnerWall:    true,
+		AgentArch:           "amd64",
+		AgentOSReleaseCode:  "focal",
+		AgentServerInfo:     agentServerInfo,
 	}
 
 	// debug
diff --git a/agent-go/octopus-agent-dev.yaml b/agent-go/octopus-agent-dev.yaml
index 12c3a19..1b683bc 100644
--- a/agent-go/octopus-agent-dev.yaml
+++ b/agent-go/octopus-agent-dev.yaml
@@ -51,11 +51,12 @@ spring:
     allow-circular-references: true
     allow-bean-definition-overriding: true
   rabbitmq:
-    host: 42.192.52.227
+#    host: 42.192.52.227
+    host: 192.168.35.71
     port: 20672
     username: boge
     password: boge8tingH
-    virtual-host: /wdd
+    virtual-host: /
    listener:
      simple:
        retry:
diff --git a/agent-go/server-env.yaml b/agent-go/server-env.yaml
index dd5ab75..2144b59 100644
--- a/agent-go/server-env.yaml
+++ b/agent-go/server-env.yaml
@@ -1,4 +1,4 @@
-serverName: "Chengdu-amd64-98"
+serverName: "Chengdu-amd64-90"
 serverIpPbV4: "183.220.149.17"
 serverIpInV4: ""
 serverIpPbV6: ""
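BuildAgentOsOperator still hardcodes an Ubuntu focal amd64 host with internet access. A sketch of detecting those facts at startup instead (detectHostFacts is an assumed helper, not part of the patch series; arch comes from the Go runtime, the release code from /etc/os-release):

    package main

    import (
        "os"
        "runtime"
        "strings"
    )

    // detectHostFacts derives the values that BuildAgentOsOperator hardcodes:
    // GOARCH for the architecture, and /etc/os-release for the distro ID and
    // codename (VERSION_CODENAME=focal on Ubuntu 20.04).
    func detectHostFacts() (arch, releaseCode string, isUbuntu bool) {
        arch = runtime.GOARCH

        raw, err := os.ReadFile("/etc/os-release")
        if err != nil {
            return arch, "", false
        }
        for _, line := range strings.Split(string(raw), "\n") {
            switch {
            case strings.HasPrefix(line, "VERSION_CODENAME="):
                releaseCode = strings.Trim(strings.TrimPrefix(line, "VERSION_CODENAME="), `"`)
            case strings.HasPrefix(line, "ID="):
                isUbuntu = strings.Trim(strings.TrimPrefix(line, "ID="), `"`) == "ubuntu"
            }
        }
        return arch, releaseCode, isUbuntu
    }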
From 1ea59110ca883cbb2d2a00c06b13cec9703a09de Mon Sep 17 00:00:00 2001
From: zeaslity
Date: Wed, 28 Jun 2023 16:33:06 +0800
Subject: [PATCH 45/45] [ Executor ] modify base function part - 1

---
 agent-go/executor/BaseFunction.go          |  2 +-
 agent-go/executor/RealTimeExecutor.go      | 40 +++++++++++++++++--
 agent-go/executor/RealTimeExecutor_test.go |  2 +-
 .../main/java/io/wdd/server/核心功能设计.md |  3 ++
 4 files changed, 41 insertions(+), 6 deletions(-)
 create mode 100644 server/src/main/java/io/wdd/server/核心功能设计.md

diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go
index f54f95a..cf26c36 100644
--- a/agent-go/executor/BaseFunction.go
+++ b/agent-go/executor/BaseFunction.go
@@ -95,7 +95,7 @@ func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) []strin
 
 	// exec the command here
 	for _, singleLineCommand := range multiLineCommand {
-		result = append(result, ReadTimeCommandExecutor(singleLineCommand)...)
+		result = append(result, AllOutputCommandExecutor(singleLineCommand)...)
 
 		// debug usage
 		log.DebugF("exec result are => %v", result)
diff --git a/agent-go/executor/RealTimeExecutor.go b/agent-go/executor/RealTimeExecutor.go
index b6d6de1..805346f 100644
--- a/agent-go/executor/RealTimeExecutor.go
+++ b/agent-go/executor/RealTimeExecutor.go
@@ -2,11 +2,36 @@ package executor
 
 import (
 	"bufio"
+	"fmt"
 	"io"
 	"os/exec"
 )
 
-func ReadTimeCommandExecutor(singleLineCommand []string) []string {
+func ReadTimeCommandExecutor(singleLineCommand []string) {
+
+	cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		log.ErrorF("command %v stdout error => %v", singleLineCommand, err)
+	}
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		log.ErrorF("command %v stderr error => %v", singleLineCommand, err)
+	}
+
+	if err := cmd.Start(); err != nil {
+		log.ErrorF("command %v runtime error => %v", singleLineCommand, err)
+	}
+
+	go realTimeOutput(stdout)
+	go realTimeOutput(stderr)
+
+	if err := cmd.Wait(); err != nil {
+		log.ErrorF("command %v result error => %v", singleLineCommand, err)
+	}
+
+}
+
+func AllOutputCommandExecutor(singleLineCommand []string) []string {
 
 	cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...)
 	stdout, err := cmd.StdoutPipe()
@@ -23,8 +48,8 @@ func ReadTimeCommandExecutor(singleLineCommand []string) []string {
 	}
 
 	var resultSlice []string
-	resultSlice = copyOutput(stdout, resultSlice)
-	resultSlice = copyOutput(stderr, resultSlice)
+	resultSlice = collectOutput(stdout, resultSlice)
+	resultSlice = collectOutput(stderr, resultSlice)
 
 	if err := cmd.Wait(); err != nil {
 		log.ErrorF("command %v result error => %v", singleLineCommand, err)
@@ -35,7 +60,14 @@ func ReadTimeCommandExecutor(singleLineCommand []string) []string {
 	return resultSlice
 }
 
-func copyOutput(r io.Reader, resultSlice []string) []string {
+func realTimeOutput(r io.Reader) {
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		fmt.Println(scanner.Text())
+	}
+}
+
+func collectOutput(r io.Reader, resultSlice []string) []string {
 	scanner := bufio.NewScanner(r)
 	for scanner.Scan() {
 		resultLine := scanner.Text()
 
 		resultSlice = append(resultSlice, resultLine)
 		// debug usage
 		//fmt.Println(resultLine)
 	}
 
 	return resultSlice
 }
diff --git a/agent-go/executor/RealTimeExecutor_test.go b/agent-go/executor/RealTimeExecutor_test.go
index 43f44f9..e08621a 100644
--- a/agent-go/executor/RealTimeExecutor_test.go
+++ b/agent-go/executor/RealTimeExecutor_test.go
@@ -8,6 +8,6 @@ func TestReadTimeOutput(t *testing.T) {
 		"/root/IdeaProjects/ProjectOctopus/agent-go/tmp/simple.sh",
 	}
 
-	ReadTimeCommandExecutor(strings)
+	AllOutputCommandExecutor(strings)
 
 }
diff --git a/server/src/main/java/io/wdd/server/核心功能设计.md b/server/src/main/java/io/wdd/server/核心功能设计.md
new file mode 100644
index 0000000..cdf88e4
--- /dev/null
+++ b/server/src/main/java/io/wdd/server/核心功能设计.md
@@ -0,0 +1,3 @@
+1. 使用Java实现WebShell的功能
+
+2.
\ No newline at end of file
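A closing note on AllOutputCommandExecutor: it drains stdout to completion before touching stderr, so a command that fills the stderr pipe buffer while stdout is still open can stall both the child process and the agent. A sketch that reads both pipes concurrently, reusing the collectOutput and log shapes from this patch (concurrentOutputExecutor is a hypothetical variant, not part of the series):

    package executor

    import (
        "io"
        "os/exec"
        "sync"
    )

    // concurrentOutputExecutor drains stdout and stderr in parallel so neither
    // pipe can block the other; pipe-creation errors are ignored for brevity.
    func concurrentOutputExecutor(singleLineCommand []string) []string {
        cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...)
        stdout, _ := cmd.StdoutPipe()
        stderr, _ := cmd.StderrPipe()

        if err := cmd.Start(); err != nil {
            log.ErrorF("command %v runtime error => %v", singleLineCommand, err)
            return nil
        }

        var (
            mu          sync.Mutex
            wg          sync.WaitGroup
            resultSlice []string
        )
        for _, pipe := range []io.Reader{stdout, stderr} {
            wg.Add(1)
            go func(r io.Reader) {
                defer wg.Done()
                lines := collectOutput(r, nil)
                mu.Lock()
                resultSlice = append(resultSlice, lines...)
                mu.Unlock()
            }(pipe)
        }
        // finish reading both pipes before Wait closes them
        wg.Wait()

        if err := cmd.Wait(); err != nil {
            log.ErrorF("command %v result error => %v", singleLineCommand, err)
        }
        return resultSlice
    }

The trade-off is that line ordering across the two streams is no longer deterministic; when interleaving matters, stdout and stderr can instead be merged into a single pipe (for example with cmd.CombinedOutput) at the cost of no longer telling them apart.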