diff --git a/.fastRequest/collections/Root/Default Group/directory.json b/.fastRequest/collections/Root/Default Group/directory.json new file mode 100644 index 0000000..da8abdb --- /dev/null +++ b/.fastRequest/collections/Root/Default Group/directory.json @@ -0,0 +1,9 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~Default Group", + "filePath": "~.fastRequest~collections~Root~Default Group~", + "groupId": "1", + "id": "1", + "name": "Default Group", + "type": 1 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/directory.json b/.fastRequest/collections/Root/directory.json new file mode 100644 index 0000000..218c7cd --- /dev/null +++ b/.fastRequest/collections/Root/directory.json @@ -0,0 +1,9 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root", + "filePath": "~.fastRequest~collections~Root~", + "groupId": "-1", + "id": "0", + "name": "Root", + "type": 1 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/ExecutionController/directory.json b/.fastRequest/collections/Root/server/ExecutionController/directory.json new file mode 100644 index 0000000..36f435a --- /dev/null +++ b/.fastRequest/collections/Root/server/ExecutionController/directory.json @@ -0,0 +1,8 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~server~ExecutionController", + "filePath": "~.fastRequest~collections~Root~server~ExecutionController~", + "id": "20230222093108443", + "name": "ExecutionController", + "type": 3 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/ExecutionController/patchCommandToAgentAll.rapi b/.fastRequest/collections/Root/server/ExecutionController/patchCommandToAgentAll.rapi new file mode 100644 index 0000000..443ffbd --- /dev/null +++ b/.fastRequest/collections/Root/server/ExecutionController/patchCommandToAgentAll.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~ExecutionController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~ExecutionController~patchCommandToAgentAll.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.ExecutionController.patchCommandToAgentAll", + "name": "[命令]- 发送命令至所有的主机", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.ExecutionController", + "jsonDocument": "", + "method": "patchCommandToAgentAll", + "methodDescription": "[命令]- 发送命令至所有的主机", + "methodType": "POST", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/executor/command/all", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/executor/command/all", + "urlEncodedKeyValueListJson": "[{\"comment\":\"命令行\",\"customFlag\":2,\"enabled\":true,\"key\":\"commandList\",\"type\":\"String\",\"value\":\"apt-get,update\"},{\"comment\":\"\",\"customFlag\":2,\"enabled\":true,\"key\":\"type\",\"type\":\"String\",\"value\":\"\"}]", + "urlEncodedKeyValueListText": "commandList=apt-get,update\n&type=", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.ExecutionController.patchCommandToAgentAll", + "type": 2 +} \ No newline 
at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/createScriptScheduler.rapi b/.fastRequest/collections/Root/server/SchedulerController/createScriptScheduler.rapi new file mode 100644 index 0000000..f702cd0 --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/createScriptScheduler.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~createScriptScheduler.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.SchedulerController.createScriptScheduler", + "name": "新增一个定时脚本任务", + "paramGroup": { + "bodyKeyValueListJson": "{\n \"schedulerUuid\": \"\",\n \"name\": \"测试任务\",\n \"cronExpress\": \"30 */30 * * * ? *\",\n \"description\": \"这是注释内容\",\n \"scriptContent\": \"echo yes \\napt-get update \\necho no \\napt-get install nginx -y\",\n \"targetMachine\": \"Chengdu-amd64-98-98066f\",\n \"lastExecutionId\": null,\n \"lastExecutionResultKey\": \"\",\n \"lastExecutionStatus\": null ,\n \"createTime\": \"\",\n \"updateTime\": \"\",\n \"nextScheduleTime\": \"\",\n \"lastScheduleTime\": \"\"\n}", + "className": "io.wdd.rpc.controller.SchedulerController", + "jsonDocument": "{\n \"schedulerUuid\": \"No comment,Value =schedulerUuid_9dr3w\",\n \"name\": \"No comment,Value =name_ucmeh\",\n \"cronExpress\": \"No comment,Value =cronExpress_qbwqm\",\n \"description\": \"No comment,Value =description_drj0c\",\n \"scriptContent\": \"脚本任务的内容\",\n \"targetMachine\": \"执行目标机器agent_topic_name列表,使用, 分隔\",\n \"lastExecutionId\": \"与 execution_log表的主键对应,方便查询执行日志\",\n \"lastExecutionResultKey\": \"与 execution_log表的 result_key 对应,方便查询执行日志\",\n \"lastExecutionStatus\": \"任务上次执行状态\",\n \"createTime\": \"定时脚本任务创建时间\",\n \"updateTime\": \"上次更新时间\",\n \"nextScheduleTime\": \"任务下次计划执行时间\",\n \"lastScheduleTime\": \"任务上次计划执行时间\"\n}", + "method": "createScriptScheduler", + "methodDescription": "新增一个定时脚本任务", + "methodType": "POST", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/scheduler/script/create", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/scheduler/script/create", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.SchedulerController.createScriptScheduler", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/directory.json b/.fastRequest/collections/Root/server/SchedulerController/directory.json new file mode 100644 index 0000000..7cc508f --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/directory.json @@ -0,0 +1,8 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~", + "id": "20230116173428298", + "name": "SchedulerController", + "type": 3 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/queryAllQuartzJob.rapi 
b/.fastRequest/collections/Root/server/SchedulerController/queryAllQuartzJob.rapi new file mode 100644 index 0000000..80338e0 --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/queryAllQuartzJob.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~queryAllQuartzJob.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.SchedulerController.queryAllQuartzJob", + "name": "查询所有job", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.SchedulerController", + "jsonDocument": "", + "method": "queryAllQuartzJob", + "methodDescription": "查询所有job", + "methodType": "GET", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/scheduler/queryAllJob", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/scheduler/queryAllJob", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.SchedulerController.queryAllQuartzJob", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/queryAllTriggers.rapi b/.fastRequest/collections/Root/server/SchedulerController/queryAllTriggers.rapi new file mode 100644 index 0000000..6858ff6 --- /dev/null +++ b/.fastRequest/collections/Root/server/SchedulerController/queryAllTriggers.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~queryAllTriggers.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.SchedulerController.queryAllTriggers", + "name": "查询所有的触发器Trigger", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.SchedulerController", + "jsonDocument": "", + "method": "queryAllTriggers", + "methodDescription": "查询所有的触发器Trigger", + "methodType": "GET", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/scheduler/allTriggers", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/scheduler/allTriggers", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.SchedulerController.queryAllTriggers", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/SchedulerController/queryRunQuartzJob.rapi b/.fastRequest/collections/Root/server/SchedulerController/queryRunQuartzJob.rapi new file mode 100644 index 0000000..3a2d4ec --- /dev/null +++ 
b/.fastRequest/collections/Root/server/SchedulerController/queryRunQuartzJob.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~SchedulerController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~SchedulerController~queryRunQuartzJob.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.SchedulerController.queryRunQuartzJob", + "name": "查询所有运行job", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.SchedulerController", + "jsonDocument": "", + "method": "queryRunQuartzJob", + "methodDescription": "查询所有运行job", + "methodType": "POST", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/scheduler/queryRunJob", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/scheduler/queryRunJob", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.SchedulerController.queryRunQuartzJob", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/StatusController/GetHealthyStatusAgentList.rapi b/.fastRequest/collections/Root/server/StatusController/GetHealthyStatusAgentList.rapi new file mode 100644 index 0000000..0d673c6 --- /dev/null +++ b/.fastRequest/collections/Root/server/StatusController/GetHealthyStatusAgentList.rapi @@ -0,0 +1,37 @@ +{ + "activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~StatusController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~StatusController~GetHealthyStatusAgentList.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.StatusController.GetHealthyStatusAgentList", + "name": "[ 状态-Agent ] Map", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.StatusController", + "jsonDocument": "", + "method": "GetHealthyStatusAgentList", + "methodDescription": "[ 状态-Agent ] Map", + "methodType": "GET", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/status/status/agent", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/status/status/agent", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.StatusController.GetHealthyStatusAgentList", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/StatusController/ManualUpdateAgentStatus.rapi b/.fastRequest/collections/Root/server/StatusController/ManualUpdateAgentStatus.rapi new file mode 100644 index 0000000..97f1b45 --- /dev/null +++ b/.fastRequest/collections/Root/server/StatusController/ManualUpdateAgentStatus.rapi @@ -0,0 +1,37 @@ +{ + 
"activeGroup": "Default", + "apiParamGroup": {}, + "childList": [], + "description": "", + "directory": "~.fastRequest~collections~Root~server~StatusController", + "domain": "http://localhost:9999", + "enableEnv": "local", + "enableProject": "ProjectOctopus", + "filePath": "~.fastRequest~collections~Root~server~StatusController~ManualUpdateAgentStatus.rapi", + "headerList": [], + "id": "api_io.wdd.rpc.controller.StatusController.ManualUpdateAgentStatus", + "name": "手动更新Agent的状态", + "paramGroup": { + "bodyKeyValueListJson": "", + "className": "io.wdd.rpc.controller.StatusController", + "jsonDocument": "", + "method": "ManualUpdateAgentStatus", + "methodDescription": "手动更新Agent的状态", + "methodType": "POST", + "multipartKeyValueListJson": "[]", + "originUrl": "/octopus/server/status/agent/status/update", + "pathParamsKeyValueListJson": "[]", + "postScript": "", + "postType": "json", + "preScript": "", + "returnDocument": "{\n\t\"code\":\"No comment,Type =Number\",\n\t\"msg\":\"No comment,Type =String\",\n\t\"data\":{}\n}", + "tempId": "", + "url": "/octopus/server/status/agent/status/update", + "urlEncodedKeyValueListJson": "[]", + "urlEncodedKeyValueListText": "", + "urlParamsKeyValueListJson": "[]", + "urlParamsKeyValueListText": "" + }, + "tempId": "id_io.wdd.rpc.controller.StatusController.ManualUpdateAgentStatus", + "type": 2 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/StatusController/directory.json b/.fastRequest/collections/Root/server/StatusController/directory.json new file mode 100644 index 0000000..29f0ea3 --- /dev/null +++ b/.fastRequest/collections/Root/server/StatusController/directory.json @@ -0,0 +1,8 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~server~StatusController", + "filePath": "~.fastRequest~collections~Root~server~StatusController~", + "id": "20230222092955736", + "name": "StatusController", + "type": 3 +} \ No newline at end of file diff --git a/.fastRequest/collections/Root/server/directory.json b/.fastRequest/collections/Root/server/directory.json new file mode 100644 index 0000000..ee09b3f --- /dev/null +++ b/.fastRequest/collections/Root/server/directory.json @@ -0,0 +1,8 @@ +{ + "description": "", + "directory": "~.fastRequest~collections~Root~server", + "filePath": "~.fastRequest~collections~Root~server~", + "id": "20221230113234995", + "name": "server", + "type": 4 +} \ No newline at end of file diff --git a/.fastRequest/config/fastRequestCurrentProjectConfig.json b/.fastRequest/config/fastRequestCurrentProjectConfig.json new file mode 100644 index 0000000..7d39a50 --- /dev/null +++ b/.fastRequest/config/fastRequestCurrentProjectConfig.json @@ -0,0 +1,37 @@ +{ + "dataList": [ + { + "hostGroup": [ + { + "env": "local", + "url": "localhost:9090" + } + ], + "name": "OctpusGO" + } + ], + "envList": [ + "local" + ], + "headerList": [], + "postScript": "", + "preScript": "", + "projectList": [ + "OctpusGO" + ], + "syncModel": { + "branch": "master", + "domain": "https://github.com", + "enabled": false, + "namingPolicy": "byDoc", + "owner": "", + "repo": "", + "repoUrl": "", + "syncAfterRun": false, + "token": "", + "type": "github" + }, + "urlEncodedKeyValueList": [], + "urlParamsKeyValueList": [], + "urlSuffix": "" +} \ No newline at end of file diff --git a/.github/workflows/build-push-docker.yml b/.github/workflows/build-push-docker.yml index 134c2ad..46cfcdc 100644 --- a/.github/workflows/build-push-docker.yml +++ b/.github/workflows/build-push-docker.yml @@ -28,7 +28,7 @@ jobs: with: 
java-version: '11' distribution: 'temurin' - server-id: github # Value of the distributionManagement/repository/id field of the pom.xml + server-id: github # Value of the distributionManagement/repository/id field of the pom.xml-back settings-path: ${{ github.workspace }} # location for the settings.xml file cache: maven diff --git a/.run/Agent-Dev-LapPro-Ubuntu.run.xml b/.run/Agent-Dev-LapPro-Ubuntu.run.xml deleted file mode 100644 index cc4b0c2..0000000 --- a/.run/Agent-Dev-LapPro-Ubuntu.run.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - \ No newline at end of file diff --git a/.run/Agent-dev-1.run.xml b/.run/Agent-dev-1.run.xml deleted file mode 100644 index cb65223..0000000 --- a/.run/Agent-dev-1.run.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - - \ No newline at end of file diff --git a/.run/Agent-dev-oracle-s5.run.xml b/.run/Agent-dev-oracle-s5.run.xml deleted file mode 100644 index 98bef8c..0000000 --- a/.run/Agent-dev-oracle-s5.run.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - \ No newline at end of file diff --git a/.run/RunServerToRemote.run.xml b/.run/RunServerToRemote.run.xml deleted file mode 100644 index 1005446..0000000 --- a/.run/RunServerToRemote.run.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - - - \ No newline at end of file diff --git a/.run/Server-dev.run.xml b/.run/Server-dev.run.xml deleted file mode 100644 index 15e77dc..0000000 --- a/.run/Server-dev.run.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - \ No newline at end of file diff --git a/.run/Server-k3s.run.xml b/.run/Server-k3s.run.xml deleted file mode 100644 index e47d4e0..0000000 --- a/.run/Server-k3s.run.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/.run/SkipTest-Package.run.xml b/.run/SkipTest-Package.run.xml deleted file mode 100644 index ebfa812..0000000 --- a/.run/SkipTest-Package.run.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/agent-go/AgentInitialization.go b/agent-go/AgentInitialization.go new file mode 100644 index 0000000..316590c --- /dev/null +++ b/agent-go/AgentInitialization.go @@ -0,0 +1,216 @@ +package main + +import ( + "agent-go/executor" + "agent-go/g" + "agent-go/rabbitmq" + "agent-go/register" + "encoding/json" + "fmt" + "gopkg.in/yaml.v3" + "io/ioutil" + "time" +) + +var omType = g.InitOmType +var P = g.G.P + +var AgentServerInfoCache = ®ister.AgentServerInfo{} + +func INIT() *register.AgentServerInfo { + + // 获取系统的环境变量 + agentServerInfo := parseAgentServerInfo() + + agentConfig := g.G.AgentConfig + + initToServerProp := &rabbitmq.ConnectProperty{ + ExchangeName: agentConfig.GetString("octopus.message.init_exchange"), + QueueName: agentConfig.GetString("octopus.message.init_to_server"), + ExchangeType: g.QueueDirect, + TopicKey: agentConfig.GetString("octopus.message.init_to_server_key"), + } + + initFromServerProp := &rabbitmq.ConnectProperty{ + ExchangeName: agentConfig.GetString("octopus.message.init_exchange"), + QueueName: agentConfig.GetString("octopus.message.init_from_server"), + ExchangeType: g.QueueDirect, + TopicKey: agentConfig.GetString("octopus.message.init_from_server_key"), + } + + // 建立RabbitMQ的连接 + initToServerQueue := &rabbitmq.RabbitQueue{ + RabbitProp: initToServerProp, + } + defer initToServerQueue.Close() + + // 建立连接 + initToServerQueue.Connect() + + // 组装OctopusMessage + var octopusMsg *rabbitmq.OctopusMessage + octopusMsg = octopusMsg.Build( + omType, + agentServerInfo, + ) + msgBytes, err := json.Marshal(octopusMsg) + if err != nil { + log.Error(fmt.Sprintf("octopus message 
convert to json failed! msg is => %v", octopusMsg))
+	}
+
+	// send the OctopusMessage to MQ
+	P.Submit(
+		func() {
+			for !g.G.AgentHasRegister {
+
+				log.Debug(fmt.Sprintf("Send init message to server! ==> %s", string(msgBytes)))
+
+				// if the agent is alive but the server is not, keep re-sending the init message to the server
+				initToServerQueue.Send(
+					msgBytes,
+				)
+				// sleep
+				time.Sleep(10 * time.Minute)
+
+			}
+
+		})
+
+	// listen for messages on the init connection
+	initFromServerQueue := &rabbitmq.RabbitQueue{
+		RabbitProp: initFromServerProp,
+	}
+	//defer initFromServerQueue.Close()
+
+	// connect
+	initFromServerQueue.Connect()
+
+	// establish the runtime RabbitMQ connection
+	handleInitMsgFromServer(initFromServerQueue, initToServerQueue, agentServerInfo)
+
+	return agentServerInfo
+}
+
+// handleInitMsgFromServer handles the registration reply received from the server
+func handleInitMsgFromServer(initFromServerQueue *rabbitmq.RabbitQueue, initToServerQueue *rabbitmq.RabbitQueue, agentServerInfo *register.AgentServerInfo) {
+
+	initOctopusMessageDeliveries := initFromServerQueue.Read(false)
+
+	// 2023-06-19: fix for registration replies never being fully consumed
+	findRealAgentTopicName := ""
+
+	// several agents may race to consume registration replies on this queue
+	for delivery := range initOctopusMessageDeliveries {
+
+		log.Debug(fmt.Sprintf("message received from server is %s", string(delivery.Body)))
+
+		var initOctopusMsg *rabbitmq.OctopusMessage
+		err := json.Unmarshal(delivery.Body, &initOctopusMsg)
+		if err != nil {
+			log.Error(fmt.Sprintf("parse init message from server wrong, message is => %s ",
+				string(delivery.Body)))
+			continue
+		}
+
+		var serverInfo register.AgentServerInfo
+
+		s, ok := initOctopusMsg.Content.(string)
+		if !ok {
+			log.ErrorF("convert octopus message content to string failed! content => %v", initOctopusMsg.Content)
+		}
+		unmarshalErr := json.Unmarshal([]byte(s), &serverInfo)
+		if unmarshalErr != nil {
+			log.Error(fmt.Sprintf("parse init message from server wrong, message is => %v ", unmarshalErr))
+		}
+		serverName := serverInfo.ServerName
+
+		// handle the OctopusMessage
+		if initOctopusMsg != nil && initOctopusMsg.Type == g.InitOmType && serverName == agentServerInfo.ServerName {
+			// this registration reply belongs to this agent
+			log.InfoF("OctopusMessage INIT from server is this agent !")
+
+			// manually ack the message
+			delivery.Ack(false)
+
+			// flip the global registration flag
+			g.G.AgentHasRegister = true
+
+			// remember the real agent topic name
+			findRealAgentTopicName = serverInfo.TopicName
+
+			// manually close the registration queue connections
+			shutdownRegisterQueueConnection(initFromServerQueue, initToServerQueue)
+
+		} else {
+			// not this agent's registration reply -- 2023-06-19: a Nack here caused an endless redelivery loop, so Ack instead
=> %v, ==>%s", initOctopusMsg, delivery.Body)) + delivery.Ack(false) + + // 需要休眠等待不再获取相应的信息 + time.Sleep(5 * time.Second) + } + } + + // 建立 运行时 RabbitMQ连接 + rabbitmq.BuildOMsgRuntimeConnectorQueue(findRealAgentTopicName) + +} + +// shutdownRegisterQueueConnection 关闭初始化连接的两个队列 +func shutdownRegisterQueueConnection(initFromServerQueue *rabbitmq.RabbitQueue, initToServerQueue *rabbitmq.RabbitQueue) { + + initFromServerQueue.Close() + initToServerQueue.Close() + + log.InfoF("Pretend to Shutdown register queue connection !") +} + +func parseAgentServerInfo() *register.AgentServerInfo { + + // 约定文件地址为 /etc/environment.d/octopus-agent.conf + // 目前使用 + var agentServerInfo *register.AgentServerInfo + //yamlFile, err := ioutil.ReadFile("C:\\Users\\wdd\\IdeaProjects\\ProjectOctopus\\agent-go\\server-env.yaml") + yamlFile, err := ioutil.ReadFile("server-env.yaml") + + if err != nil { + panic(fmt.Errorf("failed to read YAML file: %v", err)) + } + + err = yaml.Unmarshal(yamlFile, &agentServerInfo) + if err != nil { + panic(fmt.Errorf("failed to unmarshal YAML: %v", err)) + } + + jsonFormat, err := json.Marshal(agentServerInfo) + if err != nil { + log.Error(fmt.Sprintf("agent server info convert error ! agentserverinfo is %v", agentServerInfo)) + panic(err) + } + log.Info(fmt.Sprintf("agent server info is %v", string(jsonFormat))) + + // build a operator cache + BuildAgentOsOperator(agentServerInfo) + + return agentServerInfo +} + +func BuildAgentOsOperator(agentServerInfo *register.AgentServerInfo) { + + executor.AgentOsOperatorCache = &executor.AgentOsOperator{ + InstallCommandPrefix: []string{ + "apt-get", "install", "-y", + }, + RemoveCommandPrefix: []string{"apt", "remove", "-y"}, + CanAccessInternet: true, + IsOsTypeUbuntu: true, + IsAgentInnerWall: true, + AgentArch: "amd64", + AgentOSReleaseCode: "focal", + AgentServerInfo: agentServerInfo, + } + + // debug + marshal, _ := json.Marshal(executor.AgentOsOperatorCache) + log.DebugF("cached agent operator is %s", marshal) + +} diff --git a/agent-go/IRabbitSendWriter.go b/agent-go/IRabbitSendWriter.go deleted file mode 100644 index 11186ab..0000000 --- a/agent-go/IRabbitSendWriter.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -/*type RabbitSendWriter interface { - - Send(conn *RabbitMQConn, connProp *ConnectProperty, message []byte) - - Read(conn *RabbitMQConn, connProp *ConnectProperty, autoAck bool) <-chan amqp.Delivery - -} - -*/ diff --git a/agent-go/config/OctopusMessage.go b/agent-go/config/OctopusMessage.go deleted file mode 100644 index cef83cd..0000000 --- a/agent-go/config/OctopusMessage.go +++ /dev/null @@ -1,50 +0,0 @@ -package config - -import ( - "agent-go/utils" - "encoding/json" - "fmt" - "time" -) - -type OctopusMessage struct { - UUID string `json:"uuid"` - InitTime time.Time `json:"init_time" format:"2023-03-21 16:38:30"` - Type string `json:"type"` - Content interface{} `json:"content"` - Result interface{} `json:"result"` - ACTime time.Time `json:"ac_time" format:"2023-03-21 16:38:30"` -} - -type ExecutionMessage struct { - NeedResultReplay bool `json:"needResultReplay"` - DurationTask bool `json:"durationTask,default:false"` - Type string `json:"type"` - SingleLineCommand []string `json:"singleLineCommand"` - MultiLineCommand [][]string `json:"multiLineCommand"` - PipeLineCommand [][]string `json:"pipeLineCommand"` - ResultKey string `json:"resultKey"` -} - -// BuildOctopusMsg 生成OctopusMessage -func (m *OctopusMessage) BuildOctopusMsg(omType string, content interface{}) *OctopusMessage { - - // 当前时间 - curTimeString := 
-	// 当前时间
-	curTimeString := utils.CurTimeString()
-
-	// must write to string format, otherwise it's very hard to deserialize
-
-	bytes, err := json.Marshal(content)
-	if err != nil {
-		fmt.Sprintf("OctopusMessage Build Error ! %v", err)
-	}
-
-	return &OctopusMessage{
-		UUID:     curTimeString,
-		InitTime: time.Now(),
-		Type:     omType,
-		Content:  string(bytes),
-		Result:   nil,
-		ACTime:   time.Time{},
-	}
-}
diff --git a/agent-go/executor/BaseFunction.go b/agent-go/executor/BaseFunction.go
new file mode 100644
index 0000000..cf26c36
--- /dev/null
+++ b/agent-go/executor/BaseFunction.go
@@ -0,0 +1,780 @@
+package executor
+
+import (
+	"agent-go/g"
+	"agent-go/register"
+	"fmt"
+	"strings"
+)
+
+type BaseFunc interface {
+	Exec(baseFuncName string, funcArgs ...string) []string
+}
+
+type AgentOsOperator struct {
+	InstallCommandPrefix []string `json:"install_command_prefix" comment:"apt-get install or yum install"`
+
+	RemoveCommandPrefix []string `json:"remove_command_prefix" comment:"apt-get remove or yum remove"`
+
+	CanAccessInternet bool `json:"can_access_internet" comment:"whether the host can reach the public internet"`
+
+	IsOsTypeUbuntu bool `json:"is_os_type_ubuntu" comment:"whether the host OS belongs to the Ubuntu family"`
+
+	IsAgentInnerWall bool `json:"is_agent_inner_wall" comment:"whether the host sits inside mainland China"`
+
+	AgentArch string `json:"agent_arch" comment:"host CPU architecture, amd64 or arm64"`
+
+	AgentOSReleaseCode string `json:"agent_os_release_code" comment:"OS release codename, e.g. focal"`
+
+	AgentServerInfo *register.AgentServerInfo `json:"agent_server_info"`
+}
+
+// Exec runs the named base function and returns its combined output lines
+func (op *AgentOsOperator) Exec(baseFuncName string, funcArgs ...string) []string {
+
+	var multiLineCommand [][]string
+
+	switch baseFuncName {
+
+	case "shutdownFirewall":
+		multiLineCommand = op.shutdownFirewall()
+	case "modifyHostname":
+		multiLineCommand = op.modifyHostname(funcArgs)
+	case "enableSwap":
+		multiLineCommand = op.enableSwap()
+	case "disableSwap":
+		multiLineCommand = op.disableSwap()
+	case "installDocker":
+		multiLineCommand = op.installDocker(funcArgs)
+	case "removeDocker":
+		multiLineCommand = op.removeDocker()
+	case "removeDockerCompose":
+		multiLineCommand = op.removeDockerCompose()
+	case "installDockerCompose":
+		multiLineCommand = op.installDockerCompose()
+	case "modifyDockerConfig":
+		multiLineCommand = op.modifyDockerConfig(funcArgs)
+	case "installHelm":
+		multiLineCommand = op.installHelm()
+	case "installHarbor":
+		multiLineCommand = op.installHarbor()
+	case "chronyToPublicNTP":
+		multiLineCommand = op.chronyToPublicNTP()
+	case "chronyToMaster":
+		multiLineCommand = op.chronyToMaster(funcArgs)
+	case "installZSH":
+		multiLineCommand = op.installZSH()
+	case "modifySshPort":
+		multiLineCommand = op.modifySshPort(funcArgs)
+	case "openBBR":
+		multiLineCommand = op.openBBR()
+	default:
+		multiLineCommand = op.ok(funcArgs)
+
+	}
+
+	log.DebugF("multiLineCommand are => %v", multiLineCommand)
+
+	var result []string
+
+	// exec the command here
+	for _, singleLineCommand := range multiLineCommand {
+		result = append(result, AllOutputCommandExecutor(singleLineCommand)...)
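+		// each base function above only builds a command plan ([][]string); execution
+		// happens here line by line, e.g. op.Exec("installDocker", "20") runs the
+		// generated apt/yum sequence and accumulates every output line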
+
+		// debug usage
+		log.DebugF("exec result are => %v", result)
+		for _, logLine := range result {
+			fmt.Println(logLine)
+		}
+
+	}
+
+	// normalized output lines
+	return result
+}
+
+func (op *AgentOsOperator) shutdownFirewall() [][]string {
+
+	shutdownFunc := [][]string{
+		{"systemctl", "stop", "firewalld"},
+		{"systemctl", "disable", "firewalld"},
+	}
+
+	if !op.IsOsTypeUbuntu {
+		shutdownFunc = append(shutdownFunc,
+			[]string{
+				"sed",
+				"-i",
+				"s/SELINUX=enforcing/SELINUX=disabled/g",
+				"/etc/selinux/config",
+			},
+		)
+	}
+
+	return shutdownFunc
+}
+
+func (op *AgentOsOperator) modifyHostname(args []string) [][]string {
+
+	return [][]string{}
+}
+
+func (op *AgentOsOperator) enableSwap() [][]string {
+
+	enableSwapFunc := [][]string{
+		{
+			"cp",
+			"-f",
+			"/etc/fstab_back",
+			"/etc/fstab",
+		},
+		{
+			"cat",
+			"/etc/fstab",
+		},
+	}
+
+	return enableSwapFunc
+}
+
+func (op *AgentOsOperator) disableSwap() [][]string {
+
+	disableSwapFunc := [][]string{
+		{
+			"swapoff",
+			"-a",
+		},
+		{
+			"cp",
+			"-f",
+			"/etc/fstab",
+			"/etc/fstab_back",
+		},
+		{
+			"sed",
+			"-i",
+			"/swap/d",
+			"/etc/fstab",
+		},
+	}
+
+	return disableSwapFunc
+}
+
+func (op *AgentOsOperator) removeDocker() [][]string {
+
+	removeDockerLine := append(op.RemoveCommandPrefix, []string{
+		"docker-ce",
+		"docker.io",
+		"docker-ce-cli",
+		//"docker",
+		//"docker-common",
+		//"docker-latest",
+		//"docker-latest-logrotate",
+		//"docker-logrotate",
+		//"docker-selinux",
+		//"docker-engine-selinux",
+		//"docker-engine",
+		//"kubelet",
+		//"kubeadm",
+		//"kubectl",
+		//"docker-client",
+		//"docker-client-latest",
+	}...)
+
+	removeDockerFunc := [][]string{
+		removeDockerLine,
+	}
+
+	return removeDockerFunc
+}
+
+func (op *AgentOsOperator) installDocker(args []string) [][]string {
+
+	// remove all existing docker packages first
+	installDockerFunc := op.removeDocker()
+
+	if op.IsOsTypeUbuntu {
+		//
+		installFirstLine := append(op.InstallCommandPrefix, []string{
+			"apt-transport-https",
+			"ca-certificates",
+			"curl",
+			"gnupg-agent",
+			"software-properties-common",
+		}...)
+
+		if op.IsAgentInnerWall {
+			// inner gfw
+			installDockerFunc = append(installDockerFunc, [][]string{
+				installFirstLine,
+				{
+					"curl",
+					"-o",
+					"/usr/share/keyrings/docker-utsc.gpg",
+					"https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg",
+				},
+				{
+					"apt-key",
+					"add",
+					"/usr/share/keyrings/docker-utsc.gpg",
+				},
+				{
+					"add-apt-repository",
+					"deb [arch=" + op.AgentArch + "] https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu " + op.AgentOSReleaseCode + " stable",
+				},
+			}...)
+		} else {
+			// outside world
+			installDockerFunc = append(installDockerFunc, [][]string{
+				installFirstLine,
+				{
+					"curl",
+					"-o",
+					"/usr/share/keyrings/docker.gpg",
+					"https://download.docker.com/linux/ubuntu/gpg",
+				},
+				{
+					"apt-key",
+					"add",
+					"/usr/share/keyrings/docker.gpg",
+				},
+				{
+					"add-apt-repository",
+					"deb [arch=" + op.AgentArch + "] https://download.docker.com/linux/ubuntu " + op.AgentOSReleaseCode + " stable",
+				},
+			}...)
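+			// with IsAgentInnerWall=false and the defaults used in BuildAgentOsOperator
+			// (AgentArch "amd64", AgentOSReleaseCode "focal"), the plan above expands to:
+			//   apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common
+			//   curl -o /usr/share/keyrings/docker.gpg https://download.docker.com/linux/ubuntu/gpg
+			//   apt-key add /usr/share/keyrings/docker.gpg
+			//   add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable"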
+		}
+
+		// look for specific docker-version to install
+		installDockerFunc = append(installDockerFunc, []string{"apt-get", "update"})
+
+		var specificDockerVersion string
+		// hard code here 5:20.10.10~3-0~ubuntu-focal
+		if strings.HasPrefix(args[0], "19") {
+			specificDockerVersion = "5:19.03.15~3-0~ubuntu-" + op.AgentOSReleaseCode
+		} else {
+			specificDockerVersion = "5:20.10.10~3-0~ubuntu-" + op.AgentOSReleaseCode
+		}
+
+		installDockerFunc = append(installDockerFunc,
+			append(
+				op.InstallCommandPrefix,
+				"docker-ce="+specificDockerVersion,
+				"docker-ce-cli="+specificDockerVersion,
+				"containerd.io",
+				"docker-compose-plugin",
+			),
+		)
+
+	} else {
+		installFirstLine := append(op.InstallCommandPrefix,
+			[]string{
+				"yum-utils",
+				"device-mapper-persistent-data",
+				"lvm2",
+			}...,
+		)
+
+		if op.IsAgentInnerWall {
+			// inner gfw
+			installDockerFunc = append(installDockerFunc, [][]string{
+				installFirstLine,
+				{
+					"yum-config-manager",
+					"--add-repo",
+					"https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo",
+				},
+				{
+					"sed",
+					"-i",
+					"s/download.docker.com/mirrors.ustc.edu.cn\\/docker-ce/g",
+					"/etc/yum.repos.d/docker-ce.repo",
+				},
+			}...)
+		} else {
+			// outside world
+		}
+
+	}
+
+	return installDockerFunc
+}
+
+func (op *AgentOsOperator) removeDockerCompose() [][]string {
+
+	installDockerComposeFunc := [][]string{
+		append(
+			op.RemoveCommandPrefix,
+			"docker-compose",
+		),
+	}
+
+	return installDockerComposeFunc
+}
+
+func (op *AgentOsOperator) installDockerCompose() [][]string {
+
+	installDockerComposeFunc := [][]string{
+		append(
+			op.InstallCommandPrefix,
+			"docker-compose",
+		),
+	}
+
+	return installDockerComposeFunc
+}
+
+func (op *AgentOsOperator) installHelm() [][]string {
+	installHelmFunc := [][]string{
+		{
+			"mkdir",
+			"-p",
+			"/root/wdd/",
+		},
+		{
+			"rm",
+			"-rf",
+			"/root/wdd/helm-v*",
+		},
+		{
+			"rm",
+			"-rf",
+			"/root/wdd/linux-amd64",
+		},
+		{
+			"wget",
+			"--no-check-certificate",
+			g.BaseFuncOssUrlPrefix + "helm-v3.12.1-linux-amd64.tar.gz",
+			"-O",
+			"/root/wdd/helm-v3.12.1-linux-amd64.tar.gz",
+		},
+		{
+			"tar",
+			"-zvxf",
+			"/root/wdd/helm-v3.12.1-linux-amd64.tar.gz",
+		},
+		{
+			"chmod",
+			"+x",
+			"/root/wdd/linux-amd64/helm",
+		},
+		{
+			"mv",
+			"/root/wdd/linux-amd64/helm",
+			"/usr/local/bin/helm",
+		},
+		{
+			"helm",
+			"version",
+		},
+	}
+
+	/*if op.IsOsTypeUbuntu {
+		installHelmFunc = [][]string{
+			{
+				"curl",
+				"-o",
+				"/usr/share/keyrings/helm.gpg",
+				"https://baltocdn.com/helm/signing.asc",
+			},
+			{
+				"apt-key",
+				"add",
+				"/usr/share/keyrings/helm.gpg",
+			},
+			{
+				"add-apt-repository",
+				"https://baltocdn.com/helm/stable/debian/ all main",
+			},
+			{
+				"apt-get",
+				"update",
+			},
+			append(op.InstallCommandPrefix, "helm"),
+		}
+	} else {
+		log.ErrorF("Operation OS is CentOS, Helm not installed!")
+	}*/
+
+	return installHelmFunc
+}
+
+func (op *AgentOsOperator) modifyDockerConfig(args []string) [][]string {
+
+	harborIPAddr := args[0] + ":8033"
+
+	modifyDockerConfigFunc := [][]string{
+		{
+			"mv",
+			"/etc/docker/daemon.json",
+			"/etc/docker/daemon.backup.json",
+		},
+		{
+			"wget",
+			g.BaseFuncOssUrlPrefix + "daemon-config.json",
+			"-O",
+			"/etc/docker/daemon.json",
+		},
+		{
+			"sed",
+			"-i",
+			"s/$DockerRegisterDomain/" + harborIPAddr + "/g",
+			"/etc/docker/daemon.json",
+		},
+		{
+			"systemctl",
+			"restart",
+			"docker.service",
+		},
+	}
+
+	return modifyDockerConfigFunc
+}
+
+func (op *AgentOsOperator) installHarbor() [][]string {
+
+	installHarborFunc := [][]string{
+		//{
+		//	"mkdir",
+		//	"-p",
+		//	"/root/wdd/",
+		//},
+		//{
+		//	"rm",
+ // "-rf", + // "/root/wdd/harbor-offline-installer-v2.1.0.tgz", + //}, + //{ + // "wget", + // "--no-check-certificate", + // g.BaseFuncOssUrlPrefix + "harbor-offline-installer-v2.1.0.tgz", + // "-O", + // "/root/wdd/harbor-offline-installer-v2.1.0.tgz", + //}, + { + "tar", + "-zvxf", + "/root/wdd/harbor-offline-installer-v2.1.0.tgz", + "-C", + "/root/wdd/", + }, + { + "rm", + "-rf", + "/root/wdd/harbor/harbor.yml", + }, + { + "wget", + "--no-check-certificate", + g.BaseFuncOssUrlPrefix + "harbor-config-template.yml", + "-O", + "/root/wdd/harbor/harbor.yml", + }, + { + "sed", + "-i", + "s/$HarborHostName/" + op.AgentServerInfo.ServerIPInV4 + "/g", + "/root/wdd/harbor/harbor.yml", + }, + { + "sed", + "-i", + "s/$HarborHostPort/8033/g", + "/root/wdd/harbor/harbor.yml", + }, + { + "sed", + "-i", + "s/$HarborHostPort/V2ryStr@ngPss/g", + "/root/wdd/harbor/harbor.yml", + }, + { + "/root/wdd/harbor/install.sh", + "--with-chartmuseum", + }, + } + + return installHarborFunc +} + +func (op *AgentOsOperator) chronyToPublicNTP() [][]string { + + serverIPInV4 := op.AgentServerInfo.ServerIPInV4 + internalIPCIDR := strings.Join(strings.Split(serverIPInV4, ".")[:2], ".") + ".0.0/16" + + chronyToPublicNTPFunc := [][]string{ + append( + op.InstallCommandPrefix, + "chrony", + ), + { + "systemctl", + "enable", + "chronyd", + }, + { + "systemctl", + "start", + "chronyd", + }, + } + + var chronyFile string + if op.IsOsTypeUbuntu { + chronyFile = "/etc/chrony/chrony.conf" + } else { + chronyFile = "/etc/chrony.conf" + } + + chronyToPublicNTPFunc = append(chronyToPublicNTPFunc, + [][]string{ + { + "sed", + "-i", + "$ a allow " + internalIPCIDR, + chronyFile, + }, + { + "sed", + "-i", + "s/pool ntp.ubuntu.com iburst/server ntp2.aliyun.com iburst/g", + chronyFile, + }, + { + "systemctl", + "restart", + "chronyd", + }, + { + "sleep", + "2", + }, + { + "chronyc", + "-n", + "sources", + "-v", + }, + { + "chronyc", + "tracking", + }, + { + "timedatectl", + "set-timezone", + "Asia/Shanghai", + }, + { + "timedatectl", + "set-ntp", + "true", + }, + { + "systemctl", + "restart", + "rsyslog", + }, + }..., + ) + + return chronyToPublicNTPFunc +} + +func (op *AgentOsOperator) chronyToMaster(args []string) [][]string { + masterInnerIP := args[0] + + chronyToMasterFunc := [][]string{ + { + "sed", + "-i", + "$ a NTP=" + masterInnerIP, + "/etc/systemd/timesyncd.conf", + }, + { + "systemctl", + "daemon-reload", + }, + { + "systemctl", + "restart", + "systemd-timesyncd.service", + }, + { + "sleep", + "3", + }, + { + "timedatectl", + "show-timesync", + "--all", + }, + { + "timedatectl", + "status", + }, + } + + return chronyToMasterFunc +} + +func (op *AgentOsOperator) installZSH() [][]string { + + installZSHFunc := [][]string{ + { + "mkdir", + "-p", + "/root/wdd/", + }, + append( + op.InstallCommandPrefix, + "zsh", + "git", + ), + } + + if op.IsAgentInnerWall { + installZSHFunc = append( + installZSHFunc, + [][]string{ + { + "wget", + "https://cdn.jsdelivr.net/gh/robbyrussell/oh-my-zsh@master/tools/install.sh", + "-O", + "/root/wdd/zsh-install.sh", + }, + }..., + ) + + } else { + installZSHFunc = append( + installZSHFunc, + [][]string{ + { + "wget", + "https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh", + "-O", + "/root/wdd/zsh-install.sh", + }, + }..., + ) + } + + // install + installZSHFunc = append( + installZSHFunc, + [][]string{ + { + "chmod", + "+x", + "/root/wdd/zsh-install.sh", + }, + { + "sh", + "-c", + "/root/wdd/zsh-install.sh", + }, + }..., + ) + + // modify ZSH + if !op.IsAgentInnerWall { + 
installZSHFunc = append( + installZSHFunc, + [][]string{ + { + "git", + "clone", + "https://github.com.cnpmjs.org/zsh-users/zsh-autosuggestions", + "~/.oh-my-zsh/plugins/zsh-autosuggestions", + }, + { + "git", + "clone", + "https://github.com.cnpmjs.org/zsh-users/zsh-syntax-highlighting.git", + "~/.oh-my-zsh/plugins/zsh-syntax-highlighting", + }, + { + "wget", + "https://b2.107421.xyz/oh-my-zsh-plugins-list.txt", + "-O", + "oh-my-zsh-plugins-list.txt", + }, + { + "wget", + "-c", + "-i", + "./oh-my-zsh-plugins-list.txt", + "-P", + "~/.oh-my-zsh/plugins/", + }, + { + "sed", + "-i", + "s/robbyrussell/agnoster/g", + "~/.zshrc", + }, + { + "sed", + "-i", + "s/^# DISABLE_AUTO_UPDATE=\"true\"/DISABLE_AUTO_UPDATE=\"true\"/g", + "~/.zshrc", + }, + { + "sed", + "-i", + "s/plugins=(git)/plugins=(git zsh-autosuggestions zsh-syntax-highlighting command-not-found z themes)/g", + "~/.zshrc", + }, + { + "source", + "~/.zshrc", + }, + { + "chsh", + "-s", + "/bin/zsh", + }, + { + "zsh", + }, + }..., + ) + } + + return installZSHFunc +} + +func (op *AgentOsOperator) modifySshPort(args []string) [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) openBBR() [][]string { + + return [][]string{} +} + +func (op *AgentOsOperator) ok(args []string) [][]string { + log.InfoF("base function is ok , args are => " + strings.Join(args, " ")) + return [][]string{ + {"ifconfig"}, + } +} diff --git a/agent-go/executor/BaseFunction_test.go b/agent-go/executor/BaseFunction_test.go new file mode 100644 index 0000000..358cdaa --- /dev/null +++ b/agent-go/executor/BaseFunction_test.go @@ -0,0 +1,60 @@ +package executor + +import ( + "agent-go/register" + "testing" +) + +var agentOP = &AgentOsOperator{ + InstallCommandPrefix: []string{ + "apt-get", "install", "-y", + }, + RemoveCommandPrefix: []string{"apt", "remove", "-y"}, + CanAccessInternet: true, + IsOsTypeUbuntu: true, + IsAgentInnerWall: true, + AgentArch: "amd64", + AgentOSReleaseCode: "focal", + AgentServerInfo: ®ister.AgentServerInfo{ + ServerName: "", + ServerIPPbV4: "", + ServerIPInV4: "192.168.0.8", + ServerIPPbV6: "", + ServerIPInV6: "", + Location: "", + Provider: "", + ManagePort: "", + CPUCore: "", + CPUBrand: "", + OSInfo: "", + OSKernelInfo: "", + TCPControl: "", + Virtualization: "", + IoSpeed: "", + MemoryTotal: "", + DiskTotal: "", + DiskUsage: "", + Comment: "", + MachineID: "", + AgentVersion: "", + TopicName: "", + }, +} + +func TestBaseFunc(t *testing.T) { + + //agentOP.Exec("shutdownFirewall") + //agentOP.Exec("modifyHostname") + //agentOP.Exec("disableSwap") + //agentOP.Exec("enableSwap") + //agentOP.Exec("removeDocker") + agentOP.Exec("installDocker", "20") + //agentOP.Exec("removeDockerCompose") + //agentOP.Exec("installDockerCompose") + //agentOP.Exec("installHelm") + //agentOP.Exec("installHarbor") + //agentOP.Exec("chronyToPublicNTP") + //agentOP.Exec("chronyToMaster", "192.168.0.8") + //agentOP.Exec("installZSH") + +} diff --git a/agent-go/executor/CommandExecutor.go b/agent-go/executor/CommandExecutor.go index 076ffe6..727198c 100644 --- a/agent-go/executor/CommandExecutor.go +++ b/agent-go/executor/CommandExecutor.go @@ -1,31 +1,56 @@ package executor import ( - "agent-go/config" - "agent-go/g" + logger2 "agent-go/logger" "bufio" "bytes" "fmt" "os/exec" - "time" + "strings" ) -var log = g.G.LOG +type ExecutionMessage struct { + NeedResultReplay bool `json:"needResultReplay"` + DurationTask bool `json:"durationTask,default:false"` + Type string `json:"type"` + BaseFuncContent []string `json:"baseFuncContent"` + 
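	// BaseFuncContent[0] is the base-function name and the rest are its arguments,
+	// e.g. ["installDocker", "20"]; it is only consulted when Type starts with "BASE"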
+	SingleLineCommand []string   `json:"singleLineCommand"`
+	MultiLineCommand  [][]string `json:"multiLineCommand"`
+	PipeLineCommand   [][]string `json:"pipeLineCommand"`
+	ResultKey         string     `json:"resultKey"`
+}

-func Execute(om *config.OctopusMessage, em *config.ExecutionMessage) ([]string, error) {
+var log = logger2.Log
+
+var AgentOsOperatorCache = &AgentOsOperator{}
+
+func Execute(em *ExecutionMessage) ([]string, error) {

	var resultLog []string
	var err error
+	var realCommand [][]string
+
+	if strings.HasPrefix(em.Type, "BASE") {
+		// base function
+		resultLog = AgentOsOperatorCache.Exec(em.BaseFuncContent[0], em.BaseFuncContent[1:]...)
+		err = nil

-	if em.PipeLineCommand != nil && len(em.PipeLineCommand) != 0 {
-		// 管道命令
-		resultLog, err = PipeLineCommandExecutor(em.PipeLineCommand)
-	} else if em.MultiLineCommand != nil && len(em.MultiLineCommand) != 0 {
-		// 多行命令
-		resultLog, err = MultiLineCommandExecutor(em.MultiLineCommand)
	} else {
-		// 单行命令
-		resultLog, err = SingleLineCommandExecutor(em.SingleLineCommand)
+		// shell command
+
+		if em.PipeLineCommand != nil && len(em.PipeLineCommand) != 0 {
+			// pipeline command
+			resultLog, err = PipeLineCommandExecutor(em.PipeLineCommand)
+			realCommand = em.PipeLineCommand
+		} else if em.MultiLineCommand != nil && len(em.MultiLineCommand) != 0 {
+			// multi-line command
+			resultLog, err = MultiLineCommandExecutor(em.MultiLineCommand)
+			realCommand = em.MultiLineCommand
+		} else {
+			// single-line command
+			resultLog, err = SingleLineCommandExecutor(em.SingleLineCommand)
+			realCommand = [][]string{em.SingleLineCommand}
+		}
	}

	// 归一化错误和日志
@@ -33,69 +58,40 @@ func Execute(om *config.OctopusMessage, em *config.ExecutionMessage) ([]string,
		resultLog = append(resultLog, fmt.Sprintf("Error: %s", err.Error()))
	}

-	// 处理执行日志
-	// 是否需要返回处理日志,现在默认返回
-	if em.NeedResultReplay {
-		// 需要返回处理结果
-		om.ACTime = time.Now()
-		om.Result = resultLog
-	}
+	commandResult := fmt.Sprintf("Execution Commands are => %v, Executor Result: %v", realCommand, resultLog)

-	log.Info(fmt.Sprintf("Executor Result: %s", resultLog))
+	log.Info(commandResult)

	return resultLog, err
}

func PipeLineCommandExecutor(pipeLineCommand [][]string) ([]string, error) {

-	var cmds []*exec.Cmd
-
-	// 创建每个命令对象,并将前一个命令的标准输出连接到当前命令的标准输入
-	for i, partOfCommand := range pipeLineCommand {
-		cmd := exec.Command(partOfCommand[0], partOfCommand[1:]...)
-		if i > 0 {
-			prevCmd := cmds[i-1]
-			out, err := prevCmd.StdoutPipe()
-			if err != nil {
-				return nil, err
-			}
-			cmd.Stdin = out
+	var output []byte
+	var err error
+	for i, command := range pipeLineCommand {
+		cmd := exec.Command(command[0], command[1:]...)
+		cmd.Stdin = bytes.NewReader(output)
+		output, err = cmd.Output()
+		if err != nil {
+			return strings.Split(string(output), "\n"), err
+		}
+		if i == len(pipeLineCommand)-1 {
+			return strings.Split(string(output), "\n"), nil
		}
-		cmds = append(cmds, cmd)
	}
-
-	// 执行最后一个命令,并获取其输出
-	lastCmd := cmds[len(cmds)-1]
-
-	var out bytes.Buffer
-
-	lastCmd.Stdout = &out
-	lastCmd.Stderr = &out
-	err := lastCmd.Run()
-
-	scanner := bufio.NewScanner(&out)
-	var result []string
-	for scanner.Scan() {
-		result = append(result, scanner.Text())
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	return result, nil
+	return []string{}, nil
}

func MultiLineCommandExecutor(multiLineCommandExecutor [][]string) ([]string, error) {

	var res []string
-
	for _, singleLineCommand := range multiLineCommandExecutor {
		singleLogs, err := SingleLineCommandExecutor(singleLineCommand)
-		res := append(res, singleLogs...)
+		res = append(res, singleLogs...)
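+		// a failing line stops the batch: the error is returned together with
+		// everything collected so far, so callers still see partial output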
		if err != nil {
+			log.Error(fmt.Sprintf("Execution error ! command is %v, error is %v", singleLineCommand, err))

			return res, err
		}
@@ -118,7 +114,6 @@ func SingleLineCommandExecutor(singleLineCommand []string) ([]string, error) {
	var result []string
	for scanner.Scan() {
		result = append(result, scanner.Text())
-
	}

	if err != nil {
diff --git a/agent-go/executor/RealTimeExecutor.go b/agent-go/executor/RealTimeExecutor.go
new file mode 100644
index 0000000..805346f
--- /dev/null
+++ b/agent-go/executor/RealTimeExecutor.go
@@ -0,0 +1,81 @@
+package executor
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os/exec"
+)
+
+func ReadTimeCommandExecutor(singleLineCommand []string) {
+	cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		log.ErrorF("command %v stdout error => %v", singleLineCommand, err)
+	}
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		log.ErrorF("command %v stderr error => %v", singleLineCommand, err)
+	}
+
+	if err := cmd.Start(); err != nil {
+		log.ErrorF("command %v runtime error => %v", singleLineCommand, err)
+	}
+
+	go realTimeOutput(stdout)
+	go realTimeOutput(stderr)
+
+	if err := cmd.Wait(); err != nil {
+		log.ErrorF("command %v result error => %v", singleLineCommand, err)
+	}
+
+}
+
+func AllOutputCommandExecutor(singleLineCommand []string) []string {
+
+	cmd := exec.Command(singleLineCommand[0], singleLineCommand[1:]...)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		log.ErrorF("command %v stdout error => %v", singleLineCommand, err)
+	}
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		log.ErrorF("command %v stderr error => %v", singleLineCommand, err)
+	}
+
+	if err := cmd.Start(); err != nil {
+		log.ErrorF("command %v runtime error => %v", singleLineCommand, err)
+	}
+
+	var resultSlice []string
+	resultSlice = append(resultSlice, collectOutput(stdout)...)
+	resultSlice = append(resultSlice, collectOutput(stderr)...)
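+	// both pipes are drained before Wait, as os/exec requires; draining them
+	// sequentially assumes stderr stays within the pipe buffer while stdout is
+	// read, which the short commands used here are expected to satisfy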
+
+	if err := cmd.Wait(); err != nil {
+		log.ErrorF("command %v result error => %v", singleLineCommand, err)
+	}
+
+	//log.DebugF("real time exec result are %v", resultSlice)
+
+	return resultSlice
+}
+
+func realTimeOutput(r io.Reader) {
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		fmt.Println(scanner.Text())
+	}
+}
+
+func collectOutput(r io.Reader) []string {
+	var resultSlice []string
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		resultLine := scanner.Text()
+
+		resultSlice = append(resultSlice, resultLine)
+		// debug usage
+		//fmt.Println(resultLine)
+	}
+
+	return resultSlice
+}
diff --git a/agent-go/executor/RealTimeExecutor_test.go b/agent-go/executor/RealTimeExecutor_test.go
new file mode 100644
index 0000000..e08621a
--- /dev/null
+++ b/agent-go/executor/RealTimeExecutor_test.go
@@ -0,0 +1,13 @@
+package executor
+
+import "testing"
+
+func TestReadTimeOutput(t *testing.T) {
+	command := []string{
+		"/bin/bash",
+		"/root/IdeaProjects/ProjectOctopus/agent-go/tmp/simple.sh",
+	}
+
+	AllOutputCommandExecutor(command)
+
+}
diff --git a/agent-go/executor/script/1_node_important.sh b/agent-go/executor/script/1_node_important.sh
new file mode 100644
index 0000000..47335f6
--- /dev/null
+++ b/agent-go/executor/script/1_node_important.sh
@@ -0,0 +1,2016 @@
+#!/bin/bash
+# bashsupport disable=BP2001

+# This script initializes a Linux server environment in one shot,
+# preparing it for an automated k8s deployment via rancher.

+# Decide which node this script runs on; run the nodes in the following order:
+# HarborServer
+# WorkerServer
+# MinioServer
+# GlusterServer (run on Storage-2)
+# HeketiServer (run on Storage-1)

+### EDIT THE VALUES BELOW ###
+### EDIT THE VALUES BELOW ###
+### EDIT THE VALUES BELOW ###

+WhichNodeRun=main
+DOCKER_VERSION=20.10.15 # docker version, usually left unchanged
+HarborHostName=192.168.0.8 # must be reachable from the intranet servers, usually the public server's external IP
+HarborHostPort=8033 # harbor port, usually left unchanged
+HarborAdminPass=V2ryStr@ngPss # harbor admin password, usually left unchanged
+PublicServerIPs=(192.168.0.8) # intranet IPs of the servers that can reach the public internet
+PrivateServerIPs=(192.168.0.65 192.168.0.45 192.168.0.7) # intranet-only server IPs, excluding those with public access
+StorageServerIPs=(192.168.0.2) # storage server IPs; heketi is installed on the first one, which also acts as the NFS server
+NfsPath=/nfsdata # nfs path, must be created and given permissions beforehand
+MinioStorageType=volume # "volume" or "pv": whether the Minio cluster uses 4 PVs or 1 PV with 4 volumes underneath
+OSSPublicURL=https://oss2.demo.uavcmlc.com:18000/wangdada/ # optional; if unset, all images (3GB) are pulled from DockerHub

+### EDIT THE VALUES ABOVE ###
+### EDIT THE VALUES ABOVE ###
+### EDIT THE VALUES ABOVE ###

+CMD_INSTALL=""
+CMD_UPDATE=""
+CMD_REMOVE=""
+SOFTWARE_UPDATED=0
+LinuxReleaseVersion=""
+DockerRegisterDomain=${HarborHostName}:${HarborHostPort}
+ProxyOfflineFile=${OSSPublicURL}"proxy-offline.tar.gz"
+HarborOfflineFile=${OSSPublicURL}"harbor-offline-installer-v2.1.0.tgz" # optional; otherwise the Harbor offline installer (550MB) is pulled from GitHub
+DockerComposeFile=${OSSPublicURL}"docker-compose-Linux-x86_64"
+HelmOfflineFile=${OSSPublicURL}"helm-v3.4.0-linux-amd64.tar.gz"
+NginxOfflineFile=${OSSPublicURL}"nginx-1.20.1-1.el7.ngx.x86_64.rpm"
+HeketiOfficeFile=${OSSPublicURL}"heketi-v9.0.0.linux.amd64.tar.gz"
+HeketiConfigOfflineFile=${OSSPublicURL}"heketi-config.tar.gz"

+RED="31m"    ## deep red
+GREEN="32m"  ## teal green
+YELLOW="33m" ## mustard yellow
+PURPLE="35m" ## violet
+BLUE="36m"   ## sky blue

+######## the color helper below is a neat trick ############
+######## the color helper below is a neat trick ############
+colorEcho() {
+	# shellcheck disable=SC2145
+	echo -e "\033[${1}${@:2}\033[0m" 1>&2
+}

+check_root() {
+	if [[ $EUID != 0 ]]; then
+		colorEcho ${RED} "当前非root账号(或没有root权限),无法继续操作,请更换root账号!"
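+		# $EUID is the effective user id; only 0 (root) may proceed, so a typical
+		# invocation is: sudo bash ./1_node_important.sh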
+ colorEcho ${YELLOW} "使用sudo -命令获取临时root权限(执行后可能会提示输入root密码)" + exit 1 + fi +} + +# 判断命令是否存在 +command_exists() { + command -v "$@" >/dev/null 2>&1 +} + +####### 获取系统版本及64位或32位信息 +check_sys() { + ## 判定Linux的发行版本 + if [ -f /etc/redhat-release ]; then + LinuxReleaseVersion="centos" + elif cat /etc/issue | grep -Eqi "debian"; then + LinuxReleaseVersion="debian" + elif cat /etc/issue | grep -Eqi "ubuntu"; then + LinuxReleaseVersion="ubuntu" + elif cat /etc/issue | grep -Eqi "centos|red hat|redhat"; then + LinuxReleaseVersion="centos" + elif cat /proc/version | grep -Eqi "debian"; then + LinuxReleaseVersion="debian" + elif cat /proc/version | grep -Eqi "ubuntu"; then + LinuxReleaseVersion="ubuntu" + elif cat /proc/version | grep -Eqi "centos|red hat|redhat"; then + LinuxReleaseVersion="centos" + else + LinuxReleaseVersion="" + fi + + # 判断系统的包管理工具 apt, yum, or zypper + getPackageManageTool() { + if [[ -n $(command -v apt-get) ]]; then + CMD_INSTALL="apt-get -y -qq install" + CMD_UPDATE="apt-get -qq update" + CMD_REMOVE="apt-get -y remove" + elif [[ -n $(command -v yum) ]]; then + CMD_INSTALL="yum -y -q install" + CMD_UPDATE="yum -q makecache" + CMD_REMOVE="yum -y remove" + elif [[ -n $(command -v zypper) ]]; then + CMD_INSTALL="zypper -y install" + CMD_UPDATE="zypper ref" + CMD_REMOVE="zypper -y remove" + else + return 1 + fi + return 0 + } + + # 检查系统包管理方式,更新包 + getPackageManageTool + if [[ $? -eq 1 ]]; then + colorEcho ${RED} "系统的包管理不是 APT or YUM, 请手动安装所需要的软件." + return 1 + fi + + ### 更新程序引索 + if [[ $SOFTWARE_UPDATED -eq 0 ]]; then + colorEcho ${BLUE} "正在更新软件包管理..." + $CMD_UPDATE + SOFTWARE_UPDATED=1 + fi + return 0 +} + +## 安装所需要的程序,及依赖程序 +installDemandSoftwares() { + for software in "$@"; do + ## 安装该软件 + if [[ -n $(command -v "${software}") ]]; then + colorEcho ${GREEN} "${software}已经安装了...跳过..." + echo "" + else + colorEcho ${BLUE} "正在安装 ${software}..." + $CMD_INSTALL "${software}" + ## 判断该软件是否安装成功 + if [[ $? -ne 0 ]]; then + colorEcho ${RED} "安装 ${software} 失败。" + colorEcho ${RED} "如果是重要软件,本脚本会自动终止!!" + colorEcho ${PURPLE} "一般软件,本脚本会忽略错误并继续运行,请之后手动安装该程序。" + return 1 + else + colorEcho ${GREEN} "已经成功安装 ${software}." + colorEcho ${GREEN} "-----------------------------------------------------------" + echo "" + fi + fi + done + return 0 +} + +shutdownFirewall() { + ## 关闭防火墙、SElinux、Swap + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始关闭系统的防火墙…………" + systemctl stop firewalld + systemctl disable firewalld + echo "" + + if [ "${LinuxReleaseVersion}" = "centos" ]; then + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${BLUE} "开始关闭SELinux……" + setenforce 0 + sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config + colorEcho ${GREEN} " SELinux关闭完成 " + else + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" 
+ colorEcho ${GREEN} "无需关闭SELinux,现在 跳过" + fi + echo "" +} + +disableSwap() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始关闭系统的虚拟内存…………" + swapoff -a + colorEcho ${GREEN} " 关闭完成 " + echo "" + colorEcho ${BLUE} "正在备份系统的文件系统表……" + cp -f /etc/fstab /etc/fstab_bak + colorEcho ${GREEN} " 备份完成 " + echo "" + colorEcho ${BLUE} "正在修改文件系统表,去除虚拟内存的部分……" + cat /etc/fstab_bak | grep -v swap >/etc/fstab + colorEcho ${GREEN} " 修改完成 " + echo "" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" +} + +## 安装docker时,修改系统的配置文件 +modifySystemConfig_Docker() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始修改系统内核参数…………" + ## 配置内核参数 + cat >/etc/sysctl.d/k8s.conf <>~/.ssh/authorized_keys + chmod 600 ~/.ssh/authorized_keys + colorEcho ${GREEN} "--------------------------------------------------------------" + colorEcho ${GREEN} "-----------本机配置完成!-------------" + echo "" + + # bashsupport disable=BP2001 + AllInnerServer=("${PrivateServerIPs[@]}" "${StorageServerIPs[@]}") + + for ip in "${AllInnerServer[@]}"; do + colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行" + colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行" + colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行" + colorEcho ${BLUE} "-----------------------------------------------" + echo "" + echo "" + colorEcho ${RED} " 请以 root 角色 运行!!! " + colorEcho ${RED} " 请以 root 角色 运行!!! " + colorEcho ${RED} " 请以 root 角色 运行!!! " + echo "" + colorEcho ${YELLOW} 'sed -i -e "/PermitRootLogin no/ d" -e "$ a PermitRootLogin yes" /etc/ssh/sshd_config && systemctl restart sshd' + echo "" + colorEcho ${YELLOW} "ssh-keygen -t rsa -P \"\" -f ~/.ssh/id_rsa && echo \"$(cat ~/.ssh/id_rsa.pub)\" >> ~/.ssh/authorized_keys && echo \"\" && cat ~/.ssh/authorized_keys" + echo "" + echo "" + while true; do + colorEcho ${RED} "请确保您已经将上述的命令在主机${ip}上执行了!!" + read -r -p "请输入yes进行确认,脚本才可继续运行!!" input + case $input in + yes) + colorEcho ${GREEN} "您已确认在主机${ip}上添加了私有的ssh key!" + echo "" + break + ;; + *) + echo "" + colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认" + colorEcho ${RED} "请在主机${ip}上执行上述命令!!!" + colorEcho ${RED} "否则本脚本的功能会失效!!" + colorEcho ${RED} "-----------------------------------------------------" + echo "" + ;; + esac + done + + colorEcho ${GREEN} "----------------------------------------------------------" + done + echo "" +} + +downloadDocker() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "准备下载Docker的离线安装包…………" + colorEcho ${GREEN} "--------------------------------------------------------------" + colorEcho ${BLUE} "您选择安装的docker版本为:${DOCKER_VERSION}" + echo "" + + ## 清理docker环境 + colorEcho ${BLUE} "开始清理docker环境,卸载先前的相关安装内容!!" + $CMD_REMOVE docker docker-client docker-client-latest docker-ce-cli \ + docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux \ + docker-engine kubelet kubeadm kubectl + colorEcho ${GREEN} "----------------- docker环境清理完成 -----------------" + echo "" + + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" 
+ echo "" + if [ "$LinuxReleaseVersion" = "centos" ]; then + ## 安装docker的依赖 + colorEcho ${BLUE} "正在下载安装docker的所需要的依赖" + yum install -y -q --downloadonly --downloaddir=/tmp/docker-${DOCKER_VERSION}/depends yum-utils device-mapper-persistent-data lvm2 + colorEcho ${GREEN} " 下载完成 " + colorEcho ${GREEN} "查看已经下载的相关依赖安装包……" + ls /tmp/docker-${DOCKER_VERSION}/depends/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + ## 添加docker的yum源 + colorEcho ${BLUE} "正在添加docker的yum源…………" + yum-config-manager --add-repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo + if [[ -f /etc/yum.repos.d/docker-ce.repo ]]; then + sed -i 's/download.docker.com/mirrors.ustc.edu.cn\/docker-ce/g' /etc/yum.repos.d/docker-ce.repo + colorEcho ${BLUE} "已成功添加中科大的docker-ce的yum源!" + echo "" + colorEcho ${BLUE} "可以安装的docker-ce的 $(echo ${DOCKER_VERSION} | cut -d"." -f1) 版本为:" + colorEcho ${GREEN} "--------------------------------------------------------------" + yum list docker-ce --showduplicates | grep $(echo ${DOCKER_VERSION} | cut -d"." -f1) | awk '{print$2}' | cut -d ":" -f2 | sort -n -t - -k 1.7 + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + yum install -y -q --downloadonly --downloaddir=/tmp/docker-${DOCKER_VERSION} docker-ce-${DOCKER_VERSION} docker-ce-cli-${DOCKER_VERSION} + colorEcho ${GREEN} " 下载完成 " + echo "" + colorEcho ${GREEN} "查看已经下载的Docker安装包……" + ls /tmp/docker-${DOCKER_VERSION}/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + else + colorEcho ${RED} "docker的yum源添加失败,请手动添加" + exit 1 + fi + else + colorEcho ${BLUE} "开始安装相关的Docker基础组件" + installDemandSoftwares apt-transport-https ca-certificates curl gnupg-agent software-properties-common + + colorEcho ${BLUE} "开始添加中科大的docker源的apt-key" + curl -fsSL https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg | sudo apt-key add - + + colorEcho ${BLUE} "开始添加中科大的docker源的apt源" + add-apt-repository \ + "deb [arch=amd64] https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu \ + $(lsb_release -cs) \ + stable" + + # 国外的情况 + # colorEcho ${BLUE} "开始添加中科大的docker源的apt-key" + # curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + # + # colorEcho ${BLUE} "开始添加中科大的docker源的apt源" + # echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + # $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + + colorEcho ${BLUE} "正在执行更新操作!!" + apt-get update + + colorEcho ${BLUE} "查看特定版本的Docker镜像!" + colorEcho ${BLUE} "可以安装的docker-ce的 $(echo ${DOCKER_VERSION} | cut -d"." -f1) 版本为:" + colorEcho ${GREEN} "--------------------------------------------------------------" + apt-cache madison docker-ce | grep $(echo ${DOCKER_VERSION} | cut -d"." -f1) | awk '{print$3}' + + colorEcho ${BLUE} "开始下载 ${DOCKER_VERSION} 版本的离线安装包!" + dockerSpecific=$(apt-cache madison docker-ce | grep $(echo ${DOCKER_VERSION} | cut -d"." -f1) | awk '{print$3}' | grep ${DOCKER_VERSION}) + + # 需要获取其所依赖包的包 + colorEcho $BLUE "开始解析依赖!" + export DOCKER_VERSION=20.10.15 + export dockerSpecific=$(apt-cache madison docker-ce | grep $(echo ${DOCKER_VERSION} | cut -d"." 
-f1) | awk '{print$3}' | grep ${DOCKER_VERSION} | head -1) + + apt-get install "docker-ce=${dockerSpecific}" "docker-ce-cli=${dockerSpecific}" "containerd.io" "docker-compose-plugin" + + mkdir -p /tmp/docker-${DOCKER_VERSION} + cd /tmp/docker-${DOCKER_VERSION} + colorEcho $BLUE "开始下载所有的依赖!" + for staff in "${dockerStaff[@]}"; do + colorEcho ${BLUE} "开始下载 ${staff} 的依赖!" + apt download $(apt-rdepends ${staff} | grep -v "^ ") + colorEcho ${GREEN} "下载完成!" + done + + # apt-get download $libs + colorEcho ${GREEN} " 下载完成 " + echo "" + colorEcho ${GREEN} "查看已经下载的Docker安装包……" + ls /tmp/docker-${DOCKER_VERSION}/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + fi + echo "" +} + +distributeDocker() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始分发Docker的离线安装包…………" + echo "" + + for ip in "${PrivateServerIPs[@]}"; do + colorEcho ${BLUE} "正在将Docker的离线安装包分发至主机 ${ip} 上……" + echo "yes + yes + " | scp -r /tmp/docker-${DOCKER_VERSION} root@${ip}:/tmp/docker-${DOCKER_VERSION} + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} "Docker离线安装包已经分发完成!" + colorEcho ${GREEN} "----------------------------------------------------------" + else + colorEcho ${RED} "ERROR:Docker离线安装包 没有正常分发!!" + colorEcho ${RED} "----------------------------------------------------------" + fi + done + echo "" +} + +# 能够联网的机器上 +downloadGlusterFSHeketi() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始下载存储服务器相关的组件…………" + + colorEcho ${BLUE} "正在安装 gluster 源!" + yum install centos-release-gluster -y + + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "glusterfs的源添加完成…………" + echo "" + GlusterFSVersion=$(yum list glusterfs | grep "gluster" | awk '{print$2}') + HeketiVersion=$(yum list heketi | grep "heketi" | awk '{print$2}') + + colorEcho ${BLUE} "开始下载 glusterFS的离线安装包!" + colorEcho ${BLUE} " glusterFS的版本为: ${GlusterFSVersion}!!" + colorEcho ${BLUE} " glusterFS的版本为: ${GlusterFSVersion}!!" + yum install -y -q --downloadonly --downloaddir=/tmp/storage/ glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel + colorEcho ${GREEN} " 下载完成 " + + colorEcho ${BLUE} "开始下载 heketi 的离线安装包!" + colorEcho ${BLUE} " heketi 的版本为 ${HeketiVersion}!!" + colorEcho ${BLUE} " heketi 的版本为 ${HeketiVersion}!!" + yum install -y -q --downloadonly --downloaddir=/tmp/storage/ hekeit heketi-client + colorEcho ${GREEN} " 下载完成 " + + colorEcho ${GREEN} "查看已经下载的相关依赖安装包……" + ls /tmp/storage/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + else + colorEcho ${RED} "glusterfs的源添加失败!! 无法继续进行存储服务器的初始化" + colorEcho ${RED} "glusterfs的源添加失败!! 无法继续进行存储服务器的初始化" + colorEcho ${RED} "glusterfs的源添加失败!! 无法继续进行存储服务器的初始化" + return 1 + fi + +} + +distributeGlusterFSHeketiRPMs() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始分发GlusterFSHeketi的离线安装包…………" + echo "" + + for ip in "${StorageServerIPs[@]}"; do + colorEcho ${BLUE} "正在将GlusterFS-Heketi的离线安装包分发至主机 ${ip} 上……" + echo "yes + yes + " | scp -r /tmp/storage/ root@${ip}:/tmp/storage/ + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} "GlusterFS-Heketi 的离线安装包已经分发完成!" + colorEcho ${GREEN} "----------------------------------------------------------" + else + colorEcho ${RED} "ERROR:GlusterFS-Heketi 离线安装包 没有正常分发!!" 
+ colorEcho ${RED} "----------------------------------------------------------" + fi + done + echo "" +} + +installGlusterFS() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装 存储服务 GlusterFS 的基础组件 的相关服务…………" + echo "" + + if [ -d /tmp/storage ]; then + ls /tmp/storage | grep -q gluster + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + yum install -y -q /tmp/storage/*.rpm + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "GlusterFS安装完成,开始启动服务!" + systemctl start glusterd.service + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status glusterd.service + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl enable glusterd.service + echo "" + echo "" + colorEcho ${BLUE} "开始配置 GlusterFS,连接对方主机!" + StorageNodeNum=$(echo ${WhichNodeRun} | cut -d"-" -f2) + if [ "${StorageNodeNum}" -eq "1" ]; then + colorEcho ${BLUE} "检测到当前主机为 Storage-1,请确保是否正确!" + colorEcho ${BLUE} "检测到当前主机为 Storage-1,请确保是否正确!" + colorEcho ${BLUE} "检测到当前主机为 Storage-1,请确保是否正确!" + gluster peer probe storage-2 + echo "" + colorEcho ${BLUE} "glusterFS的节点状态为:" + gluster peer status + elif [ "${StorageNodeNum}" -eq "2" ]; then + colorEcho ${BLUE} "检测到当前主机为 Storage-2,请确保是否正确!" + colorEcho ${BLUE} "检测到当前主机为 Storage-2,请确保是否正确!" + colorEcho ${BLUE} "检测到当前主机为 Storage-2,请确保是否正确!" + gluster peer probe storage-1 + echo "" + colorEcho ${BLUE} "GlusterFS的节点状态为:" + gluster peer status + fi + fi + else + colorEcho ${RED} "未检测到GlusterFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到GlusterFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到GlusterFS的基础安装包,程序将终止!!" + return 1 + fi + fi +} + +installHeketi() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装 存储服务 Heketi 的相关服务…………" + echo "" + ls /tmp/storage | grep heketi | grep -v heketi-client + if [ "$?" -ne "0" ]; then + colorEcho ${RED} "未检测到heketi的离线安装rpm包!!!" + colorEcho ${RED} "未检测到heketi的离线安装rpm包!!!" + echo "" + colorEcho ${BLUE} "开始进行heketi的离线下载,安装过程!!!!" + wget ${HeketiOfficeFile} -O heketi9-offline.tar.gz + tar -zvxf heketi9-offline.tar.gz + cd heketi + pwd + mv ./heketi /usr/bin/heketi + mv ./heketi-cli /usr/bin/heketi-cli + cd .. + pwd + echo "" + colorEcho ${BLUE} "请检查heketi的命令是否输出正常!!" + heketi --version + colorEcho ${BLUE} "----------------------------------------------------" + heketi-cli --version + echo "" + else + colorEcho ${BLUE} "检测到heketi-server的离线安装包!" + echo "" + colorEcho ${BLUE} "按照道理目前的 heketi已经安装!!" + colorEcho ${BLUE} "开始检测…………" + echo "" + fi + + if heketi --version &>/dev/null && heketi-cli --version &>/dev/null; then + colorEcho ${GREEN} "检查到heketi已经安装成功!! 开始进行相关的配置。" + echo "" + colorEcho ${BLUE} "开始为heketi-server添加系统用户 heketi!" + sudo groupadd --system heketi + sudo useradd -s /sbin/nologin --system -g heketi heketi + echo "" + colorEcho ${BLUE} "开始创建 heketi-server 的配置、工作、日志目录" + sudo mkdir -p /var/lib/heketi /etc/heketi /var/log/heketi + echo "" + colorEcho ${BLUE} "开始下载heketi的配置文件压缩包,heketi-config.tar.gz…" + wget "${HeketiConfigOfflineFile}" -O /etc/heketi/heketi-config.tar.gz + echo "" + + if [ -s /etc/heketi/heketi-config.tar.gz ]; then + colorEcho ${GREEN} "heketi的配置文件压缩包下载成功!!!" 
+      tar -zxvf /etc/heketi/heketi-config.tar.gz -C /etc/heketi
+      echo ""
+    fi
+
+    colorEcho ${BLUE} "开始创建heketi的ssh key文件,使得heketi-server能够访问glusterfs的server"
+    # answer "y" to the overwrite prompt in case an old key already exists
+    echo "y" | ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''
+
+    chown heketi:heketi /etc/heketi/heketi_key*
+
+    for ip in "${PrivateServerIPs[@]}"; do
+      colorEcho ${BLUE} "请手动将如下的命令,以 root 权限在主机 ${ip} 上运行"
+      colorEcho ${BLUE} "-----------------------------------------------"
+      echo ""
+      echo ""
+      colorEcho ${RED} " 请以 root 角色 运行!!! "
+      colorEcho ${RED} " 请以 root 角色 运行!!! "
+      colorEcho ${RED} " 请以 root 角色 运行!!! "
+      echo ""
+      colorEcho ${YELLOW} 'sed -i -e "/PermitRootLogin no/ d" -e "$ a PermitRootLogin yes" /etc/ssh/sshd_config && systemctl restart sshd'
+      echo ""
+      colorEcho ${YELLOW} "echo \"$(cat /etc/heketi/heketi_key.pub)\" >> ~/.ssh/authorized_keys && echo \"\" && cat ~/.ssh/authorized_keys"
+      echo ""
+      echo ""
+      while true; do
+        colorEcho ${RED} "请确保您已经将上述的命令在主机${ip}上执行了!!"
+        read -r -p "请输入yes进行确认,脚本才可继续运行!!" input
+        case $input in
+        yes)
+          colorEcho ${GREEN} "您已确认在主机${ip}上添加了私有的ssh key!"
+          echo ""
+          break
+          ;;
+        *)
+          echo ""
+          colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认"
+          colorEcho ${RED} "请在主机${ip}上执行上述命令!!!"
+          colorEcho ${RED} "否则本脚本的功能会失效!!"
+          colorEcho ${RED} "-----------------------------------------------------"
+          echo ""
+          ;;
+        esac
+      done
+    done
+
+    colorEcho ${GREEN} "heketi的密钥已经分发完毕!!"
+    echo ""
+    colorEcho ${BLUE} "--------------------------------------------------------------"
+    colorEcho ${BLUE} "请根据主机实际的物理磁盘信息,修改相应的 device!"
+    colorEcho ${BLUE} "请根据主机实际的物理磁盘信息,修改相应的 device!"
+    colorEcho ${BLUE} "请根据主机实际的物理磁盘信息,修改相应的 device!"
+    colorEcho ${BLUE} "--------------------------------------------------------------"
+    while true; do
+      colorEcho ${RED} "请确保您已经新开终端界面并修改了topology.json文件!!!"
+      read -r -p "请输入yes进行确认,脚本才可继续运行!!" input
+      case $input in
+      yes)
+        colorEcho ${GREEN} "您已确认新开终端界面并修改了topology.json文件!!!!"
+        echo ""
+        break
+        ;;
+      *)
+        echo ""
+        colorEcho ${RED} "输入有误!!! 请输入 >> yes << 进行确认"
+        colorEcho ${RED} "请确保您已经新开终端界面并修改了topology.json文件!!!"
+        colorEcho ${RED} "否则本脚本的功能会失效!!"
+        colorEcho ${RED} "-----------------------------------------------------"
+        echo ""
+        ;;
+      esac
+    done
+
+    colorEcho ${BLUE} "--------------------------------------------------------------"
+    colorEcho ${BLUE} "修改所有的heketi-server目录的权限"
+    chown -R heketi:heketi /var/lib/heketi /var/log/heketi /etc/heketi
+    echo ""
+    colorEcho ${BLUE} "重新加载Systemd并启动 Heketi service"
+    cp /etc/heketi/heketi.service /usr/lib/systemd/system/heketi.service
+
+    sudo systemctl daemon-reload
+    sudo systemctl enable --now heketi
+    echo ""
+
+    colorEcho ${BLUE} "输出heketi-server的状态!!"
+    systemctl status heketi -l
+  fi
+}
+
+# 能够联网的机器上
+downloadNFS() {
+  colorEcho ${PURPLE} "--------------------------------------------------------------"
+  colorEcho ${BLUE} "开始下载NFS相关的组件…………"
+
+  echo ""
+  # NFSVersion=$(yum list nfs-utils | grep "nfs" | awk '{print$2}')
+
+  colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!"
+  colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!"
+  echo ""
+  if [ "$LinuxReleaseVersion" = "centos" ]; then
+    colorEcho ${BLUE} "开始下载 NFS的离线安装包!"
+    # colorEcho ${BLUE} " NFS的版本为: ${NFSVersion}!!"
+    yum install -y -q --downloadonly --downloaddir=/tmp/storage/nfs/ nfs-utils
+    colorEcho ${GREEN} " 下载完成 "
+
+    colorEcho ${BLUE} "开始下载 NFS-rpcbind的离线安装包!"
+    # colorEcho ${BLUE} " NFS的版本为: ${NFSVersion}!!"
+    yum install -y -q --downloadonly --downloaddir=/tmp/storage/rpc/ rpcbind
+    colorEcho ${GREEN} " 下载完成 "
+  else
+    colorEcho ${BLUE} "开始下载 NFS的离线安装包!"
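+    # How the Debian/Ubuntu branch works (sketch): apt-rdepends prints a package
+    # followed by its recursive dependencies, with transitive entries indented,
+    # so grep -v "^ " keeps only top-level package names and apt-get download
+    # then fetches each .deb into the current directory, e.g.:
+    #   apt-get download $(apt-rdepends curl | grep -v "^ ")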
+ apt-cache madison nfs-common | awk '{print$3}' + # installDemandSoftwares nfs-kernel-server + mkdir -p /tmp/storage/nfs/ + mkdir -p /tmp/storage/rpc/ + + echo + colorEcho ${BLUE} "需要下载的依赖为 ${libs}" + + colorEcho ${BLUE} "开始下载 NFS的离线安装包!" + cd /tmp/storage/nfs + colorEcho $BLUE "开始解析依赖!" + + colorEcho ${BLUE} "开始下载 NFS-Client 的离线安装包!" + # ubuntu 20.04 使用如下的几项内容即可 + # apt-get download keyutils libnfsidmap2 libtirpc-common libtirpc3 nfs-common rpcbind + apt-get download $(apt-rdepends nfs-common | grep -v "^ ") + + colorEcho ${GREEN} " 下载完成 " + colorEcho ${BLUE} "开始下载 NFS-Server 的离线安装包!" + cd /tmp/storage/rpc + # ubuntu 20.04 使用如下的几项内容即可 + # apt-get download keyutils libnfsidmap2 libtirpc-common libtirpc3 nfs-common nfs-kernel-server rpcbind + apt-get download $(apt-rdepends nfs-kernel-server | grep -v "^ ") + colorEcho ${GREEN} " 下载完成 " + echo "" + fi + + colorEcho ${GREEN} "查看已经下载的相关依赖安装包……" + ls /tmp/storage/nfs/ + ls /tmp/storage/rpc/ + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + +} + +distributeNFS() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始分发NFS的离线安装包…………" + echo "" + + for ip in "${StorageServerIPs[@]}"; do + colorEcho ${BLUE} "正在将NFS的离线安装包分发至主机 ${ip} 上……" + echo "yes + yes + " | scp -r /tmp/storage/ root@${ip}:/tmp/storage/ + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} "NFS的离线安装包已经分发完成!" + colorEcho ${GREEN} "----------------------------------------------------------" + else + colorEcho ${RED} "ERROR:NFS 离线安装包 没有正常分发!!" + colorEcho ${RED} "----------------------------------------------------------" + fi + done + echo "" +} + +installNFS() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装 存储服务 NFS 的基础组件 的相关服务…………" + echo "" + + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + echo "" + if [ "$LinuxReleaseVersion" = "centos" ]; then + if [ -d /tmp/storage/nfs/ ]; then + ls /tmp/storage/nfs/ | grep -q nfs-utils + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + yum install -y -q /tmp/storage/nfs/*.rpm + echo "" + else + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + return 1 + fi + fi + else + ls /tmp/storage/nfs | grep -q "nfs-common" + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + dpkg -i /tmp/storage/nfs/*.deb + echo "" + else + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + return 1 + fi + fi + + colorEcho ${GREEN} "NFS安装完成,开始启动服务!" + systemctl start nfs nfs-client nfs-common + systemctl enable nfs nfs-client nfs-common + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status nfs nfs-client nfs-common -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Client 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + +} + +#nfs 服务端 +installNFSServer() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装 存储服务 NFS 的基础组件 的相关服务…………" + echo "" + + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" 
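+  # Service names differ per distro: CentOS splits the stack into nfs + rpcbind
+  # units, while Debian/Ubuntu package the server as nfs-kernel-server (systemd
+  # unit nfs-server); both branches below install from the /tmp/storage bundle
+  # prepared by downloadNFS.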
+ echo "" + if [ "$LinuxReleaseVersion" = "centos" ]; then + if [ -d /tmp/storage/nfs/ ]; then + ls /tmp/storage/nfs/ | grep -q nfs-utils + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + yum install -y -q /tmp/storage/nfs/*.rpm + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "NFS安装完成,开始启动服务!" + systemctl enable nfs + systemctl start nfs + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status nfs -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Client 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + fi + else + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + return 1 + fi + fi + if [ -d /tmp/storage/rpc/ ]; then + ls /tmp/storage/rpc/ | grep -q rpcbind + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + yum install -y -q /tmp/storage/rpc/*.rpm + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "NFS-RPCBIND 安装完成,开始启动服务!" + systemctl start rpcbind + systemctl enable rpcbind + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status rpcbind -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Server 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + + fi + else + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + return 1 + fi + fi + else + colorEcho ${GREEN} "执行发行版为-- ${LinuxReleaseVersion} 的NFS-Server安装进程!" + if [ -d /tmp/storage/nfs/ ]; then + ls /tmp/storage/nfs/ | grep -q nfs-common + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + dpkg -i /tmp/storage/nfs/*.deb + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "NFS安装完成,开始启动服务!" + systemctl start nfs + systemctl enable nfs + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status nfs -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Client 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + fi + else + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS的基础安装包,程序将终止!!" + return 1 + fi + fi + if [ -d /tmp/storage/rpc/ ]; then + ls /tmp/storage/rpc/ | grep -q server + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "检测到存储服务的离线安装包,开始执行安装过程!" + dpkg -i /tmp/storage/rpc/*.deb + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "NFS-Server 安装完成,开始启动服务!" + systemctl start nfs-server + systemctl enable nfs-server + colorEcho ${BLUE} "------------------------------------------------------------" + systemctl status nfs-server -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " NFS-Server 启动完成 " + fi + colorEcho ${BLUE} "------------------------------------------------------------" + echo "" + + fi + else + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + colorEcho ${RED} "未检测到NFS-RPCBIND的基础安装包,程序将终止!!" + return 1 + fi + fi + fi + + echo "" + colorEcho ${BLUE} "开始创建NFS共享目录!" + + export NfsPath=/nfsdata + mkdir -p ${NfsPath} + chmod 777 ${NfsPath} + cat /etc/exports | grep ${NfsPath} -q + if [[ $? -ne 0 ]]; then + # exports文件中没有相关的额配置,才添加! + echo "nfs config not exist !" 
+ echo "${NfsPath} *(rw,no_root_squash,no_all_squash,sync)" >>/etc/exports + fi + echo "" + + colorEcho ${BLUE} "开始重启nfs服务!" + + if [ "$LinuxReleaseVersion" = "centos" ]; then + systemctl restart rpcbind && systemctl restart nfs + else + systemctl restart nfs-server && systemctl restart nfs + fi + echo "" + echo "" + colorEcho ${BLUE} "检查NFS的运行状况:" + rpcinfo -p localhost + colorEcho ${YELLOW} "------------------------------------------------------------" + echo "" +} + +installProxyServer() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装网络代理(v2ray)的相关服务…………" + echo "" + colorEcho ${BLUE} "开始从西云的对象存储下载离线安装包…………" + colorEcho ${BLUE} "当前目录为 $(pwd),创建代理服务器的临时安装目录:" + mkdir proxy_offline_install && cd ./proxy_offline_install + wget "${ProxyOfflineFile}" -O proxy-offline.tar.gz + if [ ! -s proxy-offline.tar.gz ]; then + echo "" + colorEcho ${RED} "代理服务器安装包下载失败!!!" + colorEcho ${RED} "代理服务器安装包下载失败!!!" + colorEcho ${RED} "代理服务器安装包下载失败!!!" + return 1 + fi + colorEcho ${GREEN} " 下载完成 " + tar -zxvf proxy-offline.tar.gz + colorEcho ${GREEN} " 代理服务器离线安装包解压缩完成 " + pwd + chmod +x v2ray-install.sh + colorEcho ${BLUE} "开始离线安装 网络代理(v2ray)服务器……" + echo " + " | ./v2ray-install.sh --local v2ray-linux-64_v4.32.1.zip + echo "" + colorEcho ${GREEN} "网络代理(v2ray v4.32.1)服务器已经安装成功!" + colorEcho ${GREEN} "---------------------------------------------" + echo "" + colorEcho ${BLUE} "开始配置代理服务器的相关设置…………" + sed -i "s/User=nobody/User=root/g" /etc/systemd/system/v2ray.service + rm /usr/local/etc/v2ray/config.json + cp ./config.json /usr/local/etc/v2ray/config.json + colorEcho ${GREEN} " 配置完成 " + cd .. + pwd + echo "" + colorEcho ${BLUE} "正在开启代理服务器v2ray的服务程序……" + systemctl daemon-reload && systemctl start v2ray + colorEcho ${GREEN} " 服务启动配置完成 " + echo "" + colorEcho ${BLUE} "查看代理服务器v2ray的程序运行状态……" + systemctl status v2ray -l | grep "Active: active (running)" + if [ $? -ne 0 ]; then + echo "" + colorEcho ${RED} "代理服务器启动失败!!" + colorEcho ${RED} "代理服务器启动失败!!" + colorEcho ${RED} "代理服务器启动失败!!" + colorEcho ${RED} "代理服务器启动失败!!" + return 1 + fi + colorEcho ${BLUE} "正在设置v2ray的开机自启动……" + systemctl enable v2ray + colorEcho ${GREEN} " 开机自启动配置完成 " + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "------------下面是代理服务器的使用方法说明------------" + colorEcho ${BLUE} "------------下面是代理服务器的使用方法说明------------" + colorEcho ${YELLOW} "------------------------------------------------------------" + colorEcho ${BLUE} "请确保当前主机能够访问公网!!!!" + colorEcho ${BLUE} "在需要使用代理上网的服务器上输入如下的命令:" + echo "" + echo "" + colorEcho ${YELLOW} " export http_proxy=http://${PublicServerIPs}:12333 && export https_proxy=http://${PublicServerIPs}:12333 " + echo "" + echo "" + colorEcho ${YELLOW} "------------------------------------------------------------" + colorEcho ${BLUE} "------------上面是代理服务器的使用方法说明------------" + colorEcho ${BLUE} "------------上面是代理服务器的使用方法说明------------" + echo "" + echo "" +} + +modifySystemNetworkProxy() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始配置系统网络代理…………" + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + + addYumProxy + + addShellProxy + + # addDockerProxy +} + +addYumProxy() { + colorEcho ${BLUE} "开始配置yum包管理工具的网络代理…………" + + cat /etc/yum.conf | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? 
-eq 0 ]; then + colorEcho ${BLUE} "检测到yum的代理已经添加,跳过!" + return 1 + fi + sed -i "$ a http_proxy="http://"${PublicServerIPs}":12333"" /etc/yum.conf + sed -i "$ a https_proxy="http://"${PublicServerIPs}":12333"" /etc/yum.conf + + if [ $? -eq 0 ]; then + colorEcho ${GREEN} "yum源代理配置修改完成! 目前yum命令可以通过master 节点代理上网" + echo "" + fi +} + +addShellProxy() { + colorEcho ${BLUE} "开始配置shell终端的网络代理…………" + + export http_proxy=http://${PublicServerIPs}:12333 && export https_proxy=http://${PublicServerIPs}:12333 + + cat /etc/profile | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? -eq 0 ]; then + colorEcho ${BLUE} "检测到shell的代理已经添加,跳过!" + return 1 + fi + + sed -i "$ a export http_proxy="http://"${PublicServerIPs}":12333"" /etc/profile + sed -i "$ a export https_proxy="http://"${PublicServerIPs}":12333"" /etc/profile + + if [ -a ~/.bashrc ]; then + colorEcho ${BLUE} "检测到bash shell存在,开始配置其代理。。" + cat ~/.bashrc | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? -eq 0 ]; then + colorEcho ${BLUE} "检测到shell的代理已经添加,跳过!" + return 1 + fi + sed -i "$ a export http_proxy="http://"${PublicServerIPs}":12333"" ~/.bashrc + sed -i "$ a export https_proxy="http://"${PublicServerIPs}":12333"" ~/.bashrc + fi + + if [ -a ~/.profile ]; then + colorEcho ${BLUE} "检测到~/.profile存在,开始配置其代理。。" + cat ~/.profile | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? -eq 0 ]; then + colorEcho ${BLUE} "检测到shell的代理已经添加,跳过!" + return 1 + fi + sed -i "$ a export http_proxy="http://"${PublicServerIPs}":12333"" ~/.profile + sed -i "$ a export https_proxy="http://"${PublicServerIPs}":12333"" ~/.profile + fi + + if [ -a ~/.zshrc ]; then + colorEcho ${BLUE} "检测到zsh shell存在,开始配置其代理。。" + cat ~/.zshrc | grep "http_proxy="http://"${PublicServerIPs}":12333"" + + if [ $? -eq 0 ]; then + colorEcho ${BLUE} "检测到shell的代理已经添加,跳过!" + return 1 + fi + sed -i "$ a export http_proxy="http://"${PublicServerIPs}":12333"" ~/.zshrc + sed -i "$ a export https_proxy="http://"${PublicServerIPs}":12333"" ~/.zshrc + fi + + if [ $? -eq 0 ]; then + colorEcho ${GREEN} "shell的代理配置修改完成! 目前curl wget等命令可以通过master节点代理上网" + echo "" + fi +} + +installDocker() { + ### 国内的环境 ### + ### 依赖colorEcho + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装Docker的相关服务…………" + colorEcho ${GREEN} "--------------------------------------------------------------" + colorEcho ${BLUE} "您选择安装的docker版本为:${DOCKER_VERSION}" + echo "" + + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion}!!" 
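+  # Both branches below are meant to install from the local
+  # /tmp/docker-${DOCKER_VERSION} bundle prepared by downloadDocker and copied
+  # over by distributeDocker, rather than from the online repositories.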
+ echo "" + if [ "$LinuxReleaseVersion" = "centos" ]; then + colorEcho ${BLUE} "正在安装安装docker的所需要的依赖…………" + colorEcho ${YELLOW} "----------------------------------------------------------------------" + ls /tmp/docker-${DOCKER_VERSION}/depends/ + colorEcho ${YELLOW} "----------------------------------------------------------------------" + echo "" + yum install -y -q /tmp/docker-${DOCKER_VERSION}/depends/*.rpm + colorEcho ${GREEN} " 安装完成 " + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + + colorEcho ${GREEN} "开始安装docker-ce,版本为${DOCKER_VERSION}" + colorEcho ${YELLOW} "----------------------------------------------------------------------" + ls /tmp/docker-${DOCKER_VERSION}/ + colorEcho ${YELLOW} "----------------------------------------------------------------------" + echo "" + yum install -y -q /tmp/docker-${DOCKER_VERSION}/*.rpm + else + + colorEcho ${GREEN} "开始安装docker-ce,版本为${DOCKER_VERSION}" + colorEcho ${YELLOW} "----------------------------------------------------------------------" + ls /tmp/docker-${DOCKER_VERSION}/ + colorEcho ${YELLOW} "----------------------------------------------------------------------" + echo "" + dpkg -i /tmp/docker-${DOCKER_VERSION}/*.deb + echo "" + fi + + colorEcho ${GREEN} " Docker安装完成 " + colorEcho ${GREEN} " Docker安装完成 " + colorEcho ${GREEN} " Docker安装完成 " + colorEcho ${GREEN} "--------------------------------------------------------------" + echo "" + + colorEcho ${BLUE} "正在启动docker的服务进程…………" + systemctl enable docker.service + systemctl start docker.service + colorEcho ${BLUE} "等待docker的服务进程启动…………" + sleep 3 + colorEcho ${BLUE} "查看docker的服务进程运行状态…………" + + systemctl status docker.service -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} " Docker 启动完成 " + fi + echo "" +} +## 安装docker时,修改系统的配置文件 +modifySystemConfig() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始修改系统内核参数…………" + ## 配置内核参数 + cat >/etc/sysctl.d/k8s.conf </dev/null + if [[ $? -eq 0 ]]; then + colorEcho ${GREEN} "docker-compose安装成功!!版本为$(docker-compose --version | cut -d" " -f3)尽情享用" + else + ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose + fi + else + colorEcho ${RED} "docker-compose文件下载失败!! 无法访问github的资源。。" + colorEcho ${RED} "请手动下载docker-compose的安装文件!" + fi +} + +downloadKubectlMinio() { + echo "" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} " 本部分应该在master节点上执行!………" + colorEcho ${BLUE} " 开始下载minio集群的安装初始化工具 ………" + + sudo wget https://github.com/minio/operator/releases/download/v4.4.13/kubectl-minio_4.4.13_linux_amd64 \ + -O /usr/bin/kubectl-minio + + sudo chmod +x /usr/bin/kubectl-minio + + colorEcho ${BLUE} "请确保在需要安装 minio的服务器上创建好了目录!" + colorEcho ${BLUE} "请确保在需要安装 minio的服务器上创建好了目录!" + + # 2. 初始化 minio 部署工具 + kubectl minio init + +} + +buildDirectoryForMinio() { + echo "" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} " 本部分应该在 安装minio集群的 节点上执行!………" + + colorEcho ${BLUE} "开始 为MINIO服务器创建目录…………" + + while true; do + colorEcho ${BLUE} "运行到此处,说明,您选择了一个pv挂载4目录的形式!" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${RED} "请确保您已经 手动初始化磁盘pv,并将其挂在至 /data 目录!" + read -r -p "请输入yes进行确认,脚本才可继续运行!!" input + case $input in + yes) + + colorEcho ${GREEN} "您已确认 手动初始化磁盘pv,并将其挂在至 /data 目录!" + echo "" + break + ;; + *) + echo "" + colorEcho ${RED} "输入有误!!! 
请输入 >> yes << 进行确认" + colorEcho ${RED} "请确保您已经 手动初始化磁盘pv,并将其挂在至 /data 目录!" + colorEcho ${RED} "否则本脚本的功能会失效!!" + colorEcho ${RED} "-----------------------------------------------------" + echo "" + ;; + esac + done + + # 向下兼容 适应8pod或者4pod的情况 + for i in {1..8}; do + mkdir -p /data/minio-pv/pv${i} + if [ -d "/data/minio-pv/pv${i}" ]; then + echo "yes" + else + return 1 + fi + echo "" + done + + colorEcho ${GREEN} "Minio的目录均已创建完成!!" + +} + +buildPVForMinio() { + echo "" + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始 为MINIO服务器 初始化磁盘pv,创建整PV的存储…………" + + echo "此部分功能暂时掠过!" +} + +installZSH() { + colorEcho ${PURPLE} "--------------------------------------------------------------" + colorEcho ${BLUE} "开始安装宇宙第一shell的相关服务…………" + installDemandSoftwares zsh git || return $? + # 脚本会自动更换默认的shell + # echo y | sh -c "$(curl -fsSL https://cdn.jsdelivr.net/gh/robbyrussell/oh-my-zsh@master/tools/install.sh)" + wget https://cdn.jsdelivr.net/gh/robbyrussell/oh-my-zsh@master/tools/install.sh -O zsh-install.sh + # sed -i "s/github.com/github.com.cnpmjs.org/g" zsh-install.sh + # if [ $? -eq 0 ]; then + # colorEcho ${GREEN} "zsh仓库地址替换完成,已更换为国内的下载加速镜像" + # fi + chmod +x zsh-install.sh + colorEcho ${BLUE} "开始执行zsh的安装过程!!" + echo y | sh -c "./zsh-install.sh" + if [ "$(ls -A /root/.oh-my-zsh | wc -w)" -eq "0" ]; then + echo "" + colorEcho ${RED} "zsh下载失败!!跳过安装步骤!" + echo "" + return 1 + fi + echo "" +} + +modifyZSH() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------" + colorEcho ${GREEN} "zsh应该已经安装成功!!!" + colorEcho ${BLUE} "开始修改zsh的相关配置信息,使其更加好用…………" + echo "" + cat >oh-my-zsh-plugins-list.txt <自动提示< 插件…………" + git clone https://github.com.cnpmjs.org/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/plugins/zsh-autosuggestions + echo "" + colorEcho ${BLUE} "开始从GitHub下载 >命令高亮< 插件…………" + git clone https://github.com.cnpmjs.org/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/plugins/zsh-syntax-highlighting + + echo "" + colorEcho ${BLUE} "开始从JSDeliver下载另外一些插件…………" + wget -c -i ~/oh-my-zsh-plugins-list.txt -P ~/.oh-my-zsh/plugins/ + echo "" + colorEcho ${PURPLE} "---------------------------------------------------------------------------" + colorEcho ${GREEN} "插件已经下载完毕,现在开始修改zsh的配置文件…………" + echo "" + + sed -i "s/robbyrussell/agnoster/g" ~/.zshrc + sed -i 's/^# DISABLE_AUTO_UPDATE="true"/DISABLE_AUTO_UPDATE="true"/g' ~/.zshrc + sed -i 's/plugins=(git)/plugins=(git zsh-autosuggestions zsh-syntax-highlighting command-not-found z themes)/g' ~/.zshrc + colorEcho ${GREEN} "请检查当前zsh的插件开启情况:" + colorEcho ${GREEN} "------------------------------------------" + cat ~/.zshrc | grep "plugins=" | grep -v "\#" + cat ~/.zshrc | grep "plugins=" | grep -v "\#" + cat ~/.zshrc | grep "plugins=" | grep -v "\#" + colorEcho ${GREEN} "------------------------------------------" + + echo "" + echo "----------------------------------------------------" + echo "这里的错误输出无需在意" + source /root/.zshrc + echo "这里的错误输出无需在意" + echo "----------------------------------------------------" + + if [[ $? -eq 0 ]]; then + colorEcho ${BLUE} "开始修改默认shell为zsh……" + for i in {6..1..-1}; do + colorEcho ${BLUE} "倒计时开始 ->> $i 秒 <<-,准备切换shell,上文的日志输出将会消失!!" 
+ sleep 2 + done + chsh -s /bin/zsh + zsh + else + colorEcho ${RED} "zsh 安装失败,大概率是已经安装!!小概率是无法连接GitHub服务器~~" + fi + + colorEcho ${GREEN} "zsh 安装成功,已更换主题,禁止更新,尽情享用~~~" + colorEcho ${GREEN} "-----------------------------------------------------------------------------" + colorEcho ${PURPLE} "宇宙第一shell的zsh已经安装成功了!!!" + colorEcho ${GREEN} "宇宙第一shell的zsh已经安装成功了!!!" + colorEcho ${BLUE} "宇宙第一shell的zsh已经安装成功了!!!" + colorEcho ${GREEN} "-----------------------------------------------------------------------------" + echo "" +} +# 修改docker的国内加速镜像 +changeDockerRegisterMirror() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始配置docker的(加速+私有)镜像…………" + echo "" + if [[ -e /etc/docker/daemon.json ]]; then + colorEcho ${BLUE} "已经存在docker的daemon文件。。" + mv /etc/docker/daemon.json /etc/docker/daemon.backup.json + colorEcho ${GREEN} "已经将daemon文件备份" + fi + colorEcho ${BLUE} "正在写入docker的daemon配置文件……" + cat >>/etc/docker/daemon.json <>/etc/systemd/system/docker.service.d/http-proxy.conf <>~/.docker/config.json </dev/null + + colorEcho ${BLUE} "开始添加Nginx的apt源!" + echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ +http://nginx.org/packages/ubuntu $(lsb_release -cs) nginx" \ + | sudo tee /etc/apt/sources.list.d/nginx.list + + colorEcho ${BLUE} "开始更新apt源" + sudo apt update + echo "" + + colorEcho ${BLUE} "查看所有可以安装的nginx版本" + apt-cache madison nginx | awk '{print$3}' + echo "" + echo "" + colorEcho ${BLUE} "开始安装最新版本的nginx" + sudo apt install "nginx=$(apt-cache madison nginx | awk '{print$3}' | head -1)" + + fi + + systemctl status nginx + + systemctl start nginx + if [ "$?" -eq "0" ]; then + colorEcho ${GREEN} "nginx安装完成!已成功运行!" + fi + +} + +modifyNginx() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始安装Nginx作为对面暴露面…………" + echo "" + colorEcho ${BLUE} "开始配置nginx的总配置文件!!" + cat >/etc/nginx/nginx.conf </etc/nginx/conf.d/real-public-nginx.conf </dev/null + if [[ $? -ne 0 ]]; then + colorEcho ${RED} "检测到docker-compose未安装!! Harbor服务器的安装过程将中断!!" + colorEcho ${RED} "检测到docker-compose未安装!! Harbor服务器的安装过程将中断!!" + colorEcho ${RED} "检测到docker-compose未安装!! Harbor服务器的安装过程将中断!!" + return 1 + fi + echo "" + if [[ $OSSPublicURL == "" ]]; then + colorEcho ${BLUE} "未指定harbor镜像仓库的离线安装包下载地址!!" + colorEcho ${BLUE} "开始从GitHub下载 harbor的离线安装包!!" + echo "" + wget --no-check-certificate https://github.com/goharbor/harbor/releases/download/v2.1.0/harbor-offline-installer-v2.1.0.tgz + else + colorEcho ${BLUE} "已经指定harbor镜像仓库的离线安装包下载地址!!" + wget --no-check-certificate "${HarborOfflineFile}" -O harbor-offline-installer-v2.1.0.tgz + fi + if [ ! -s harbor-offline-installer-v2.1.0.tgz ]; then + colorEcho ${RED} "harbor离线安装包下载失败! 跳过Harbor安装过程!" + return 1 + fi + colorEcho ${GREEN} "---------------离线安装包下载完成!!----------------" + echo "" + colorEcho ${BLUE} "开始解压缩harbor的离线安装包!!" + tar xvf harbor-offline-installer-v2.1.0.tgz + colorEcho ${GREEN} "---------------解压缩完成!!---------------" + echo "" + colorEcho ${BLUE} "开始配置harbor仓库的相关设置!!" + rm ./harbor/harbor.yml + cat >>./harbor/harbor.yml < ${RKESystemImages} <上下载RKE系统镜像!!" 
+ echo "" + + fi + +} + +#downloadChrony(){ +# colorEcho ${PURPLE} "--------------------------------------------------------------" +# colorEcho ${BLUE} "准备下载 Chrony 的离线安装包…………" +# colorEcho ${GREEN} "--------------------------------------------------------------" +# echo "" +# +# +# mkdir /tmp/chrony +# cd /tmp/chrony +# +# command_exists apt-rdepends +# if [ "$?" -eq "0" ]; then +# let staff=chrony +# colorEcho ${BLUE} "开始下载 ${staff} 的依赖!" +# apt download $(apt-rdepends ${staff} | grep -v "^ ") +# colorEcho ${GREEN} "下载完成!" +# else +# colorEcho ${RED} "依赖检测工具不存在!" +# apt-get download libnss-systemd libpam-systemd libsystemd0 systemd systemd-sysv chrony +# fi +# +#} + +# 使用chrony进行NTP时间同步 +TimeSyncToNTPByChrony() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始使用 chrony 工具进行时间同步…………" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" + colorEcho ${BLUE} "开始安装chrony工具……" + installDemandSoftwares chrony || return $? + colorEcho ${GREEN} " 安装完成 " + # 这里使用的是 默认的NTP源,又不是不能用,为啥要换啊。 + colorEcho ${BLUE} "开始启动并开启开机启动chrony……" + systemctl enable chronyd && systemctl start chronyd + colorEcho ${BLUE} "开始配置chrony……" + + suffixIP=$(echo ${PublicServerIPs[0]} | cut -d "." -f1-2) + + internalCIDR=$(echo "${suffixIP}.0.0/16") + + if [[ ${LinuxReleaseVersion} == "centos" ]]; then + local chronyFile=/etc/chrony.conf + else + local chronyFile=/etc/chrony/chrony.conf + fi + + # sed -i "/^#allow 192.168.0.0\/16/ a allow ${internalCIDR}" ${chronyFile} + sed -i "$ a allow ${internalCIDR}" ${chronyFile} + + sed -i "s/server 0.centos.pool.ntp.org iburst/server ntp2.aliyun.com iburst/g" ${chronyFile} + + colorEcho ${BLUE} "开始重启chrony server服务!" + systemctl restart chronyd + echo "" + + systemctl status chronyd -l | grep "active (running)" -q + if [[ $? -eq 0 ]]; then + chronyc -n sources -v + chronyc tracking + + colorEcho ${GREEN} "时间同步配置完成,已与阿里云进行时间同步!!" + colorEcho ${GREEN} "NTP同步时间完成。现在时间为:" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${PURPLE} "$(date -R)" + colorEcho ${GREEN} "--------------------------------------------------" + else + colorEcho ${RED} "时间同步服务器启动失败!!" + colorEcho ${RED} "时间同步服务器启动失败!!" + colorEcho ${RED} "时间同步服务器启动失败!!" + return 1 + fi + + changeTimeZoneAndNTP +} + +modifyChronySyncToMaster() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始配置 chrony 时间同步至master节点…………" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" + colorEcho ${BLUE} "开始安装chrony工具……" + installDemandSoftwares chrony || return $? + colorEcho ${GREEN} " 安装完成 " + colorEcho ${BLUE} "开始启动并开启开机启动chrony……" + systemctl enable chronyd && systemctl start chronyd + colorEcho ${BLUE} "开始配置chrony……" + + sed -i "s/server 0.centos.pool.ntp.org iburst/server ${PublicServerIPs} minpoll 4 maxpoll 10 iburst/g" /etc/chrony.conf + systemctl restart chronyd + + systemctl status chronyd -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + chronyc -n sources -v + chronyc tracking + + colorEcho ${GREEN} "时间同步配置完成,已与Master节点 ${PublicServerIPs} 进行时间同步!!" 
+ colorEcho ${GREEN} "NTP同步时间完成。现在时间为:" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${PURPLE} "$(date -R)" + colorEcho ${GREEN} "--------------------------------------------------" + else + colorEcho ${RED} "时间同步服务器启动失败!!" + colorEcho ${RED} "时间同步服务器启动失败!!" + colorEcho ${RED} "时间同步服务器启动失败!!" + return 1 + fi + + changeTimeZoneAndNTP +} + +modifyTimeSyncdToMasterUbuntu() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始配置 timesyncd 时间同步至master节点…………" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" + + colorEcho ${BLUE} "开始修改配置文件,时间同步到 Master节点!" + + sed -i "$ a NTP=${PublicServerIPs}" /etc/systemd/timesyncd.conf + systemctl daemon-reload + + systemctl restart systemd-timesyncd.service + systemctl status systemd-timesyncd.service -l | grep "active (running)" + if [[ $? -eq 0 ]]; then + colorEcho $GREEN "时间同步客户端正在正常运行!" + colorEcho ${YELLOW} "------------------------------------------------" + timedatectl show-timesync --all + echo "" + colorEcho ${YELLOW} "------------------------------------------------" + timedatectl status + echo "" + colorEcho ${YELLOW} "------------------------------------------------" + + else + colorEcho ${RED} "时间同步服务器安装失败! 请检查原因" + return 23 + fi +} + +changeTimeZoneAndNTP() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始使用 timedatectl 工具进行时间同步…………" + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" + if [[ -n $(command -v timedatectl) ]]; then + colorEcho ${BLUE} "检测到工具存在,正在设置时间和时区为 上海(UTC+8)时间" + timedatectl set-timezone Asia/Shanghai && timedatectl set-ntp true + colorEcho ${GREEN} "同步时间完成。现在时间为:" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${PURPLE} "$(date -R)" + colorEcho ${GREEN} "--------------------------------------------------" + colorEcho ${BLUE} "开始重启系统日志服务,使得系统日志的时间戳也立即生效" + systemctl restart rsyslog + colorEcho ${GREEN} " 重启完成 " + else + colorEcho ${RED} "timedatectl 工具不存在,时间同步失败!! 请手动更换时间!" + fi + colorEcho ${GREEN} "----------------------------------------------------------------------------------" + echo "" +} + +## 为了本脚本能够满足Ubuntu系统,做出设当的更改 +commonToolInstall() { + colorEcho ${PURPLE} "---------------------------------------------------------------------------------" + colorEcho ${BLUE} "开始进行Linux常用工具的安装过程…………" + colorEcho ${GREEN} "--------------------------------------------------" + echo "" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion} !!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion} !!" + colorEcho ${GREEN} "当前系统的发行版为-- ${LinuxReleaseVersion} !!" + echo "" + if [[ ${LinuxReleaseVersion} == "centos" ]]; then + centosCommonTool=(deltarpm net-tools iputils bind-utils lsof curl wget vim mtr htop screen unzip git openssl iftop) + installDemandSoftwares "${centosCommonTool[@]}" + elif [[ ${LinuxReleaseVersion} == "ubuntu" ]] || [[ ${LinuxReleaseVersion} == "debian" ]]; then + ubuntuCommonTool=(iputils-ping net-tools dnsutils lsof curl wget mtr-tiny vim htop screen git apt-rdepends nethogs iftop) + installDemandSoftwares "${ubuntuCommonTool[@]}" + fi +} + +main() { + + installHarbor || return $? + + installNginx + modifyNginx + + installZSH || return $? + modifyZSH || return $? 
+ +} + +HarborServer() { + # 当harbor位于k8s-master节点相同时 + check_root + check_sys + + disableSwap + + shutdownFirewall + modifySystemConfig + commonToolInstall + installHelm + + TimeSyncToNTPByChrony || return $? + changeTimeZoneAndNTP || return $? + + # installProxyServer || return $? + + generateSSHKey || return $? + + downloadDocker || return $? + distributeDocker || return $? + + installDocker || return $? + + installDockerCompose || return $? + + downloadNFS || return $? + distributeNFS || return $? + installNFSServer || return $? + + installHarbor || return $? + + installNginx + modifyNginx + + installZSH || return $? + modifyZSH || return $? +} + +WorkerServer() { + # check_root + # + # check_sys + # disableSwap + # shutdownFirewall + # modifySystemConfig + # + # modifyTimeSyncdToMasterUbuntu + ## changeTimeZoneAndNTP || return $? + # installDocker || return $? + changeDockerRegisterMirror + +} + +MinioServer() { + check_root + addYumProxy + addShellProxy + check_sys + disableSwap + shutdownFirewall + modifySystemConfig + + # changeTimeZoneAndNTP || return $? + modifyChronySyncToMaster + installDocker || return $? + changeDockerRegisterMirror + addDockerProxy + + if [[ ${MinioStorageType} -eq "pv" ]]; then + buildPVForMinio + else + buildDirectoryForMinio + fi + +} + +HeketiServer() { + check_root + addYumProxy + addShellProxy + check_sys + disableSwap + shutdownFirewall + modifySystemConfig + modifyChronySyncToMaster || return $? + + installGlusterFS || return $? + installHeketi || return $? +} +GlusterServer() { + check_root + addYumProxy + addShellProxy + check_sys + disableSwap + shutdownFirewall + modifySystemConfig + modifyChronySyncToMaster || return $? + + installGlusterFS || return $? +} +${WhichNodeRun} diff --git a/agent-go/executor/script/shutdownFirewall.txt b/agent-go/executor/script/shutdownFirewall.txt new file mode 100644 index 0000000..f311be2 --- /dev/null +++ b/agent-go/executor/script/shutdownFirewall.txt @@ -0,0 +1,2 @@ +systemctl stop firewalld +systemctl disable firewalld \ No newline at end of file diff --git a/agent-go/g/Nacos.go b/agent-go/g/Nacos.go deleted file mode 100644 index c43b680..0000000 --- a/agent-go/g/Nacos.go +++ /dev/null @@ -1 +0,0 @@ -package g diff --git a/agent-go/g/NacosConfig.go b/agent-go/g/NacosConfig.go deleted file mode 100644 index 452cf5e..0000000 --- a/agent-go/g/NacosConfig.go +++ /dev/null @@ -1,172 +0,0 @@ -package g - -import ( - "bytes" - "fmt" - "github.com/nacos-group/nacos-sdk-go/v2/clients" - "github.com/nacos-group/nacos-sdk-go/v2/clients/config_client" - "github.com/nacos-group/nacos-sdk-go/v2/common/constant" - "github.com/nacos-group/nacos-sdk-go/v2/vo" - "github.com/spf13/viper" - "go.uber.org/zap" - "strconv" - "strings" -) - -var log = G.LOG -var group = "" - -func InitNacos(configFileName string) *viper.Viper { - - v := parseAgentConfigFile(configFileName, nil) - group = v.GetString("spring.cloud.nacos.config.group") - - // build the nacos connection - configClient := startNacosConnection(v) - - // get all needed nacos config and merge - allNacosConfig := getAllNacosConfig(v, group, configClient) - - for _, nacosConfigContent := range allNacosConfig { - log.Debug(fmt.Sprintf("nacos config conetent is %s", nacosConfigContent)) - - parseNacosConfigContend(nacosConfigContent, v) - } - - log.Info(fmt.Sprintf("%s config read result are %v", configFileName, v.AllSettings())) - - return v -} - -func parseAgentConfigFile(configFileName string, v *viper.Viper) *viper.Viper { - - // 使用Viper框架读取 - if v == nil { - v = 
viper.New() - } - - // 设置配置文件路径和名称 - v.SetConfigName(configFileName) - v.AddConfigPath(".") - v.SetConfigType("yaml") - - // 读取默认的总配置文件 - err := v.ReadInConfig() - if err != nil { - panic(fmt.Errorf("fatal error config file: %s", err)) - } - - return v -} - -func parseNacosConfigContend(configContent string, v *viper.Viper) *viper.Viper { - - v.SetConfigType("yaml") - - // use merge - - err := v.MergeConfig(bytes.NewBuffer([]byte(configContent))) - if err != nil { - log.Error("nacos config contend read error !", zap.Error(err)) - } - - return v -} -func startNacosConnection(v *viper.Viper) config_client.IConfigClient { - - serverAddr := v.GetString("spring.cloud.nacos.config.server-addr") - - clientConfig := constant.ClientConfig{ - //Endpoint: serverAddr, - NamespaceId: "", - TimeoutMs: v.GetUint64("spring.cloud.nacos.config.timeout"), - NotLoadCacheAtStart: true, - AppendToStdout: true, - UpdateCacheWhenEmpty: true, - //LogDir: "/tmp/nacos/log", - //CacheDir: "/tmp/nacos/cache", - Username: "nacos", - Password: "Superwmm.23", - } - - split := strings.Split(serverAddr, ":") - if len(split) != 2 { - log.Error("nacos server addr error!") - } - - port, _ := strconv.ParseUint(split[1], 10, 64) - serverConfigs := []constant.ServerConfig{ - { - IpAddr: split[0], - Port: port, - GrpcPort: port + 1000, - }, - } - - // Another way of create config client for dynamic configuration (recommend) - configClient, err := clients.NewConfigClient( - vo.NacosClientParam{ - ClientConfig: &clientConfig, - ServerConfigs: serverConfigs, - }, - ) - if err != nil { - panic(err) - } - - return configClient -} - -func getAllNacosConfig(v *viper.Viper, group string, configClient config_client.IConfigClient) []string { - - result := make([]string, 0) - - // main nacos configs - mainNacosConfigFileName := v.GetString("spring.application.name") + "-" + v.GetString("spring.profiles.active") + "." 
+ v.GetString("spring.cloud.nacos.config.file-extension") - - log.Debug(fmt.Sprintf("main nacos config file name is %s", mainNacosConfigFileName)) - configContent := getConfig(mainNacosConfigFileName, group, configClient) - result = append(result, configContent) - - // additional nacos config - additionalNacosConfig := v.Get("spring.cloud.nacos.config.extension-configs") - // 增加断言,判定map的类型 - m, ok := additionalNacosConfig.([]interface{}) - if !ok { - fmt.Println("additionalNacosConfig is not a slice") - return nil - } - - for _, addConfigMap := range m { - - realMap, _ := addConfigMap.(map[string]interface{}) - - // 拿到配置的Key - dataId := realMap["data-id"].(string) - group := realMap["group"].(string) - - // 查询 - config := getConfig(dataId, group, configClient) - result = append(result, config) - } - - return result -} - -// getConfig 从Nacos中获取相应的 -func getConfig(dataId string, group string, configClient config_client.IConfigClient) string { - - log.Debug(fmt.Sprintf("nacos config get method dataID is %s, group is %s", dataId, group)) - - content, err := configClient.GetConfig(vo.ConfigParam{ - DataId: dataId, - Group: group, - }) - if err != nil { - log.Error("nacos config get error !", zap.Error(err)) - } - - log.Debug(fmt.Sprintf("dataId %s , group %s, nacos config content is %s", dataId, group, content)) - - return content -} diff --git a/agent-go/g/global.go b/agent-go/g/global.go index cdd3915..e070da7 100644 --- a/agent-go/g/global.go +++ b/agent-go/g/global.go @@ -1,16 +1,15 @@ package g import ( - "agent-go/config" + logger2 "agent-go/logger" "github.com/panjf2000/ants/v2" "github.com/spf13/viper" ) type Global struct { - LOG *Logger - NacosConfig *viper.Viper - AgentServerInfo *config.AgentServerInfo - P *ants.Pool + AgentHasRegister bool + AgentConfig *viper.Viper + P *ants.Pool } const ( @@ -19,23 +18,22 @@ const ( ExecOmType = "EXECUTOR" StatusOmType = "STATUS" InitOmType = "INIT" + AgentOmType = "AGENT" + + BaseFuncOssUrlPrefix = "https://b2.107421.xyz/" ) -var logger, _ = NewLogger() - -var pool, _ = ants.NewPool(100, ants.WithNonblocking(true), ants.WithLogger(logger)) +var pool, _ = ants.NewPool(100, ants.WithNonblocking(false), ants.WithLogger(logger2.Log), ants.WithMaxBlockingTasks(30), ants.WithDisablePurge(true)) var G = NewGlobal( - logger, pool, ) // NewGlobal NewGlobal构造函数返回一个新的Global实例,其中包含指定的Logger。 -func NewGlobal(logger *Logger, pool *ants.Pool) *Global { +func NewGlobal(pool *ants.Pool) *Global { return &Global{ - LOG: logger, - NacosConfig: nil, - AgentServerInfo: nil, - P: pool, + AgentHasRegister: false, + AgentConfig: nil, + P: pool, } } diff --git a/agent-go/go.mod b/agent-go/go.mod index 4ffda9b..7662bdc 100644 --- a/agent-go/go.mod +++ b/agent-go/go.mod @@ -5,10 +5,10 @@ go 1.18 require ( github.com/nacos-group/nacos-sdk-go/v2 v2.2.0 github.com/panjf2000/ants/v2 v2.7.2 + github.com/shirou/gopsutil/v3 v3.23.3 github.com/spf13/viper v1.15.0 github.com/streadway/amqp v1.0.0 go.uber.org/zap v1.24.0 - gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -17,11 +17,13 @@ require ( github.com/buger/jsonparser v1.1.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/lufia/plan9stats 
v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -29,20 +31,25 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.12.2 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect + github.com/shoenig/go-m1cpu v0.1.4 // indirect github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.4.2 // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/net v0.4.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.3.0 // indirect + golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.5.0 // indirect golang.org/x/time v0.1.0 // indirect google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect @@ -50,4 +57,5 @@ require ( google.golang.org/protobuf v1.28.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/agent-go/go.sum b/agent-go/go.sum index 88dfd31..8d9c8f3 100644 --- a/agent-go/go.sum +++ b/agent-go/go.sum @@ -93,6 +93,8 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= @@ -140,6 +142,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -189,6 +192,8 @@ github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text 
v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -217,6 +222,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -242,6 +249,12 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/shirou/gopsutil/v3 v3.23.3 h1:Syt5vVZXUDXPEXpIBt5ziWsJ4LdSAAxF4l/xZeQgSEE= +github.com/shirou/gopsutil/v3 v3.23.3/go.mod h1:lSBNN6t3+D6W5e5nXTxc8KIMMVxAcS+6IJlffjRRlMU= +github.com/shoenig/go-m1cpu v0.1.4 h1:SZPIgRM2sEF9NJy50mRHu9PKGwxyyTTJIWvCtgVbozs= +github.com/shoenig/go-m1cpu v0.1.4/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZym39dbAQ= +github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= +github.com/shoenig/test v0.6.3/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -268,15 +281,22 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv 
v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -410,6 +430,7 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -433,6 +454,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -445,8 +467,9 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/agent-go/g/logger.go b/agent-go/logger/logger.go similarity index 90% rename from agent-go/g/logger.go rename to agent-go/logger/logger.go index b099413..2c3ecb9 100644 --- a/agent-go/g/logger.go +++ b/agent-go/logger/logger.go @@ -1,4 +1,4 @@ -package g +package logger import ( "fmt" @@ -11,6 +11,8 @@ type Logger struct { *zap.Logger } +var Log, _ = NewLogger() + // NewLogger creates a new Logger instance. func NewLogger() (*Logger, error) { config := zap.Config{ @@ -23,7 +25,7 @@ func NewLogger() (*Logger, error) { LevelKey: "level", TimeKey: "time", //CallerKey: "caller", - EncodeLevel: zapcore.CapitalColorLevelEncoder, + EncodeLevel: zapcore.CapitalLevelEncoder, EncodeTime: zapcore.RFC3339TimeEncoder, //EncodeCaller: zapcore.FullCallerEncoder, }, @@ -65,8 +67,9 @@ func (l *Logger) Warn(msg string, fields ...zap.Field) { } -// Error logs an error message. -func (l *Logger) Error(msg string, fields ...zap.Field) { - l.Logger.Error(msg, fields...) + +// ErrorF logs a formatted error message. +func (l *Logger) ErrorF(msg string, args ...interface{}) { + l.Logger.Error(fmt.Sprintf(msg, args...)) } // Fatal logs a fatal message and exits the program with a non-zero status code. diff --git a/agent-go/main.go b/agent-go/main.go index 4b61e8b..47f0ad9 100644 --- a/agent-go/main.go +++ b/agent-go/main.go @@ -2,12 +2,13 @@ package main import ( "agent-go/g" + logger2 "agent-go/logger" "agent-go/register" "flag" "fmt" ) -var log = g.G.LOG +var log = logger2.Log func main() { @@ -17,12 +18,12 @@ func main() { flag.Parse() // read the configuration file matching this version filename := fmt.Sprintf("octopus-agent-%s.yaml", version) - println(filename) + println("config file name is => " + filename) // load the agent configuration - g.G.NacosConfig = g.InitNacos(filename) + g.G.AgentConfig = register.ParseConfiguration(filename) // perform the initialization and registration step - g.G.AgentServerInfo = register.INIT() + AgentServerInfoCache = INIT() }
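The logger rewrite drops the colored level encoder, adds a package-level Log built at import time, and replaces the structured Error method with a printf-style ErrorF. Note the trade-off in `var Log, _ = NewLogger()`: the construction error is discarded, so a bad config would leave a nil logger. A condensed, self-contained sketch of the same pattern follows; the zap.NewProduction config here is an assumption for brevity, not the project's exact zap.Config:

package main

import (
	"fmt"

	"go.uber.org/zap"
)

// Logger embeds *zap.Logger, as agent-go/logger does.
type Logger struct{ *zap.Logger }

// ErrorF logs a printf-style error message.
func (l *Logger) ErrorF(msg string, args ...interface{}) {
	l.Logger.Error(fmt.Sprintf(msg, args...))
}

// newLogger builds a stock production logger; the real package
// assembles its own zap.Config with a CapitalLevelEncoder instead.
func newLogger() (*Logger, error) {
	z, err := zap.NewProduction()
	if err != nil {
		return nil, err
	}
	return &Logger{z}, nil
}

// package-level logger, mirroring the agent's convention;
// the construction error is deliberately dropped here too.
var Log, _ = newLogger()

func main() {
	Log.ErrorF("agent %s failed after %d retries", "octopus-agent", 3)
}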
diff --git a/agent-go/octopus-agent-dev.yaml b/agent-go/octopus-agent-dev.yaml index ac474f7..1b683bc 100644 --- a/agent-go/octopus-agent-dev.yaml +++ b/agent-go/octopus-agent-dev.yaml @@ -1,22 +1,91 @@ -spring: - application: - name: octopus-agent - profiles: - active: dev - cloud: - nacos: - config: - group: dev - config-retry-time: 3000 - file-extension: yaml - max-retry: 3 - # server-addr: "150.230.198.103:21060" - server-addr: "42.192.52.227:21060" - timeout: 5000 - config-long-poll-timeout: 5000 - extension-configs: - - group: dev - data-id: "common-dev.yaml" - server: - port: 8000 \ No newline at end of file + port: 8000 + +logging: + level: + web: info + +octopus: + message: + # agent boot up default common exchange + init_exchange: InitExchange + # agents send messages to the server through this common queue + init_to_server: InitToServer + # agent boot up default common exchange routing key, agent -> server + init_to_server_key: InitToServerKey + # agents receive messages from the server through this common queue + init_from_server: InitFromServer + # agent boot up default common exchange routing key, server -> agent + init_from_server_key: InitFromServerKey + # initialization register timeout (unit ms), default is 5 min + init_ttl: "3000000" + # Octopus Exchange Name == server communicates with agents + octopus_exchange: OctopusExchange + # Octopus Message To Server == queue and topic on which all agents send info to the server + octopus_to_server: OctopusToServer + executor: + name: executor-functions + status: + name: octopus-agent + healthy: + type: cron + cron: 10 */1 * * * ? * + start-delay: 30 + metric: + pinch: 20 + agent: + executor: + # maximum time (seconds) the agent may spend executing a single command + processMaxTimeOut: 60 + status: + app: + - Nginx/nginx + - MySQL/mysql + - Xray/xray + - OctopusAgent/octopus-agent + - Redis/redis + - RabbitMQ/rabbitmq + +spring: + main: + allow-circular-references: true + allow-bean-definition-overriding: true + rabbitmq: +# host: 42.192.52.227 + host: 192.168.35.71 + port: 20672 + username: boge + password: boge8tingH + virtual-host: / + listener: + simple: + retry: + # a failed ack re-enters the Rabbit listener + max-attempts: 2 + enabled: true + # retry interval unit ms + max-interval: 65000 + initial-interval: 65000 + +#spring: +# application: +# name: octopus-agent +# profiles: +# active: dev +# cloud: +# nacos: +# config: +# group: dev +# config-retry-time: 3000 +# file-extension: yaml +# max-retry: 3 +# # server-addr: "150.230.198.103:21060" +# server-addr: "42.192.52.227:21060" +# timeout: 5000 +# config-long-poll-timeout: 5000 +# extension-configs: +# - group: dev +# data-id: "common-dev.yaml" +# +#server: +# port: 8000 \ No newline at end of file diff --git a/agent-go/rabbitmq/MessageReaderWriter.go b/agent-go/rabbitmq/MessageReaderWriter.go deleted file mode 100644 index 87e1c63..0000000 --- a/agent-go/rabbitmq/MessageReaderWriter.go +++ /dev/null @@ -1,63 +0,0 @@ -package rabbitmq - -import ( - "fmt" - "github.com/nacos-group/nacos-sdk-go/v2/common/logger" - "github.com/streadway/amqp" -) - -// RabbitMQConn is a struct that holds the connection and channel objects -type RabbitMQConn struct { - Connection *amqp.Connection - Channel *amqp.Channel -} - -type ConnectProperty struct { - ExchangeName string - QueueName string - ExchangeType string - TopicKey string -} - -// Send 向RabbitMQ中发送消息 -func Send(conn *RabbitMQConn, connProp *ConnectProperty, message []byte) { - // 往哪里发 - channel := conn.Channel - - // 发送 - err := channel.Publish( - connProp.ExchangeName, - connProp.TopicKey, - false, - false, - amqp.Publishing{ - ContentType: "text/plain", - Body: message, - }, - ) - if err != nil { - logger.Error(fmt.Sprintf("Failed to publish a message: %v", err)) - } -} - -func Read(conn *RabbitMQConn, connProp *ConnectProperty, autoAck bool) <-chan amqp.Delivery { - - // 拿到特定的Channel - channel := conn.Channel - - // 开始读取队列中的全部消息 - msgs, err := channel.Consume( - connProp.QueueName, // 队列名称 - "", // 消费者名称 - autoAck, // auto-ack - false, // exclusive - false, // no-local - false, // no-wait - nil, // arguments - ) - if err != nil { - log.Error(fmt.Sprintf("Failed to register a consumer: %v", err)) - } - - return msgs -}
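The deleted free-standing Send/Read helpers give way to methods on RabbitQueue (see RabbitMsgQueue.go further down), so a caller now holds one value that owns both the connect properties and the channel. A minimal sketch of the consuming side of that pattern against streadway/amqp — the endpoint and queue name are placeholders, and the error handling is compressed:

package main

import (
	"log"

	"github.com/streadway/amqp"
)

// consumeLoop drains a queue and hands each body to handle,
// mirroring the Read + range-over-deliveries shape used below.
func consumeLoop(ch *amqp.Channel, queue string, handle func([]byte)) error {
	deliveries, err := ch.Consume(queue, "", true, false, false, false, nil)
	if err != nil {
		return err
	}
	for d := range deliveries {
		handle(d.Body)
	}
	return nil
}

func main() {
	// placeholder endpoint; the agent builds its own from spring.rabbitmq.*
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}
	_ = consumeLoop(ch, "OctopusToServer", func(b []byte) { log.Printf("%s", b) })
}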
agentConfig.GetString("octopus.message.octopus_exchange") + + octopusConnectProp := &ConnectProperty{ + ExchangeName: octopusExchangeName, + QueueName: agentTopicName, + ExchangeType: g.QueueTopic, + TopicKey: agentTopicName + "*", + } + + octopusMsgQueue := &RabbitQueue{ + RabbitProp: octopusConnectProp, + } + octopusMsgQueue.Connect() + + // 建立 业务消息 返回队列 + // 统一为 OctopusToServer + octopusToServerQueueName := agentConfig.GetString("octopus.message.octopus_to_server") + + octopusToServerProp := &ConnectProperty{ + ExchangeName: octopusExchangeName, + QueueName: octopusToServerQueueName, + ExchangeType: g.QueueTopic, + TopicKey: octopusToServerQueueName, + } + + OctopusToServerQueue = &RabbitQueue{ + RabbitProp: octopusToServerProp, + } + + // 开启运行时消息返回队列 + OctopusToServerQueue.Connect() + + log.InfoF("Octopus Message Business Runtime Queue is established ! => %v", OctopusToServerQueue) + + deliveries := octopusMsgQueue.Read(true) + forever := make(chan bool) + P.Submit( + func() { + // 死循环,处理Octopus Message + for delivery := range deliveries { + + var om *OctopusMessage + err := json.Unmarshal(delivery.Body, &om) + if err != nil { + log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %s", delivery.Body)) + // 保存到某处 + continue + } + + // 策略模式 处理消息 + P.Submit(func() { + om.Handle() + }) + } + }) + + // wait forever + <-forever + +} diff --git a/agent-go/rabbitmq/OctopusMessage.go b/agent-go/rabbitmq/OctopusMessage.go new file mode 100644 index 0000000..9de17b0 --- /dev/null +++ b/agent-go/rabbitmq/OctopusMessage.go @@ -0,0 +1,186 @@ +package rabbitmq + +import ( + "agent-go/executor" + "agent-go/g" + "agent-go/status" + "agent-go/utils" + "encoding/json" + "fmt" + "strings" +) + +type IOctopusMessage interface { + OctopusMsgHandler + OctopusMsgSender + OctopusMsgBuilder +} + +type OctopusMsgHandler interface { + Handle(octopusMessage *OctopusMessage) +} + +type OctopusMsgSender interface { + Send(rabbitQueue *RabbitQueue, msg []byte) + + SendToOctopusServer() +} + +type OctopusMsgBuilder interface { + Build(omType string, content interface{}) *OctopusMessage +} + +type OctopusMessage struct { + UUID string `json:"uuid"` + InitTime string `json:"init_time" format:"2023-03-21 16:38:30"` + Type string `json:"type"` + Content interface{} `json:"content"` + Result interface{} `json:"result"` + ResultCode string `json:"resultCode"` + ACTime string `json:"ac_time" format:"2023-03-21 16:38:30"` +} + +func (om *OctopusMessage) Handle() { + // 实际执行 OM handle进程 + doHandleOctopusMessage(om) +} + +func (om *OctopusMessage) Send(rabbitQueue *RabbitQueue, msg []byte) { + rabbitQueue.Send(msg) +} + +// SendToOctopusServer send octopus message back to octopusToServer queue +func (om *OctopusMessage) SendToOctopusServer() { + + // write the octopus message to bytes + octopusMessageReplayBytes, err := json.Marshal(om) + if err != nil { + log.ErrorF("replay octopus message write error => %v", err) + } + + // Send back the result to queue + OctopusToServerQueue.Send(octopusMessageReplayBytes) + +} + +func (om *OctopusMessage) Build(omType string, content interface{}) *OctopusMessage { + + // 当前时间 + curTimeString := utils.ParseDateTimeTime() + + // must write to string format, otherwise it's very hard to deserialize + bytes, err := json.Marshal(content) + if err != nil { + fmt.Sprintf("OctopusMessage Build Error ! 
%v", err) + } + + return &OctopusMessage{ + UUID: curTimeString, + InitTime: curTimeString, + Type: omType, + Content: string(bytes), + Result: nil, + ACTime: curTimeString, + } +} + +func doHandleOctopusMessage(octopusMessage *OctopusMessage) { + + switch octopusMessage.Type { + case g.InitOmType: + go func() {}() + case g.ExecOmType: + P.Submit(func() { + executorOMHandler(octopusMessage) + }) + case g.StatusOmType: + P.Submit(func() { + statusOMHandler(octopusMessage) + }) + case g.AgentOmType: + P.Submit(func() { + agentOMHandler(octopusMessage) + }, + ) + default: + P.Submit(func() { + blackHoleOMHandler(octopusMessage) + }) + } + +} + +// agentOMHandler 处理Agent的核心操作指令 +func agentOMHandler(octopusMessage *OctopusMessage) { + +} + +func executorOMHandler(octopusMessage *OctopusMessage) { + + executionMsgString := octopusMessage.Content.(string) + + var executionMessage *executor.ExecutionMessage + err := json.Unmarshal([]byte(executionMsgString), &executionMessage) + if err != nil { + log.Error(fmt.Sprintf("execution message convert to json is wrong! msg is => %s", executionMsgString)) + return + } + + // 交给后端的实际处理器处理, 再次策略 + resultLog, err := executor.Execute(executionMessage) + if err != nil { + octopusMessage.ResultCode = "200" + } else { + octopusMessage.ResultCode = "300" + } + + // send back the result log + octopusMessage.Result = resultLog + octopusMessage.ACTime = utils.ParseISOLocalDateTime() + + // Send + octopusMessage.SendToOctopusServer() +} + +func statusOMHandler(octopusMessage *OctopusMessage) { + + v, ok := (octopusMessage.Content).(string) + if !ok { + log.ErrorF("convert to string is wrong %s", v) + } + + statusMsgString := octopusMessage.Content.(string) + + var statusMessage *status.StatusMessage + err := json.Unmarshal([]byte(statusMsgString), &statusMessage) + if err != nil { + fmt.Println(err.Error()) + log.Error(fmt.Sprintf("status message convert to json is wrong! msg is => %s", octopusMessage)) + return + } + + var statusRes string + if strings.HasPrefix(statusMessage.StatusType, "P") { + // ping info + statusRes = status.Ping() + } else { + // status info + agentStatusString, _ := json.Marshal(status.ReportAppStatus()) + statusRes = string(agentStatusString) + } + + // 返回消息 + // 组装消息 + octopusMessage.ACTime = utils.ParseDateTimeTime() + octopusMessage.Result = statusRes + // 发送回去 + statusOctopusReplayMessage, _ := json.Marshal(octopusMessage) + OctopusToServerQueue.Send(statusOctopusReplayMessage) + + // 输出日志 + log.InfoF("接收到查询Agent状态的请求,结果为 => %s", statusRes) +} + +func blackHoleOMHandler(octopusMessage *OctopusMessage) { + log.Error(fmt.Sprintf("[BLACK HOLE] octopusMessage type wrong! 
msg is => %v", octopusMessage)) +} diff --git a/agent-go/rabbitmq/OctopusMsgHandler.go b/agent-go/rabbitmq/OctopusMsgHandler.go deleted file mode 100644 index cded014..0000000 --- a/agent-go/rabbitmq/OctopusMsgHandler.go +++ /dev/null @@ -1,106 +0,0 @@ -package rabbitmq - -import ( - "agent-go/config" - "agent-go/executor" - "agent-go/g" - "encoding/json" - "fmt" -) - -var P = g.G.P - -func HandleOMsg(initOMsgFromServer *config.OctopusMessage) { - - agentTopicName := initOMsgFromServer.Result.(string) - - OctopusExchange := g.G.NacosConfig.GetString("octopus.message.octopus_exchange") - - octopusConnectProp := &ConnectProperty{ - ExchangeName: OctopusExchange, - QueueName: agentTopicName, - ExchangeType: g.QueueTopic, - TopicKey: agentTopicName + "*", - } - - octopusConn, err := NewRabbitMQConn(octopusConnectProp) - if err != nil { - log.Error(fmt.Sprintf("Octopus Message Queue create Error ! => %v", octopusConnectProp)) - panic(err) - } - - // 开始接收消息 - channel := octopusConn.Channel - deliveries, err := channel.Consume( - agentTopicName, - agentTopicName, - true, - false, - false, - false, - nil, - ) - if err != nil { - return - } - - // 死循环,处理Ocotpus Message - for delivery := range deliveries { - - var om *config.OctopusMessage - err := json.Unmarshal(delivery.Body, &om) - if err != nil { - log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %s", delivery.Body)) - // 保存到某处 - continue - } - - // 策略模式 处理消息 - doHandleOctopusMessage(om) - } - -} - -func doHandleOctopusMessage(octopusMessage *config.OctopusMessage) { - - switch octopusMessage.Type { - case g.InitOmType: - go func() {}() - case g.ExecOmType: - P.Submit(func() { - executorOMHandler(octopusMessage) - }) - case g.StatusOmType: - P.Submit(func() { - statusOMHandler(octopusMessage) - }) - default: - P.Submit(func() { - blackHoleOMHandler(octopusMessage) - }) - } - -} - -func executorOMHandler(octopusMessage *config.OctopusMessage) { - - executionMsgString := octopusMessage.Content.(string) - - var executionMessage *config.ExecutionMessage - err := json.Unmarshal([]byte(executionMsgString), &executionMessage) - if err != nil { - log.Error(fmt.Sprintf("execution message convert to json is wrong! msg is => %s", executionMsgString)) - return - } - - // 交给后端的实际处理器处理, 再次策略 - executor.Execute(octopusMessage, executionMessage) -} - -func statusOMHandler(octopusMessage *config.OctopusMessage) { - -} - -func blackHoleOMHandler(octopusMessage *config.OctopusMessage) { - log.Error(fmt.Sprintf("octopusMessage type wrong! 
msg is => %v", octopusMessage)) -} diff --git a/agent-go/rabbitmq/RabbitMQConnector.go b/agent-go/rabbitmq/RabbitMQConnector.go deleted file mode 100644 index f079934..0000000 --- a/agent-go/rabbitmq/RabbitMQConnector.go +++ /dev/null @@ -1,154 +0,0 @@ -package rabbitmq - -import ( - "agent-go/g" - "fmt" - "github.com/streadway/amqp" - "strings" - "sync" -) - -var log = g.G.LOG - -// 定义全局唯一的 Singleton 实例 -var instance *amqp.Connection - -// 用 sync.Once 变量确保初始化函数只会被调用一次 -var once sync.Once - -// 初始化 Singleton 实例的函数 -func createInstance() { - // 在这里进行 Singleton 的初始化操作 - - // 获取RabbitMQ的连接地址 - rabbitMQEndpointFromG := parseRabbitMQEndpointFromG() - - // 创建全局唯一连接 RabbitMQ连接 - connection, err := amqp.Dial(rabbitMQEndpointFromG) - if err != nil { - log.Error(fmt.Sprintf("failed to connect to RabbitMQ: %v", err)) - } - - instance = connection -} - -// GetInstance 获取全局唯一的 Singleton 实例的函数 -func GetInstance() *amqp.Connection { - // 使用 sync.Once 确保 createInstance 只会被调用一次 - once.Do(createInstance) - return instance -} - -// NewRabbitMQConn creates a new RabbitMQ connection object -func NewRabbitMQConn(property *ConnectProperty) (*RabbitMQConn, error) { - - // 获取RabbitMQ的连接 - conn := GetInstance() - // 获取RabbitMQ的连接地址 - //rabbitMQEndpointFromG := parseRabbitMQEndpointFromG() - //conn, err := amqp.Dial(rabbitMQEndpointFromG) - //if err != nil { - // log.Error(fmt.Sprintf("failed to connect to RabbitMQ: %v", err)) - //} - - ch, err := conn.Channel() - if err != nil { - return nil, fmt.Errorf("failed to create RabbitMQ channel: %w", err) - } - - if err = ch.ExchangeDeclare( - property.ExchangeName, // name of the exchange - property.ExchangeType, // type of the exchange - false, // durable - false, // delete when complete - false, // internal - false, // noWait - nil, // arguments - ); err != nil { - return nil, fmt.Errorf("failed to declare RabbitMQ exchange: %w", err) - } - - _, err = ch.QueueDeclare( - property.QueueName, // name of the queue - false, // durable - false, // delete when unused - false, // exclusive - false, // noWait - nil, // arguments - ) - if err != nil { - return nil, fmt.Errorf("failed to declare RabbitMQ queue: %w", err) - } - - if err = ch.QueueBind( - property.QueueName, // name of the queue - property.TopicKey, // routing key - all topics - property.ExchangeName, // name of the exchange - false, // noWait - nil, // arguments - ); err != nil { - return nil, fmt.Errorf("failed to bind RabbitMQ queue: %w", err) - } - - return &RabbitMQConn{Connection: conn, Channel: ch}, nil -} - -// parseRabbitMQEndpoint 根据全局变量NacosConfig解析出RabbitMQ的连接地址 -func parseRabbitMQEndpointFromG() string { - - nacosConfig := g.G.NacosConfig - - var res strings.Builder - - host := nacosConfig.GetString("spring.rabbitmq.host") - port := nacosConfig.GetString("spring.rabbitmq.port") - username := nacosConfig.GetString("spring.rabbitmq.username") - password := nacosConfig.GetString("spring.rabbitmq.password") - virtualHost := nacosConfig.GetString("spring.rabbitmq.virtual-host") - - // amqp://{username}:{password}@{hostname}:{port}/{virtual_host} - res.WriteString("amqp://") - res.WriteString(username) - res.WriteString(":") - res.WriteString(password) - res.WriteString("@") - res.WriteString(host) - res.WriteString(":") - res.WriteString(port) - res.WriteString("/") - res.WriteString(virtualHost) - - s := res.String() - log.Debug(fmt.Sprintf("generate RabbitMQ endpoint is %s", s)) - return s -} - -func CloseChannel(conn *RabbitMQConn) error { - var err error - - if conn.Channel != nil { - if err = 
conn.Channel.Close(); err != nil { - log.Error(fmt.Sprintf("Failed to close RabbitMQ channel: %v", err)) - } - } - return err -} - -// CloseRabbitMQAll closes the RabbitMQ connection and channel -func (r *RabbitMQConn) CloseRabbitMQAll() error { - var err error - - if r.Channel != nil { - if err = r.Channel.Close(); err != nil { - log.Error(fmt.Sprintf("Failed to close RabbitMQ channel: %v", err)) - } - } - - if r.Connection != nil { - if err = r.Connection.Close(); err != nil { - log.Error(fmt.Sprintf("Failed to close RabbitMQ connection: %v", err)) - } - } - - return err -}
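The replacement file below keeps a single shared *amqp.Connection guarded by sync.Once, with each RabbitQueue opening its own channel on it. One design consequence worth noting: once.Do never re-runs, so if the first dial fails the nil connection is cached for the life of the process. A tiny sketch of the once-guarded singleton shape, with a string standing in for the connection:

package main

import (
	"fmt"
	"sync"
)

var (
	instance string    // stands in for *amqp.Connection
	once     sync.Once // guarantees a single initialization
)

// getInstance lazily initializes the shared value exactly once;
// a failed initialization would also be cached forever.
func getInstance() string {
	once.Do(func() {
		fmt.Println("dialing once")
		instance = "amqp://user:pass@host:5672/" // placeholder endpoint
	})
	return instance
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(getInstance()) // "dialing once" prints a single time
	}
}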
diff --git a/agent-go/rabbitmq/RabbitMsgQueue.go b/agent-go/rabbitmq/RabbitMsgQueue.go new file mode 100644 index 0000000..8edff5f --- /dev/null +++ b/agent-go/rabbitmq/RabbitMsgQueue.go @@ -0,0 +1,214 @@ +package rabbitmq + +import ( + "agent-go/g" + logger2 "agent-go/logger" + "fmt" + "github.com/streadway/amqp" + "strings" + "sync" +) + +type RabbitMQ interface { + RabbitSendWriter + + RabbitConnectCloser +} + +type RabbitSendWriter interface { + Send(message []byte) + + Read(autoAck bool) <-chan amqp.Delivery +} + +type RabbitConnectCloser interface { + Connect() + + Close() error +} + +type RabbitQueue struct { + RabbitConn *RabbitMQConn + RabbitProp *ConnectProperty +} + +// RabbitMQConn is a struct that holds the connection and channel objects +type RabbitMQConn struct { + Connection *amqp.Connection + Channel *amqp.Channel +} + +type ConnectProperty struct { + ExchangeName string + QueueName string + ExchangeType string + TopicKey string +} + +var log = logger2.Log + +// the package-wide singleton connection instance +var instance *amqp.Connection + +// sync.Once guarantees the initializer runs exactly once +var once sync.Once + +// createInstance initializes the singleton connection +func createInstance() { + // resolve the RabbitMQ endpoint from the agent configuration + rabbitMQEndpointFromG := parseRabbitMQEndpointFromG() + + // dial the single shared RabbitMQ connection + connection, err := amqp.Dial(rabbitMQEndpointFromG) + if err != nil { + log.Error(fmt.Sprintf("failed to connect to RabbitMQ: %v", err)) + } + + instance = connection +} + +// GetInstance returns the shared connection, creating it on first use +func GetInstance() *amqp.Connection { + // once.Do ensures createInstance is called only once + once.Do(createInstance) + return instance +} + +// Connect opens a channel on the shared connection and declares the exchange, queue and binding +func (r *RabbitQueue) Connect() { + + // obtain the shared RabbitMQ connection + conn := GetInstance() + + ch, err := conn.Channel() + if err != nil { + log.Error(fmt.Sprintf("failed to create RabbitMQ channel: %v", err)) + } + + if err = ch.ExchangeDeclare( + r.RabbitProp.ExchangeName, // name of the exchange + r.RabbitProp.ExchangeType, // type of the exchange + true, // durable + false, // delete when complete + false, // internal + false, // noWait + nil, // arguments + ); err != nil { + log.Error(fmt.Sprintf("failed to declare exchange !: %v", err)) + } + + _, err = ch.QueueDeclare( + r.RabbitProp.QueueName, // name of the queue + true, // durable + false, // delete when unused + false, // exclusive + false, // noWait + nil, // arguments + ) + if err != nil { + log.Error(fmt.Sprintf("failed to declare RabbitMQ queue: %v", err)) + } + + if err = ch.QueueBind( + r.RabbitProp.QueueName, // name of the queue + r.RabbitProp.TopicKey, // routing key - all topics + r.RabbitProp.ExchangeName, // name of the exchange + false, // noWait + nil, // arguments + ); err != nil { + log.Error(fmt.Sprintf("failed to bind RabbitMQ queue: %v", err)) + } + + r.RabbitConn = &RabbitMQConn{ + Connection: conn, + Channel: ch, + } +} + +func (r *RabbitQueue) Close() error { + var err error + + if r.RabbitConn.Channel != nil { + if err = r.RabbitConn.Channel.Close(); err != nil { + log.Error(fmt.Sprintf("Failed to close RabbitMQ channel: %v", err)) + } + } + return err +} + +// Send publishes a message to this queue's exchange and routing key +func (r *RabbitQueue) Send(message []byte) { + // the channel to publish on + channel := r.RabbitConn.Channel + + // publish + err := channel.Publish( + r.RabbitProp.ExchangeName, + r.RabbitProp.TopicKey, + false, + false, + amqp.Publishing{ + ContentType: "text/plain", + Body: message, + }, + ) + if err != nil { + log.Error(fmt.Sprintf("Failed to publish a message: %v", err)) + } +} + +func (r *RabbitQueue) Read(autoAck bool) <-chan amqp.Delivery { + + // grab this queue's channel + channel := r.RabbitConn.Channel + + // start consuming all messages from the queue + msgs, err := channel.Consume( + r.RabbitProp.QueueName, // queue name + "", // consumer name + autoAck, // auto-ack + false, // exclusive + false, // no-local + false, // no-wait + nil, // arguments + ) + if err != nil { + log.Error(fmt.Sprintf("Failed to register a consumer: %v", err)) + } + + return msgs +} + +// parseRabbitMQEndpointFromG builds the RabbitMQ endpoint from the global agent configuration +func parseRabbitMQEndpointFromG() string { + + agentConfig := g.G.AgentConfig + + var res strings.Builder + + host := agentConfig.GetString("spring.rabbitmq.host") + port := agentConfig.GetString("spring.rabbitmq.port") + username := agentConfig.GetString("spring.rabbitmq.username") + password := agentConfig.GetString("spring.rabbitmq.password") + virtualHost := agentConfig.GetString("spring.rabbitmq.virtual-host") + + // amqp://{username}:{password}@{hostname}:{port}/{virtual_host} + res.WriteString("amqp://") + res.WriteString(username) + res.WriteString(":") + res.WriteString(password) + res.WriteString("@") + res.WriteString(host) + res.WriteString(":") + res.WriteString(port) + res.WriteString("/") + res.WriteString(virtualHost) + + s := res.String() + + log.Debug(fmt.Sprintf("generate RabbitMQ endpoint is %s", s)) + + return s +} diff --git a/agent-go/register/AgentIntitilization.go b/agent-go/register/AgentIntitilization.go deleted file mode 100644 index 36ad407..0000000 --- a/agent-go/register/AgentIntitilization.go +++ /dev/null @@ -1,173 +0,0 @@ -package register - -import ( - "agent-go/config" - "agent-go/g" - "agent-go/rabbitmq" - "encoding/json" - "fmt" - "gopkg.in/yaml.v3" - "io/ioutil" -) - -var omType = g.InitOmType -var log = g.G.LOG - -func INIT() *config.AgentServerInfo { - - // 获取系统的环境变量 - agentServerInfo := parseAgentServerInfo() - - nacosConfig := g.G.NacosConfig - - initToServerProp := &rabbitmq.ConnectProperty{ - ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), - QueueName: nacosConfig.GetString("octopus.message.init_to_server"), - ExchangeType: g.QueueDirect, - TopicKey: nacosConfig.GetString("octopus.message.init_to_server_key"), - } - - initFromServerProp := &rabbitmq.ConnectProperty{ - ExchangeName: nacosConfig.GetString("octopus.message.init_exchange"), - QueueName: nacosConfig.GetString("octopus.message.init_from_server"), - ExchangeType: g.QueueDirect, - TopicKey: nacosConfig.GetString("octopus.message.init_from_server_key"), - } - - // 建立RabbitMQ的连接 - // defer 关闭初始化连接 - initToServer, err := rabbitmq.NewRabbitMQConn( - initToServerProp, - ) - if err != nil { - log.Error("init to server queue established error!") - panic(err) - } - //defer rabbitmq.CloseChannel(initToServer) - //defer rabbitmq.CloseChannel(initFromServer) - - // 组装OctopusMessage - var octopusMsg *config.OctopusMessage - octopusMsg = octopusMsg.BuildOctopusMsg( - omType, - agentServerInfo, - ) - - msgBytes, err := 
json.Marshal(octopusMsg) - if err != nil { - log.Error(fmt.Sprintf("octopus message convert to json is wrong! msg is => %v", octopusMsg)) - } - - log.Debug(fmt.Sprintf("Prepare to send init message to server! ==> %s", string(msgBytes))) - - // 发送OM至MQ中 - rabbitmq.Send( - initToServer, - initToServerProp, - msgBytes, - ) - - // 监听初始化连接中的信息 - initFromServer, err := rabbitmq.NewRabbitMQConn( - initFromServerProp, - ) - if err != nil { - log.Error("init from server queue established error!") - panic(err) - } - - // 建立运行时RabbitMQ连接 - handleInitMsgFromServer(initFromServer, initFromServerProp, initToServer, initToServerProp, agentServerInfo) - - return agentServerInfo -} - -// handleInitMsgFromServer 处理从Server接收的注册信息 -func handleInitMsgFromServer(initFromServer *rabbitmq.RabbitMQConn, initFromServerProp *rabbitmq.ConnectProperty, initToServer *rabbitmq.RabbitMQConn, initToServerProp *rabbitmq.ConnectProperty, agentServerInfo *config.AgentServerInfo) { - - deliveries := rabbitmq.Read(initFromServer, initFromServerProp, false) - - forever := make(chan bool) - - go func() { - - // 同步很多抢占注册的情况 - for delivery := range deliveries { - - log.Debug(fmt.Sprintf("message received from server is %s", string(delivery.Body))) - - var om *config.OctopusMessage - err := json.Unmarshal(delivery.Body, &om) - if err != nil { - log.Error(fmt.Sprintf("parse init message from server wroong, message is => %s ", - string(delivery.Body))) - } - - var serverInfo config.AgentServerInfo - - s, _ := om.Content.(string) - cc := json.Unmarshal([]byte(s), &serverInfo) - if cc != nil { - log.Error(fmt.Sprintf("parse init message from server wroong, message is => %v ", cc)) - } - - serverName := serverInfo.ServerName - - // 处理OM信息 - if om != nil && om.Type == g.InitOmType && serverName == agentServerInfo.ServerName { - // 是本机的注册回复信息 - - // 建立运行时RabbitMQ连接 - // change to async - go rabbitmq.HandleOMsg(om) - - // 手动确认信息 - delivery.Ack(false) - - // 手动关闭 注册队列的连接 - shutdownRegisterQueueConnection(initFromServer, initFromServerProp, initToServer, initToServerProp) - - return - } - - // 不是自身的 注册回复信息 -- 拒绝 - log.Warn(fmt.Sprintf("OctopusMessage INIT from server not this agent ! => %v, ==>%s", om, delivery.Body)) - delivery.Nack(false, true) - } - - }() - - // wait forever - <-forever - -} - -// shutdownRegisterQueueConnection 关闭初始化连接的两个队列 -func shutdownRegisterQueueConnection(initFromServer *rabbitmq.RabbitMQConn, initFromServerProp *rabbitmq.ConnectProperty, initToServer *rabbitmq.RabbitMQConn, initToServerProp *rabbitmq.ConnectProperty) { - -} - -func parseAgentServerInfo() *config.AgentServerInfo { - - // 约定文件地址为 /etc/environment.d/octopus-agent.conf - // 目前使用 - var agentServerInfo *config.AgentServerInfo - yamlFile, err := ioutil.ReadFile("C:\\Users\\wdd\\IdeaProjects\\ProjectOctopus\\agent-go\\server-env.yaml") - if err != nil { - panic(fmt.Errorf("failed to read YAML file: %v", err)) - } - - err = yaml.Unmarshal(yamlFile, &agentServerInfo) - if err != nil { - panic(fmt.Errorf("failed to unmarshal YAML: %v", err)) - } - - jsonFormat, err := json.Marshal(agentServerInfo) - if err != nil { - log.Error(fmt.Sprintf("agent server info convert error ! 
agentserverinfo is %v", agentServerInfo)) - panic(err) - } - log.Info(fmt.Sprintf("agent server info is %v", string(jsonFormat))) - - return agentServerInfo -} diff --git a/agent-go/config/AgentServerInfo.go b/agent-go/register/AgentServerInfo.go similarity index 93% rename from agent-go/config/AgentServerInfo.go rename to agent-go/register/AgentServerInfo.go index ad20396..b39825e 100644 --- a/agent-go/config/AgentServerInfo.go +++ b/agent-go/register/AgentServerInfo.go @@ -1,4 +1,4 @@ -package config +package register type AgentServerInfo struct { ServerName string `json:"serverName" yaml:"serverName"` @@ -22,5 +22,5 @@ type AgentServerInfo struct { Comment string `json:"comment" yaml:"comment"` MachineID string `json:"machineId" yaml:"machineId"` AgentVersion string `json:"agentVersion" yaml:"agentVersion"` - AgentTopicName string `json:"agentTopicName" yaml:"agentTopicName"` + TopicName string `json:"topicName" yaml:"topicName"` } diff --git a/agent-go/register/ConfigParser.go b/agent-go/register/ConfigParser.go new file mode 100644 index 0000000..a876040 --- /dev/null +++ b/agent-go/register/ConfigParser.go @@ -0,0 +1,34 @@ +package register + +import ( + "fmt" + "github.com/spf13/viper" +) + +func ParseConfiguration(configFileName string) *viper.Viper { + + agentConfig := parseAgentConfigFile(configFileName, nil) + + return agentConfig +} + +func parseAgentConfigFile(configFileName string, v *viper.Viper) *viper.Viper { + + // 使用Viper框架读取 + if v == nil { + v = viper.New() + } + + // 设置配置文件路径和名称 + v.SetConfigName(configFileName) + v.AddConfigPath(".") + v.SetConfigType("yaml") + + // 读取默认的总配置文件 + err := v.ReadInConfig() + if err != nil { + panic(fmt.Errorf("fatal error config file: %s", err)) + } + + return v +} diff --git a/agent-go/register/NacosInitalizationDeprecated.go b/agent-go/register/NacosInitalizationDeprecated.go new file mode 100644 index 0000000..2aa11b3 --- /dev/null +++ b/agent-go/register/NacosInitalizationDeprecated.go @@ -0,0 +1,172 @@ +package register + +// +//import ( +// "bytes" +// "fmt" +// "github.com/nacos-group/nacos-sdk-go/v2/clients" +// "github.com/nacos-group/nacos-sdk-go/v2/clients/config_client" +// "github.com/nacos-group/nacos-sdk-go/v2/common/constant" +// "github.com/nacos-group/nacos-sdk-go/v2/vo" +// "github.com/spf13/viper" +// "go.uber.org/zap" +// "strconv" +// "strings" +//) +// +//var group = "" +// +//func InitNacos(configFileName string) *viper.Viper { +// +// v := parseAgentConfigFile(configFileName, nil) +// group = v.GetString("spring.cloud.nacos.config.group") +// +// // build the nacos connection +// configClient := startNacosConnection(v) +// +// // get all needed nacos config and merge +// allNacosConfig := getAllNacosConfig(v, group, configClient) +// +// for _, nacosConfigContent := range allNacosConfig { +// log.Debug(fmt.Sprintf("nacos config conetent is %s", nacosConfigContent)) +// +// parseNacosConfigContend(nacosConfigContent, v) +// } +// +// log.Info(fmt.Sprintf("%s config read result are %v", configFileName, v.AllSettings())) +// +// return v +//} +// +//func parseAgentConfigFile(configFileName string, v *viper.Viper) *viper.Viper { +// +// // 使用Viper框架读取 +// if v == nil { +// v = viper.New() +// } +// +// // 设置配置文件路径和名称 +// v.SetConfigName(configFileName) +// v.AddConfigPath(".") +// v.SetConfigType("yaml") +// +// // 读取默认的总配置文件 +// err := v.ReadInConfig() +// if err != nil { +// panic(fmt.Errorf("fatal error config file: %s", err)) +// } +// +// return v +//} +// +//func parseNacosConfigContend(configContent string, v 
*viper.Viper) *viper.Viper { +// +// v.SetConfigType("yaml") +// +// // use merge +// +// err := v.MergeConfig(bytes.NewBuffer([]byte(configContent))) +// if err != nil { +// log.Error("nacos config contend read error !", zap.Error(err)) +// } +// +// return v +//} +//func startNacosConnection(v *viper.Viper) config_client.IConfigClient { +// +// serverAddr := v.GetString("spring.cloud.nacos.config.server-addr") +// +// clientConfig := constant.ClientConfig{ +// //Endpoint: serverAddr, +// NamespaceId: "", +// TimeoutMs: v.GetUint64("spring.cloud.nacos.config.timeout"), +// NotLoadCacheAtStart: true, +// AppendToStdout: true, +// UpdateCacheWhenEmpty: true, +// //LogDir: "/tmp/nacos/log", +// //CacheDir: "/tmp/nacos/cache", +// Username: "nacos", +// Password: "Superwmm.23", +// } +// +// split := strings.Split(serverAddr, ":") +// if len(split) != 2 { +// log.Error("nacos server addr error!") +// } +// +// port, _ := strconv.ParseUint(split[1], 10, 64) +// serverConfigs := []constant.ServerConfig{ +// { +// IpAddr: split[0], +// Port: port, +// GrpcPort: port + 1000, +// }, +// } +// +// // Another way of create config client for dynamic configuration (recommend) +// configClient, err := clients.NewConfigClient( +// vo.NacosClientParam{ +// ClientConfig: &clientConfig, +// ServerConfigs: serverConfigs, +// }, +// ) +// if err != nil { +// panic(err) +// } +// +// return configClient +//} +// +//func getAllNacosConfig(v *viper.Viper, group string, configClient config_client.IConfigClient) []string { +// +// result := make([]string, 0) +// +// // main nacos configs +// mainNacosConfigFileName := v.GetString("spring.application.name") + "-" + v.GetString("spring.profiles.active") + "." + v.GetString("spring.cloud.nacos.config.file-extension") +// +// log.Debug(fmt.Sprintf("main nacos config file name is %s", mainNacosConfigFileName)) +// configContent := getConfig(mainNacosConfigFileName, group, configClient) +// result = append(result, configContent) +// +// // additional nacos config +// additionalNacosConfig := v.Get("spring.cloud.nacos.config.extension-configs") +// // 增加断言,判定map的类型 +// m, ok := additionalNacosConfig.([]interface{}) +// if !ok { +// fmt.Println("additionalNacosConfig is not a slice") +// return nil +// } +// +// for _, addConfigMap := range m { +// +// realMap, _ := addConfigMap.(map[string]interface{}) +// +// // 拿到配置的Key +// dataId := realMap["data-id"].(string) +// group := realMap["group"].(string) +// +// // 查询 +// config := getConfig(dataId, group, configClient) +// result = append(result, config) +// } +// +// return result +//} +// +//// getConfig 从Nacos中获取相应的 +//func getConfig(dataId string, group string, configClient config_client.IConfigClient) string { +// +// log.Debug(fmt.Sprintf("nacos config get method dataID is %s, group is %s", dataId, group)) +// +// content, err := configClient.GetConfig(vo.ConfigParam{ +// DataId: dataId, +// Group: group, +// }) +// if err != nil { +// log.Error("nacos config get error !", zap.Error(err)) +// } +// +// log.Debug(fmt.Sprintf("dataId %s , group %s, nacos config content is %s", dataId, group, content)) +// +// return content +//} diff --git a/agent-go/server-env.yaml b/agent-go/server-env.yaml index 4bf3f5d..2144b59 100644 --- a/agent-go/server-env.yaml +++ b/agent-go/server-env.yaml @@ -1,4 +1,4 @@ -serverName: "Chengdu-amd64-98" +serverName: "Chengdu-amd64-90" serverIpPbV4: "183.220.149.17" serverIpInV4: "" serverIpPbV6: "" @@ -17,6 +17,6 @@ osKernelInfo: "5.4.0-135-generic" tcpControl: "cubic" virtualization: 
"Dedicated" ioSpeed: "150 MB/s" -machineId: "" +machineId: "fakemachinid123" agentVersion: "" -agentTopicName: "" +topicName: "" diff --git a/agent-go/status/CPU.go b/agent-go/status/CPU.go new file mode 100644 index 0000000..710b5e7 --- /dev/null +++ b/agent-go/status/CPU.go @@ -0,0 +1,50 @@ +package status + +import ( + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/load" +) + +type CPUStatus struct { + NumCores int + CPUInfo []cpu.InfoStat + CPUPercent float64 + CPULoads *load.AvgStat + SystemLoads *load.AvgStat +} + +func GetCPUStatus() (*CPUStatus, error) { + numCores, err := cpu.Counts(true) + if err != nil { + return nil, err + } + + cpuInfo, err := cpu.Info() + if err != nil { + return nil, err + } + + cpuPercent, err := cpu.Percent(0, false) + if err != nil { + return nil, err + } + + cpuLoads, err := load.Avg() + if err != nil { + return nil, err + } + + systemLoads, err := load.Avg() + if err != nil { + return nil, err + } + + return &CPUStatus{ + NumCores: numCores, + CPUInfo: cpuInfo, + CPUPercent: cpuPercent[0], + CPULoads: cpuLoads, + SystemLoads: systemLoads, + }, nil + +} diff --git a/agent-go/status/CPU_test.go b/agent-go/status/CPU_test.go new file mode 100644 index 0000000..0183fb3 --- /dev/null +++ b/agent-go/status/CPU_test.go @@ -0,0 +1,22 @@ +package status + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestGetCPUStatus(t *testing.T) { + cpuStatus, err := GetCPUStatus() + if err != nil { + return + } + + marshalIndent, err := json.MarshalIndent(cpuStatus, "", " ") + if err != nil { + fmt.Printf("error") + } + + fmt.Println(string(marshalIndent)) + +} diff --git a/agent-go/status/Disk.go b/agent-go/status/Disk.go new file mode 100644 index 0000000..09d93ef --- /dev/null +++ b/agent-go/status/Disk.go @@ -0,0 +1,48 @@ +package status + +import ( + "fmt" + "github.com/shirou/gopsutil/v3/disk" + "runtime" + "time" +) + +type DiskStatus struct { + Total uint64 + Used uint64 + LogicalDisk []disk.PartitionStat +} + +func GetDiskStatus() *DiskStatus { + + ds := &DiskStatus{} + + // Get disk usage + du, _ := disk.Usage("/") + ds.Total = du.Total + ds.Used = du.Used + + // Get logical disk info for Linux systems + if runtime.GOOS == "linux" { + ld, _ := disk.Partitions(true) + ds.LogicalDisk = ld + } + + return ds +} + +func CalculateDiskIO() { + + // Get initial disk IO counters + counters1, _ := disk.IOCounters() + time.Sleep(time.Second) + // Get disk IO counters after 1 second + counters2, _ := disk.IOCounters() + + for device, counter1 := range counters1 { + counter2 := counters2[device] + readSpeed := float64(counter2.ReadBytes-counter1.ReadBytes) / 1024 + writeSpeed := float64(counter2.WriteBytes-counter1.WriteBytes) / 1024 + fmt.Printf("%v: read %vKB/s, write %vKB/s\n", device, readSpeed, writeSpeed) + } +} diff --git a/agent-go/status/Disk_test.go b/agent-go/status/Disk_test.go new file mode 100644 index 0000000..dbf191a --- /dev/null +++ b/agent-go/status/Disk_test.go @@ -0,0 +1,22 @@ +package status + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestGetDiskStatus(t *testing.T) { + + ds := GetDiskStatus() + fmt.Printf("Total: %v, Used: %v\n", ds.Total, ds.Used) + fmt.Printf("Logical Disks: %v\n", ds.LogicalDisk) + + marshalIndent, err := json.MarshalIndent(ds, "", " ") + if err != nil { + fmt.Printf("error") + } + + fmt.Println(string(marshalIndent)) + +} diff --git a/agent-go/status/Memory.go b/agent-go/status/Memory.go new file mode 100644 index 0000000..4f65d65 --- /dev/null +++ b/agent-go/status/Memory.go @@ -0,0 
+1,44 @@ +package status + +import ( + "fmt" + "github.com/shirou/gopsutil/v3/mem" +) + +type MemoryStatus struct { + TotalMemory uint64 + UsedMemory uint64 + AvailableMemory uint64 + TotalVirtualMemory uint64 + UsedVirtualMemory uint64 +} + +func GetMemoryStatus() (*MemoryStatus, error) { + memoryStatus := &MemoryStatus{} + + // VirtualMemory reports physical RAM usage + virtualMemoryStat, err := mem.VirtualMemory() + if err != nil { + return memoryStatus, err + } + + // SwapMemory backs the *VirtualMemory fields + swapMemoryStat, err := mem.SwapMemory() + if err != nil { + return memoryStatus, err + } + + memoryStatus.TotalMemory = virtualMemoryStat.Total + memoryStatus.UsedMemory = virtualMemoryStat.Used + memoryStatus.AvailableMemory = virtualMemoryStat.Available + memoryStatus.TotalVirtualMemory = swapMemoryStat.Total + memoryStatus.UsedVirtualMemory = swapMemoryStat.Used + + return memoryStatus, nil +} + +func FormatMemorySize(size uint64) string { + const unit = 1024 + if size < unit { + return fmt.Sprintf("%d B", size) + } + div, exp := int64(unit), 0 + for n := size / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(size)/float64(div), "KMGTPE"[exp]) +} diff --git a/agent-go/status/Memory_test.go b/agent-go/status/Memory_test.go new file mode 100644 index 0000000..baa2a61 --- /dev/null +++ b/agent-go/status/Memory_test.go @@ -0,0 +1,28 @@ +package status + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestGetMemoryStatus(t *testing.T) { + + memoryStatus, err := GetMemoryStatus() + if err != nil { + return + } + + fmt.Printf("Total Memory: %s\n", FormatMemorySize(memoryStatus.TotalMemory)) + fmt.Printf("Used Memory: %s\n", FormatMemorySize(memoryStatus.UsedMemory)) + fmt.Printf("Available Memory: %s\n", FormatMemorySize(memoryStatus.AvailableMemory)) + fmt.Printf("Total Virtual Memory: %s\n", FormatMemorySize(memoryStatus.TotalVirtualMemory)) + fmt.Printf("Used Virtual Memory: %s\n", FormatMemorySize(memoryStatus.UsedVirtualMemory)) + + marshalIndent, err := json.MarshalIndent(memoryStatus, "", " ") + if err != nil { + fmt.Printf("error") + } + + fmt.Println(string(marshalIndent)) +}
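FormatMemorySize here and formatBytes in Network.go below are the same base-1024 humanizer duplicated in two files; a shared helper would remove the copy. A self-contained form of that function with a quick check of its output (the sample values are illustrative only):

package main

import "fmt"

// formatSize is the shared form of FormatMemorySize/formatBytes:
// repeatedly divide by 1024 and pick the matching unit letter.
func formatSize(n uint64) string {
	const unit = 1024
	if n < unit {
		return fmt.Sprintf("%d B", n)
	}
	div, exp := uint64(unit), 0
	for v := n / unit; v >= unit; v /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB", float64(n)/float64(div), "KMGTPE"[exp])
}

func main() {
	fmt.Println(formatSize(999))       // 999 B
	fmt.Println(formatSize(7_600_000)) // 7.2 MB
}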
diff --git a/agent-go/status/Network.go b/agent-go/status/Network.go new file mode 100644 index 0000000..69186d4 --- /dev/null +++ b/agent-go/status/Network.go @@ -0,0 +1,122 @@ +package status + +import ( + "fmt" + "github.com/shirou/gopsutil/v3/net" + net2 "net" + "strings" + "time" +) + +type NetworkStatus struct { + Name string `json:"name,omitempty"` + InternalIPv4 []string `json:"internal_ip_v4,omitempty"` + InternalIPv6 []string `json:"internal_ip_v6,omitempty"` + ExternalIPv4 []string `json:"external_ip_v4,omitempty"` + ExternalIPv6 []string `json:"external_ip_v6,omitempty"` + Mac string `json:"mac,omitempty"` + Sent uint64 `json:"sent,omitempty"` + Recv uint64 `json:"recv,omitempty"` + SentRate string `json:"sent_rate,omitempty"` + RecvRate string `json:"recv_rate,omitempty"` +} + +func GetNetworkStatus() (*NetworkStatus, error) { + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + var mainInterface net.InterfaceStat + for _, intf := range interfaces { + if strings.HasPrefix(intf.Name, "ens") || strings.HasPrefix(intf.Name, "eth") || strings.HasPrefix(intf.Name, "eno") { + mainInterface = intf + break + } + } + var allAddrs []string + log.DebugF("all main interface address are %v", mainInterface.Addrs) + for _, addr := range mainInterface.Addrs { + allAddrs = append(allAddrs, addr.Addr) + } + ipv4List, ipv6List := GetInternalIpAddrs(allAddrs) + log.DebugF("ipv4 list are => %v, ipv6 list are => %v", ipv4List, ipv6List) + + counters, err := net.IOCounters(true) + if err != nil { + return nil, err + } + var sent uint64 + var recv uint64 + for _, counter := range counters { + if counter.Name == mainInterface.Name { + sent = counter.BytesSent + recv = counter.BytesRecv + break + } + } + + // sleep 3 seconds between the two counter samples + time.Sleep(3 * time.Second) + + var sentAfter uint64 + var recvAfter uint64 + countersAfter, err := net.IOCounters(true) + if err != nil { + return nil, err + } + for _, counter := range countersAfter { + if counter.Name == mainInterface.Name { + sentAfter = counter.BytesSent + recvAfter = counter.BytesRecv + break + } + } + sendRate := formatBytes((sentAfter-sent)/3) + "/s" + recvRate := formatBytes((recvAfter-recv)/3) + "/s" + + info := &NetworkStatus{ + Name: mainInterface.Name, + InternalIPv4: ipv4List, + InternalIPv6: ipv6List, + ExternalIPv4: nil, + ExternalIPv6: nil, + Mac: mainInterface.HardwareAddr, + Sent: sent, + Recv: recv, + SentRate: sendRate, + RecvRate: recvRate, + } + return info, nil +} + +func formatBytes(bytes uint64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} + +func GetInternalIpAddrs(addresses []string) ([]string, []string) { + var ipv4 []string + var ipv6 []string + for _, addr := range addresses { + // addr is expected in CIDR form (e.g. 10.0.0.1/24); anything else is skipped + ip, _, err := net2.ParseCIDR(addr) + if err != nil { + continue + } + if ip.To4() != nil { + ipv4 = append(ipv4, addr) + } else if ip.To16() != nil { + ipv6 = append(ipv6, addr) + } + } + return ipv4, ipv6 +} diff --git a/agent-go/status/Network_test.go b/agent-go/status/Network_test.go new file mode 100644 index 0000000..b3d3ffd --- /dev/null +++ b/agent-go/status/Network_test.go @@ -0,0 +1,22 @@ +package status + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestGetNetworkInfo(t *testing.T) { + + networkInfo, err := GetNetworkStatus() + if err != nil { + return + } + + marshalIndent, err := json.MarshalIndent(networkInfo, "", " ") + if err != nil { + fmt.Printf("error") + } + + fmt.Println(string(marshalIndent)) +}
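GetNetworkStatus derives its send/receive rates by sampling the interface counters twice, a fixed three seconds apart, and dividing the delta. The same technique as a reusable helper with a configurable interval — a sketch only; "eth0" is a placeholder NIC name, and matching is done by interface name rather than slice position:

package main

import (
	"fmt"
	"time"

	"github.com/shirou/gopsutil/v3/net"
)

// sampleRate reads per-NIC counters twice, interval apart, and
// returns bytes/sec sent and received for the named interface.
func sampleRate(nic string, interval time.Duration) (sent, recv float64, err error) {
	before, err := net.IOCounters(true)
	if err != nil {
		return 0, 0, err
	}
	time.Sleep(interval)
	after, err := net.IOCounters(true)
	if err != nil {
		return 0, 0, err
	}

	// index the second sample by interface name
	afterByName := make(map[string]net.IOCountersStat, len(after))
	for _, a := range after {
		afterByName[a.Name] = a
	}

	secs := interval.Seconds()
	for _, b := range before {
		if b.Name != nic {
			continue
		}
		a, ok := afterByName[b.Name]
		if !ok {
			break
		}
		return float64(a.BytesSent-b.BytesSent) / secs,
			float64(a.BytesRecv-b.BytesRecv) / secs, nil
	}
	return 0, 0, fmt.Errorf("interface %q not found", nic)
}

func main() {
	s, r, err := sampleRate("eth0", time.Second) // placeholder NIC
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("sent %.0f B/s, recv %.0f B/s\n", s, r)
}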
diff --git a/agent-go/status/Status.go b/agent-go/status/Status.go new file mode 100644 index 0000000..8ab5626 --- /dev/null +++ b/agent-go/status/Status.go @@ -0,0 +1,63 @@ +package status + +import ( + logger2 "agent-go/logger" + "fmt" + "time" +) + +var log = logger2.Log + +type StatusMessage struct { + // StatusType selects which kind of status to return: + // PING => liveness check, METRIC => short-interval metrics, + // ALL => the full agent status report. + StatusType string `json:"statusType,omitempty"` + MetricRepeatCount int `json:"metricRepeatCount,omitempty"` + // exported so the json tag takes effect + MetricRepeatPinch int `json:"metricRepeatPinch,omitempty"` +} + +type AgentStatus struct { + CPUStatus *CPUStatus + MemoryStatus *MemoryStatus + NetworkStatus *NetworkStatus + DiskStatus *DiskStatus +} + +func ConvertToFormat(eventData float64) string { + duration := time.Duration(eventData) * time.Second + + hours := int(duration.Hours()) + minutes := int(duration.Minutes()) % 60 + seconds := int(duration.Seconds()) % 60 + milliseconds := duration.Milliseconds() % 1000 + return fmt.Sprintf("%02d:%02d:%02d,%03d", hours, minutes, seconds, milliseconds) +} + +func Ping() string { + return "PONG" +} + +func ReportAppStatus() *AgentStatus { + + cpuStatus, cpuerr := GetCPUStatus() + memoryStatus, memerr := GetMemoryStatus() + networkStatus, neterr := GetNetworkStatus() + if cpuerr != nil || memerr != nil || neterr != nil { + log.ErrorF("failed to collect agent status! cpu: %v, mem: %v, net: %v", cpuerr, memerr, neterr) + return nil + } + diskStatus := GetDiskStatus() + return &AgentStatus{ + CPUStatus: cpuStatus, + MemoryStatus: memoryStatus, + NetworkStatus: networkStatus, + DiskStatus: diskStatus, + } + +} diff --git a/agent-go/status/Status_test.go b/agent-go/status/Status_test.go new file mode 100644 index 0000000..2ba647d --- /dev/null +++ b/agent-go/status/Status_test.go @@ -0,0 +1,26 @@ +package status + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestConvertToFormat(t *testing.T) { + + convertToFormat := ConvertToFormat(99.92) + + fmt.Println(convertToFormat) +} + +func TestReportAppStatus(t *testing.T) { + + agentStatus := ReportAppStatus() + + marshal, err := json.Marshal(agentStatus) + if err != nil { + return + } + + fmt.Printf(string(marshal)) +} diff --git a/agent-go/tmp/1.sh b/agent-go/tmp/1.sh new file mode 100644 index 0000000..4c394b3 --- /dev/null +++ b/agent-go/tmp/1.sh @@ -0,0 +1,7 @@ +#!/bin/bash + + + +export http_proxy=http://10.250.0.10:10810 && export https_proxy=http://10.250.0.10:10810 + + diff --git a/agent-go/tmp/executor-om-multiline.json b/agent-go/tmp/executor-om-multiline.json new file mode 100644 index 0000000..c6b9377 --- /dev/null +++ b/agent-go/tmp/executor-om-multiline.json @@ -0,0 +1,8 @@ +{ + "uuid": "2023-03-27 14:38:49", + "init_time": "2023-03-27T14:38:49.8162801+08:00", + "type": "EXECUTOR", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": [[\"ls\",\"-l\"],[\"pwd\"]],\n \"pipeLineCommand\": null,\n \"resultKey\": \"output\"\n}", + "result": "", + "ac_time": "0001-01-01T00:00:00Z" +} diff --git a/agent-go/tmp/executor-om-pipeline.json b/agent-go/tmp/executor-om-pipeline.json new file mode 100644 index 0000000..3f907d6 --- /dev/null +++ b/agent-go/tmp/executor-om-pipeline.json @@ -0,0 +1,9 @@ +{ + "uuid": "2023-03-27 14:38:49", + "init_time": "2023-03-27T14:38:49.8162801+08:00", + "type": "EXECUTOR", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"pipeline\",\n \"singleLineCommand\": null,\n \"multiLineCommand\": null,\n \"pipeLineCommand\": [[\"ls\",\"-la\"],[\"grep\", \"dev\"],[\"awk\",\"{print $9}\"]],\n \"resultKey\": \"output\"\n}\n", + "result": "", + "ac_time": "0001-01-01T00:00:00Z" +} + diff --git a/agent-go/tmp/executor-om-single.json b/agent-go/tmp/executor-om-single.json index f48b02e..fa14caa 100644 --- a/agent-go/tmp/executor-om-single.json +++ b/agent-go/tmp/executor-om-single.json @@ -2,7 +2,7 @@ "uuid": "2023-03-27 14:38:49", "init_time": "2023-03-27T14:38:49.8162801+08:00", "type": "EXECUTOR", - "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"command\",\n \"singleLineCommand\": [\n \"ls\",\n \"-la\"\n ],\n \"multiLineCommand\": null,\n \"pipeLineCommand\": null,\n \"resultKey\": \"output\"\n}\n", + "content": "{\n \"needResultReplay\": true,\n \"durationTask,default:false\": false,\n \"type\": \"command\",\n \"singleLineCommand\": [\n \"ls\",\n \"-l\"\n ],\n \"multiLineCommand\": null,\n \"pipeLineCommand\": null,\n \"resultKey\": \"output\"\n}\n", "result": "", "ac_time": "0001-01-01T00:00:00Z" } diff --git a/agent-go/tmp/init-from-server-message.json b/agent-go/tmp/init-from-server-message.json new file mode 100644 index 0000000..6660562 --- /dev/null +++ b/agent-go/tmp/init-from-server-message.json @@ -0,0 +1,5 @@ +{ 
"uuid\":\"Chengdu-amd64-98-fakema\",\"init_time\":\"2023-06-19 15:21:02\",\"type\":\"INIT\",\"content\":\"{\\\"serverName\\\":\\\"Chengdu-amd64-98\\\",\\\"serverIpPbV4\\\":\\\"183.220.149.17\\\",\\\"serverIpInV4\\\":\\\"\\\",\\\"serverIpPbV6\\\":\\\"\\\",\\\"serverIpInV6\\\":\\\"\\\",\\\"registerTime\\\":null,\\\"expireTime\\\":null,\\\"createTime\\\":null,\\\"updateTime\\\":null,\\\"proxyType\\\":null,\\\"location\\\":\\\"Chengdu Sichuan CN\\\",\\\"provider\\\":\\\"AS139080 The Internet Data Center of Sichuan Mobile Communication Company Limited\\\",\\\"managePort\\ + \ + ":\\\"22\\\",\\\"cpuBrand\\\":\\\"Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz\\\",\\\"cpuCore\\\":\\\"12 @ 4299.998 MHz\\\",\\\"memoryTotal\\\":\\\"7.6 GB\\\",\\\"diskTotal\\\":\\\"914.9 GB\\\",\\\"diskUsage\\\":\\\"12.3 GB\\\",\\\"ioSpeed\\\":\\\"150 MB/s\\\",\\\"tcpControl\\\":\\\"cubic\\\",\\\"virtualization\\\":\\\"Dedicated\\\",\\\"osInfo\\\":\\\"Ubuntu 20.04.5 LTS\\\",\\\"osKernelInfo\\\":\\\"5.4.0-135-generic\\\",\\\"machineId\\\":\\\"fakemachinid123\\\",\\\"topicName\\\":\\\"Chengdu-amd64-98-fakema\\\",\\\"comment\\\":\\\"\\\",\\\"agentVersion\\\":\\\"\\\"}\",\"result\":null,\"ac_time\":null}" +} \ No newline at end of file diff --git a/agent-go/tmp/init-to-server-message.json b/agent-go/tmp/init-to-server-message.json new file mode 100644 index 0000000..24de275 --- /dev/null +++ b/agent-go/tmp/init-to-server-message.json @@ -0,0 +1,8 @@ +{ + "uuid": "2023-06-19 14:29:20", + "init_time": "2023-06-19 14:29:20", + "type": "INIT", + "content": "{\"serverName\":\"Chengdu-amd64-98\",\"serverIpPbV4\":\"183.220.149.17\",\"serverIpInV4\":\"\",\"serverIpPbV6\":\"\",\"serverIpInV6\":\"\",\"location\":\"Chengdu Sichuan CN\",\"provider\":\"AS139080 The Internet Data Center of Sichuan Mobile Communication Company Limited\",\"managePort\":\"22\",\"cpuCore\":\"12 @ 4299.998 MHz\",\"cpuBrand\":\"Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz\",\"osInfo\":\"Ubuntu 20.04.5 LTS\",\"osKernelInfo\":\"5.4.0-135-generic\",\"tcpControl\":\"cubic\",\"virtualization\":\"Dedicated\",\"ioSpeed\":\"150 MB/s\",\"memoryTotal\":\"7.6 GB\",\"diskTotal\":\"914.9 GB\",\"diskUsage\":\"12.3 GB\",\"comment\":\"\",\"machineId\":\"\",\"agentVersion\":\"\",\"agentTopicName\":\"\"}", + "result": null, + "ac_time": "2023-06-19 14:29:20" +} \ No newline at end of file diff --git a/agent-go/tmp/nacos_config_export_20230330143045.zip b/agent-go/tmp/nacos_config_export_20230330143045.zip new file mode 100644 index 0000000..2c33e55 Binary files /dev/null and b/agent-go/tmp/nacos_config_export_20230330143045.zip differ diff --git a/agent-go/tmp/rapid-api.sh b/agent-go/tmp/rapid-api.sh new file mode 100644 index 0000000..ce9452c --- /dev/null +++ b/agent-go/tmp/rapid-api.sh @@ -0,0 +1 @@ +92a968c0d5msh36a70a2da667c96p1bcc99jsnae97d91732f3 diff --git a/agent-go/tmp/simple.sh b/agent-go/tmp/simple.sh new file mode 100755 index 0000000..600331d --- /dev/null +++ b/agent-go/tmp/simple.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +for i in {1..30} +do + echo "yes" + sleep 0.3 +done \ No newline at end of file diff --git a/agent-go/tmp/status-agentStatus.json b/agent-go/tmp/status-agentStatus.json new file mode 100644 index 0000000..514957e --- /dev/null +++ b/agent-go/tmp/status-agentStatus.json @@ -0,0 +1,8 @@ +{ + "uuid": "2023-03-27 14:38:49", + "init_time": "2023-03-27T14:38:49.8162801+08:00", + "type": "STATUS", + "content": "{\n\"type\": \"status\",\n\"agent_topic_name\": \"exampleAgentTopicName\",\n\"metric_repeat_count\": 0,\n\"metric_repeat_pinch\": 0\n}", + "result": "", + 
"ac_time": "0001-01-01T00:00:00Z" +} diff --git a/agent-go/tmp/status-ping.json b/agent-go/tmp/status-ping.json new file mode 100644 index 0000000..c821d60 --- /dev/null +++ b/agent-go/tmp/status-ping.json @@ -0,0 +1,8 @@ +{ + "uuid": "2023-03-27 14:38:49", + "init_time": "2023-03-27T14:38:49.8162801+08:00", + "type": "STATUS", + "content": "{\n\"type\": \"ping\",\n\"agent_topic_name\": \"exampleAgentTopicName\",\n\"metric_repeat_count\": 0,\n\"metric_repeat_pinch\": 0\n}", + "result": "", + "ac_time": "0001-01-01T00:00:00Z" +} diff --git a/agent-go/utils/TimeUtils.go b/agent-go/utils/TimeUtils.go index 0981dca..be6d10a 100644 --- a/agent-go/utils/TimeUtils.go +++ b/agent-go/utils/TimeUtils.go @@ -4,13 +4,18 @@ import ( "time" ) -// CurTimeString 输出系统时间的格式为"2006-01-02 15:04:05"形式的时间字符串 -func CurTimeString() string { +// ParseDateTimeTime 输出系统时间的格式为"2006-01-02 15:04:05"形式的时间字符串 +func ParseDateTimeTime() string { now := time.Now() /*loc := time.FixedZone("UTC+8", 8*60*60) // 创建东八区时区对象 localTime := now.In(loc) // 转换为东八区时间*/ - return now.Format("2006-01-02 15:04:05") + return now.Format(time.DateTime) +} + +func ParseISOLocalDateTime() string { + now := time.Now() + return now.Format(time.RFC3339) } diff --git a/agent/src/main/java/io/wdd/agent/config/message/handler/OMHandlerExecutor.java b/agent/src/main/java/io/wdd/agent/config/message/handler/OMHandlerExecutor.java index b5e3702..7cead64 100644 --- a/agent/src/main/java/io/wdd/agent/config/message/handler/OMHandlerExecutor.java +++ b/agent/src/main/java/io/wdd/agent/config/message/handler/OMHandlerExecutor.java @@ -30,7 +30,15 @@ public class OMHandlerExecutor extends AbstractOctopusMessageHandler { @Override public boolean handle(OctopusMessage octopusMessage) { + + if (!octopusMessage + + + + + + .getType() .equals(OctopusMessageType.EXECUTOR)) { return next.handle(octopusMessage); @@ -41,6 +49,7 @@ public class OMHandlerExecutor extends AbstractOctopusMessageHandler { try { // 需要首先解析成 ExecutionMessage + ExecutionMessage executionMessage = objectMapper.readValue( (String) octopusMessage.getContent(), new TypeReference() { diff --git a/agent/src/test/java/io/wdd/agent/CommandTest.java b/agent/src/test/java/io/wdd/agent/CommandTest.java new file mode 100644 index 0000000..3d797d4 --- /dev/null +++ b/agent/src/test/java/io/wdd/agent/CommandTest.java @@ -0,0 +1,5 @@ +package io.wdd.agent; + + +public class CommandTest { +} diff --git a/common/pom.xml b/common/pom.xml index e660761..54f8af6 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -27,6 +27,10 @@ oshi-core-java11 6.4.0 + + com.fasterxml.jackson.core + jackson-annotations + diff --git a/common/src/main/java/io/wdd/common/CommonApplication.java b/common/src/main/java/io/wdd/common/CommonApplication.java index 532d05b..f86cfb8 100644 --- a/common/src/main/java/io/wdd/common/CommonApplication.java +++ b/common/src/main/java/io/wdd/common/CommonApplication.java @@ -1,13 +1,13 @@ package io.wdd.common; -import org.springframework.boot.SpringApplication; -import org.springframework.boot.autoconfigure.SpringBootApplication; +//import org.springframework.boot.SpringApplication; +//import org.springframework.boot.autoconfigure.SpringBootApplication; -@SpringBootApplication -public class CommonApplication { - - public static void main(String[] args) { - SpringApplication.run(CommonApplication.class, args); - } - -} +//@SpringBootApplication +//public class CommonApplication { +// +// public static void main(String[] args) { +// SpringApplication.run(CommonApplication.class, args); +// } +// 
diff --git a/common/src/main/java/io/wdd/common/beans/status/OctopusStatusMessage.java b/common/src/main/java/io/wdd/common/beans/status/OctopusStatusMessage.java deleted file mode 100644 index d58d77a..0000000 --- a/common/src/main/java/io/wdd/common/beans/status/OctopusStatusMessage.java +++ /dev/null @@ -1,36 +0,0 @@ -package io.wdd.common.beans.status; - -import lombok.AllArgsConstructor; -import lombok.Data; -import lombok.NoArgsConstructor; -import lombok.experimental.SuperBuilder; - -@Data -@AllArgsConstructor -@NoArgsConstructor -@SuperBuilder(toBuilder = true) -public class OctopusStatusMessage { - - // below two will be used by both server and agent - // stores the real-time health status of every agent: 1 means healthy, 0 means failed - public static final String ALL_AGENT_STATUS_REDIS_KEY = "ALL_AGENT_HEALTHY_STATUS"; - public static final String HEALTHY_STATUS_MESSAGE_TYPE = "ping"; - public static final String ALL_STATUS_MESSAGE_TYPE = "all"; - public static final String METRIC_STATUS_MESSAGE_TYPE = "metric"; - public static final String APP_STATUS_MESSAGE_TYPE = "app"; - - /** - * which kind of status should be return - * metric => short time message - * all => all agent status message - * healthy => check for healthy - * */ - String type; - - String agentTopicName; - - int metricRepeatCount; - - int metricRepeatPinch; - -} diff --git a/pom.xml b/pom.xml index 4fc0dc5..6009b66 100644 --- a/pom.xml +++ b/pom.xml @@ -20,10 +20,10 @@ ProjectOctopus - <module>agent</module> <module>server</module> + @@ -36,101 +36,6 @@ - <dependencyManagement> - <dependencies> - - <dependency> - <groupId>org.springframework.cloud</groupId> - <artifactId>spring-cloud-dependencies</artifactId> - <version>${spring-cloud.version}</version> - <type>pom</type> - <scope>import</scope> - </dependency> - - <dependency> - <groupId>com.alibaba.cloud</groupId> - <artifactId>spring-cloud-alibaba-dependencies</artifactId> - <version>${alibaba-cloud.version}</version> - <type>pom</type> - <scope>import</scope> - </dependency> - <dependency> - <groupId>org.springframework.cloud</groupId> - <artifactId>spring-cloud-starter-bootstrap</artifactId> - <version>3.1.5</version> - </dependency> - <dependency> - <groupId>com.alibaba.cloud</groupId> - <artifactId>spring-cloud-starter-alibaba-nacos-config</artifactId> - <version>${alibaba-cloud.version}</version> - </dependency> - </dependencies> - </dependencyManagement> - - <dependencies> - <dependency> - <groupId>org.springframework.boot</groupId> - <artifactId>spring-boot-starter-web</artifactId> - </dependency> - - <dependency> - <groupId>org.apache.commons</groupId> - <artifactId>commons-lang3</artifactId> - <version>3.12.0</version> - </dependency> - - <dependency> - <groupId>com.google.guava</groupId> - <artifactId>guava</artifactId> - <version>31.1-jre</version> - </dependency> - - <dependency> - <groupId>org.springframework.boot</groupId> - <artifactId>spring-boot-starter-amqp</artifactId> - </dependency> - - <dependency> - <groupId>org.springframework.boot</groupId> - <artifactId>spring-boot-starter-data-redis</artifactId> - </dependency> - - <dependency> - <groupId>org.apache.commons</groupId> - <artifactId>commons-pool2</artifactId> - </dependency> - - <dependency> - <groupId>commons-beanutils</groupId> - <artifactId>commons-beanutils</artifactId> - <version>1.9.4</version> - </dependency> - - <dependency> - <groupId>org.projectlombok</groupId> - <artifactId>lombok</artifactId> - <optional>true</optional> - </dependency> - </dependencies> + + + + + + + + + + diff --git a/server/pom.xml b/server/pom.xml index 76a7426..dcdef3c 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -21,9 +21,37 @@ - <groupId>io.wdd</groupId> - <artifactId>common</artifactId> - <version>${project.parent.version}</version> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-web</artifactId> + </dependency> + + <dependency> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-amqp</artifactId> + </dependency> + + <dependency> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-data-redis</artifactId> + </dependency> + + <dependency> + <groupId>org.apache.commons</groupId> + <artifactId>commons-pool2</artifactId> + </dependency> + + <dependency> + <groupId>commons-beanutils</groupId> + <artifactId>commons-beanutils</artifactId> + <version>1.9.4</version> @@ -67,12 +95,32 @@ <version>3.5.2</version> </dependency> - - + + <dependency> + <groupId>org.apache.commons</groupId> + <artifactId>commons-lang3</artifactId> + <version>3.12.0</version> + </dependency> + + <dependency> + <groupId>com.github.oshi</groupId> + <artifactId>oshi-core-java11</artifactId> + <version>6.4.0</version> + </dependency> + + <dependency> + <groupId>com.google.guava</groupId> + <artifactId>guava</artifactId> + <version>31.1-jre</version> + </dependency> + + <dependency> + <groupId>org.projectlombok</groupId> + <artifactId>lombok</artifactId> + <optional>true</optional> + </dependency> @@ -87,6 +135,12 @@ runtime --> + <dependency> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-test</artifactId> + <scope>test</scope> + </dependency> + diff --git a/server/src/main/java/io/wdd/ServerApplication.java b/server/src/main/java/io/wdd/ServerApplication.java index c3effd6..07d8464 100644 --- a/server/src/main/java/io/wdd/ServerApplication.java +++ b/server/src/main/java/io/wdd/ServerApplication.java @@ -8,6 +8,7 @@ import org.springframework.boot.autoconfigure.SpringBootApplication; @MapperScan("io.wdd.server.mapper") public class ServerApplication { + public static void main(String[] 
args) { SpringApplication.run(ServerApplication.class, args); } diff --git a/common/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java b/server/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java similarity index 99% rename from common/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java rename to server/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java index 08909d0..ee0e58c 100644 --- a/common/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java +++ b/server/src/main/java/io/wdd/common/handler/GlobalExceptionHandler.java @@ -1,8 +1,8 @@ package io.wdd.common.handler; import com.google.common.collect.Maps; -import io.wdd.common.beans.response.R; -import io.wdd.common.beans.response.ResultStat; +import io.wdd.common.response.R; +import io.wdd.common.response.ResultStat; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.springframework.amqp.rabbit.support.ListenerExecutionFailedException; diff --git a/common/src/main/java/io/wdd/common/handler/MyMessageSource.java b/server/src/main/java/io/wdd/common/handler/MyMessageSource.java similarity index 100% rename from common/src/main/java/io/wdd/common/handler/MyMessageSource.java rename to server/src/main/java/io/wdd/common/handler/MyMessageSource.java diff --git a/common/src/main/java/io/wdd/common/handler/MyRuntimeException.java b/server/src/main/java/io/wdd/common/handler/MyRuntimeException.java similarity index 95% rename from common/src/main/java/io/wdd/common/handler/MyRuntimeException.java rename to server/src/main/java/io/wdd/common/handler/MyRuntimeException.java index 21f0df6..7ae4135 100644 --- a/common/src/main/java/io/wdd/common/handler/MyRuntimeException.java +++ b/server/src/main/java/io/wdd/common/handler/MyRuntimeException.java @@ -1,6 +1,6 @@ package io.wdd.common.handler; -import io.wdd.common.beans.response.ResultStat; +import io.wdd.common.response.ResultStat; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/response/R.java b/server/src/main/java/io/wdd/common/response/R.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/response/R.java rename to server/src/main/java/io/wdd/common/response/R.java index 25d6bf2..b16421e 100644 --- a/common/src/main/java/io/wdd/common/beans/response/R.java +++ b/server/src/main/java/io/wdd/common/response/R.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.response; +package io.wdd.common.response; import lombok.Data; diff --git a/common/src/main/java/io/wdd/common/beans/response/ResultStat.java b/server/src/main/java/io/wdd/common/response/ResultStat.java similarity index 92% rename from common/src/main/java/io/wdd/common/beans/response/ResultStat.java rename to server/src/main/java/io/wdd/common/response/ResultStat.java index d28fe6e..1e5def6 100644 --- a/common/src/main/java/io/wdd/common/beans/response/ResultStat.java +++ b/server/src/main/java/io/wdd/common/response/ResultStat.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.response; +package io.wdd.common.response; public enum ResultStat { diff --git a/common/src/main/java/io/wdd/common/utils/DataUnit.java b/server/src/main/java/io/wdd/common/utils/DataUnit.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/DataUnit.java rename to server/src/main/java/io/wdd/common/utils/DataUnit.java diff --git a/common/src/main/java/io/wdd/common/utils/FormatUtils.java 
b/server/src/main/java/io/wdd/common/utils/FormatUtils.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/FormatUtils.java rename to server/src/main/java/io/wdd/common/utils/FormatUtils.java diff --git a/common/src/main/java/io/wdd/common/utils/FunctionReader.java b/server/src/main/java/io/wdd/common/utils/FunctionReader.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/FunctionReader.java rename to server/src/main/java/io/wdd/common/utils/FunctionReader.java diff --git a/common/src/main/java/io/wdd/common/utils/MessageUtils.java b/server/src/main/java/io/wdd/common/utils/MessageUtils.java similarity index 93% rename from common/src/main/java/io/wdd/common/utils/MessageUtils.java rename to server/src/main/java/io/wdd/common/utils/MessageUtils.java index faf2a79..b53ad13 100644 --- a/common/src/main/java/io/wdd/common/utils/MessageUtils.java +++ b/server/src/main/java/io/wdd/common/utils/MessageUtils.java @@ -1,8 +1,8 @@ package io.wdd.common.utils; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.rabbitmq.OctopusMessage; import io.wdd.common.handler.MyRuntimeException; +import io.wdd.rpc.message.OctopusMessage; import org.springframework.amqp.core.Message; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; diff --git a/common/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java b/server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java similarity index 83% rename from common/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java rename to server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java index 7971f25..783eb37 100644 --- a/common/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java +++ b/server/src/main/java/io/wdd/common/utils/OctopusObjectMapperConfig.java @@ -1,12 +1,15 @@ package io.wdd.common.utils; import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import com.fasterxml.jackson.datatype.jsr310.deser.LocalDateTimeDeserializer; import com.fasterxml.jackson.datatype.jsr310.ser.LocalDateTimeSerializer; import org.springframework.boot.autoconfigure.jackson.Jackson2ObjectMapperBuilderCustomizer; import org.springframework.context.annotation.Configuration; +import javax.annotation.PostConstruct; +import javax.annotation.Resource; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; @@ -14,6 +17,16 @@ import java.time.format.DateTimeFormatter; @Configuration public class OctopusObjectMapperConfig { + public static ObjectMapper OctopusObjectMapper = null; + + @Resource + ObjectMapper objectMapper; + + @PostConstruct + public void setOctopusObjectMapper() { + OctopusObjectMapper = objectMapper; + } + public static Jackson2ObjectMapperBuilderCustomizer common() { return jacksonObjectMapperBuilder -> { diff --git a/common/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java b/server/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java similarity index 94% rename from common/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java rename to server/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java index 4463848..14ef8d5 100644 --- a/common/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java +++ b/server/src/main/java/io/wdd/common/utils/OctopusRabbitTemplateConfig.java @@ -8,7 
+8,6 @@ import org.springframework.amqp.rabbit.core.RabbitTemplate; import org.springframework.amqp.support.converter.Jackson2JsonMessageConverter; import org.springframework.amqp.support.converter.MessageConverter; import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; import javax.annotation.Resource; import java.text.SimpleDateFormat; diff --git a/common/src/main/java/io/wdd/common/utils/TimeUtils.java b/server/src/main/java/io/wdd/common/utils/TimeUtils.java similarity index 100% rename from common/src/main/java/io/wdd/common/utils/TimeUtils.java rename to server/src/main/java/io/wdd/common/utils/TimeUtils.java diff --git a/server/src/main/java/io/wdd/func/controller/OSSController.java b/server/src/main/java/io/wdd/func/controller/OSSController.java index a164feb..9cb6cc8 100644 --- a/server/src/main/java/io/wdd/func/controller/OSSController.java +++ b/server/src/main/java/io/wdd/func/controller/OSSController.java @@ -7,7 +7,7 @@ import com.amazonaws.services.s3.model.S3Object; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.func.oss.config.OctopusObjectSummary; import io.wdd.func.oss.config.OssConfig; import io.wdd.func.oss.service.OSSCoreService; diff --git a/server/src/main/java/io/wdd/func/controller/XrayController.java b/server/src/main/java/io/wdd/func/controller/XrayController.java index b4b11e5..dfff82a 100644 --- a/server/src/main/java/io/wdd/func/controller/XrayController.java +++ b/server/src/main/java/io/wdd/func/controller/XrayController.java @@ -4,7 +4,7 @@ package io.wdd.func.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.func.xray.beans.node.ProxyNode; import io.wdd.func.xray.beans.node.XrayConfigInfo; import io.wdd.func.xray.service.XrayCallAgent; diff --git a/server/src/main/java/io/wdd/func/xray/service/XrayCallAgent.java b/server/src/main/java/io/wdd/func/xray/service/XrayCallAgent.java index 672c22f..90fece5 100644 --- a/server/src/main/java/io/wdd/func/xray/service/XrayCallAgent.java +++ b/server/src/main/java/io/wdd/func/xray/service/XrayCallAgent.java @@ -3,7 +3,7 @@ package io.wdd.func.xray.service; import io.wdd.common.utils.TimeUtils; import io.wdd.func.oss.config.OctopusObjectSummary; import io.wdd.func.xray.beans.node.ProxyNode; -import io.wdd.rpc.execute.service.AsyncExecutionService; +import io.wdd.rpc.execute.service.SyncExecutionService; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Service; @@ -83,7 +83,7 @@ public class XrayCallAgent { } @Resource - AsyncExecutionService executionService; + SyncExecutionService executionService; /** * For each node in the proxy chain, build the Xray config-update command, then send it to the corresponding agent @@ -131,7 +131,7 @@ ); // Send the command to the agent and run the update!
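The swap to SyncExecutionService above rides on the replay-wait machinery visible in OctopusAgentServiceImpl below: build a contend object sized to the number of expected replies, register it with the background collector, then block on its latch with a timeout. Stripped of the Spring and RabbitMQ wiring, the pattern is plain java.util.concurrent; the class below is illustrative, not the project's OctopusMessageAsyncReplayContend.

    import java.util.List;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    // Fan-in: the latch is counted down once per agent reply, and the requesting
    // thread blocks until every reply has arrived or the timeout fires.
    class ReplayContendSketch {
        final CountDownLatch latch;
        final ConcurrentLinkedQueue<String> replies = new ConcurrentLinkedQueue<>();

        ReplayContendSketch(int expectedReplies) {
            this.latch = new CountDownLatch(expectedReplies);
        }

        // called by the message-handler thread for each agent reply
        void onReply(String reply) {
            replies.add(reply);
            latch.countDown();
        }

        // called by the requesting thread; partial results survive a timeout
        List<String> await(long seconds) throws InterruptedException {
            latch.await(seconds, TimeUnit.SECONDS);
            return List.copyOf(replies);
        }
    }

Collecting into a concurrent queue rather than a latch-guarded list is what lets a caller return partial results instead of hanging on a dead agent.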
- String resultKey = executionService.SendCommandToAgent( + String resultKey = executionService.SyncSendCommandToAgent( proxyNode.getAgentTopicName(), updateCommandType, null, diff --git a/common/src/main/java/io/wdd/common/beans/agent/AgentOperationMessage.java b/server/src/main/java/io/wdd/rpc/agent/AgentOperationMessage.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/agent/AgentOperationMessage.java rename to server/src/main/java/io/wdd/rpc/agent/AgentOperationMessage.java index 787dc54..ebdfb2f 100644 --- a/common/src/main/java/io/wdd/common/beans/agent/AgentOperationMessage.java +++ b/server/src/main/java/io/wdd/rpc/agent/AgentOperationMessage.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.agent; +package io.wdd.rpc.agent; import com.fasterxml.jackson.annotation.JsonFormat; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/agent/AgentOperationType.java b/server/src/main/java/io/wdd/rpc/agent/AgentOperationType.java similarity index 86% rename from common/src/main/java/io/wdd/common/beans/agent/AgentOperationType.java rename to server/src/main/java/io/wdd/rpc/agent/AgentOperationType.java index 3919fad..799c9a3 100644 --- a/common/src/main/java/io/wdd/common/beans/agent/AgentOperationType.java +++ b/server/src/main/java/io/wdd/rpc/agent/AgentOperationType.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.agent; +package io.wdd.rpc.agent; public enum AgentOperationType { diff --git a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java index 408d47a..e7b8485 100644 --- a/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/agent/OctopusAgentServiceImpl.java @@ -3,13 +3,11 @@ package io.wdd.rpc.agent; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.agent.AgentOperationMessage; -import io.wdd.common.beans.agent.AgentOperationType; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.message.handler.AsyncWaitOMResult; -import io.wdd.rpc.message.handler.OMReplayContend; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; +import io.wdd.rpc.message.handler.async.AsyncWaitOctopusMessageResultService; +import io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend; import io.wdd.rpc.message.sender.OMessageToAgentSender; import io.wdd.server.beans.vo.ServerInfoVO; import io.wdd.server.config.ServerCommonPool; @@ -28,10 +26,10 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_SET; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; -import static io.wdd.rpc.message.handler.OMessageHandlerServer.AGENT_LATEST_VERSION; -import static io.wdd.rpc.message.handler.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_SET; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.message.handler.sync.OMessageHandlerServer.AGENT_LATEST_VERSION; +import static 
io.wdd.rpc.message.handler.sync.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; @Service @Slf4j @@ -47,7 +45,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { RedisTemplate redisTemplate; @Resource - AsyncWaitOMResult asyncWaitOMResult; + AsyncWaitOctopusMessageResultService asyncWaitOctopusMessageResultService; @Override public Map getAllAgentVersion() { @@ -72,17 +70,17 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // 构造 异步结果监听内容 - OMReplayContend omReplayContend = OMReplayContend.build( + OctopusMessageAsyncReplayContend agentReplayContend = OctopusMessageAsyncReplayContend.build( ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.size(), CurrentAppOctopusMessageType, currentTime ); - CountDownLatch countDownLatch = omReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = agentReplayContend.getCountDownLatch(); // 调用后台接收处理所有的Replay信息 - asyncWaitOMResult.waitFor(omReplayContend); + asyncWaitOctopusMessageResultService.waitFor(agentReplayContend); //此处存在重大bug,会导致CPU占用飙升 /*CompletableFuture getAllAgentVersionInfoFuture = waitCollectAllAgentVersionInfo( @@ -108,10 +106,10 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { } // 此处调用,即可中断 异步任务的收集工作 - asyncWaitOMResult.stopWaiting(omReplayContend); + asyncWaitOctopusMessageResultService.stopWaiting(agentReplayContend); // 处理结果 - omReplayContend + agentReplayContend .getReplayOMList() .stream() .forEach( @@ -124,7 +122,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // help gc - omReplayContend = null; + agentReplayContend = null; } return result; @@ -158,16 +156,16 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // 构造结果 - OMReplayContend omReplayContend = OMReplayContend.build( + OctopusMessageAsyncReplayContend OctopusMessageAsyncReplayContend = io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend.build( ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.size(), CurrentAppOctopusMessageType, currentTime ); - CountDownLatch countDownLatch = omReplayContend.getCountDownLatch(); + CountDownLatch countDownLatch = OctopusMessageAsyncReplayContend.getCountDownLatch(); // 调用后台接收处理所有的Replay信息 - asyncWaitOMResult.waitFor(omReplayContend); + asyncWaitOctopusMessageResultService.waitFor(OctopusMessageAsyncReplayContend); /* CompletableFuture getAllAgentCoreInfoFuture = waitCollectAllAgentCoreInfo( result, @@ -187,10 +185,10 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { // 超时,或者 全部信息已经收集 // 此处调用,即可中断 异步任务的收集工作 - asyncWaitOMResult.stopWaiting(omReplayContend); + asyncWaitOctopusMessageResultService.stopWaiting(OctopusMessageAsyncReplayContend); // 处理结果 - omReplayContend + OctopusMessageAsyncReplayContend .getReplayOMList() .stream() .forEach( @@ -218,7 +216,7 @@ public class OctopusAgentServiceImpl implements OctopusAgentService { ); // help gc - omReplayContend = null; + OctopusMessageAsyncReplayContend = null; } return result; diff --git a/server/src/main/java/io/wdd/rpc/controller/AgentController.java b/server/src/main/java/io/wdd/rpc/controller/AgentController.java index ed1c9c4..ddcea7d 100644 --- a/server/src/main/java/io/wdd/rpc/controller/AgentController.java +++ b/server/src/main/java/io/wdd/rpc/controller/AgentController.java @@ -2,7 +2,7 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.rpc.agent.OctopusAgentService; import 
io.wdd.server.beans.vo.ServerInfoVO; import org.springframework.web.bind.annotation.GetMapping; diff --git a/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java b/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java index 7d2ba39..b768099 100644 --- a/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java +++ b/server/src/main/java/io/wdd/rpc/controller/ExecutionController.java @@ -3,8 +3,7 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; -import io.wdd.rpc.execute.result.BuildStreamReader; +import io.wdd.common.response.R; import io.wdd.rpc.execute.service.AsyncExecutionService; import io.wdd.rpc.execute.service.SyncExecutionService; import org.springframework.web.bind.annotation.PostMapping; @@ -15,23 +14,21 @@ import org.springframework.web.bind.annotation.RestController; import javax.annotation.Nullable; import javax.annotation.Resource; import java.util.ArrayList; +import java.util.Collections; import java.util.List; -import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_LIST; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; @RestController @RequestMapping("/octopus/server/executor") @Api(value = "Agent执行命令的Controller", tags = "Execution") public class ExecutionController { - @Resource - AsyncExecutionService asyncExecutionService; - @Resource - BuildStreamReader buildStreamReader; @Resource SyncExecutionService syncExecutionService; + @Resource + AsyncExecutionService asyncExecutionService; @PostMapping("/command/one") @ApiOperation("[命令] [异步]- 单台主机") @@ -44,8 +41,8 @@ public class ExecutionController { @ApiParam(name = "isDurationTask", value = "是否是持久化任务") @RequestParam(value = "isDurationTask", defaultValue = "false", required = false) boolean isDurationTask ) { - String streamKey = asyncExecutionService - .SendCommandToAgent( + ArrayList streamKeyList = asyncExecutionService + .AsyncSendCommandToAgentComplete( topicName, type, commandList, @@ -55,12 +52,13 @@ public class ExecutionController { isDurationTask ); - return R.ok(streamKey); + + return R.ok(streamKeyList.toString()); } @PostMapping("/command/batch") @ApiOperation("[命令] [异步] - 批量主机") - public R> patchCommandToAgentList( + public R>> patchCommandToAgentList( @RequestParam(value = "topicNameList") @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList, @RequestParam(value = "commandList", required = false) @@ -71,19 +69,20 @@ public class ExecutionController { @ApiParam(name = "isDurationTask", value = "是否是持久化任务") @RequestParam(value = "isDurationTask", defaultValue = "false", required = false) boolean isDurationTask ) { - return R.ok(asyncExecutionService.SendCommandToAgentComplete( + List> arrayListList = asyncExecutionService.AsyncSendCommandToAgentComplete( topicNameList, type, commandList, completeCommandList, isDurationTask - )); + ); + return R.ok(arrayListList); } @PostMapping("/command/all") @ApiOperation("[命令] [异步] - 所有的主机") - public R> patchCommandToAllAgent( + public R>> patchCommandToAllAgent( @RequestParam(value = "commandList", required = false) @ApiParam(name = 
"commandList", value = "命令行") @Nullable List commandList, @RequestParam(value = "completeCommandList", required = false) @@ -92,7 +91,7 @@ public class ExecutionController { @ApiParam(name = "isDurationTask", value = "是否是持久化任务") @RequestParam(value = "isDurationTask", defaultValue = "false", required = false) boolean isDurationTask ) { - return R.ok(asyncExecutionService.SendCommandToAgentComplete( + return R.ok(asyncExecutionService.AsyncSendCommandToAgentComplete( ALL_AGENT_TOPIC_NAME_LIST, type, commandList, @@ -103,7 +102,7 @@ public class ExecutionController { @PostMapping("/command/healthy") @ApiOperation("[命令] [异步] - 健康的主机") - public R> patchCommandToHealthyAgent( + public R>> patchCommandToHealthyAgent( @RequestParam(value = "commandList", required = false) @ApiParam(name = "commandList", value = "命令行") @Nullable List commandList, @RequestParam(value = "completeCommandList", required = false) @@ -112,7 +111,7 @@ public class ExecutionController { @ApiParam(name = "isDurationTask", value = "是否是持久化任务") @RequestParam(value = "isDurationTask", defaultValue = "false", required = false) boolean isDurationTask ) { - return R.ok(asyncExecutionService.SendCommandToAgentComplete( + return R.ok(asyncExecutionService.AsyncSendCommandToAgentComplete( ALL_HEALTHY_AGENT_TOPIC_NAME_LIST, type, commandList, @@ -133,18 +132,18 @@ public class ExecutionController { ) { return R.ok( - syncExecutionService.SyncSendCommandToAgent( + Collections.singletonList(syncExecutionService.SyncSendCommandToAgentComplete( topicName, type, commandList, completeCommandList - ) + )) ); } @PostMapping("/command/sync/batch") @ApiOperation("[命令] [同步] - 批量-等待命令结果") - public R>> SyncPatchCommandToAgentBatch( + public R> SyncPatchCommandToAgentBatch( @RequestParam(value = "topicNameList") @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList, @RequestParam(value = "commandList", required = false) @@ -168,7 +167,7 @@ public class ExecutionController { @PostMapping("/command/sync/all") @ApiOperation("[命令] [同步] - 全部-同步等待命令结果") - public R>> SyncPatchCommandToAgentAll( + public R> SyncPatchCommandToAgentAll( @RequestParam(value = "commandList", required = false) @ApiParam(name = "commandList", value = "命令行") @Nullable List commandList, @RequestParam(value = "completeCommandList", required = false) @@ -195,92 +194,87 @@ public class ExecutionController { @RequestParam(value = "streamKey") @ApiParam(value = "status的Stream Key") String streamKey ) { - buildStreamReader.registerStreamReader( - AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER, - streamKey - ); - return R.ok("请到控制台查看,已经切换至 => " + streamKey); } // auth required - @PostMapping("/function/update") - @ApiOperation("升级") - public R> AgentUpdate( - @RequestParam(value = "topicNameList") - @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList - ) { - - return R.ok( - asyncExecutionService - .SendCommandToAgent( - topicNameList, - "AgentUpdate", - null, - false, - null, - true - )); - } - - @PostMapping("/function/reboot") - @ApiOperation("重启") - public R> AgentReboot( - @RequestParam(value = "topicNameList") - @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList - ) { - - return R.ok( - asyncExecutionService - .SendCommandToAgent( - topicNameList, - "AgentReboot", - null, - false, - null, - true - )); - } - - @PostMapping("/function/shutdown") - @ApiOperation("关闭") - public R> AgentShutdown( - @RequestParam(value = "topicNameList") - @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList - ) { - - return R.ok( - 
asyncExecutionService - .SendCommandToAgent( - topicNameList, - "AgentShutdown", - null, - false, - null, - true - )); - } - - @PostMapping("/function/bootUp") - @ApiOperation("重新部署") - public R> AgentBootUp( - @RequestParam(value = "topicNameList") - @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList - ) { - - return R.ok( - asyncExecutionService - .SendCommandToAgent( - topicNameList, - "AgentBootUp", - null, - false, - null, - true - )); - } +// @PostMapping("/function/update") +// @ApiOperation("升级") +// public R> AgentUpdate( +// @RequestParam(value = "topicNameList") +// @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList +// ) { +// +// return R.ok( +// syncExecutionService +// .SyncSendCommandToAgent( +// topicNameList, +// "AgentUpdate", +// null, +// false, +// null, +// true +// )); +// } +// +// @PostMapping("/function/reboot") +// @ApiOperation("重启") +// public R> AgentReboot( +// @RequestParam(value = "topicNameList") +// @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList +// ) { +// +// return R.ok( +// asyncExecutionService +// .SyncSendCommandToAgent( +// topicNameList, +// "AgentReboot", +// null, +// false, +// null, +// true +// )); +// } +// +// @PostMapping("/function/shutdown") +// @ApiOperation("关闭") +// public R> AgentShutdown( +// @RequestParam(value = "topicNameList") +// @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList +// ) { +// +// return R.ok( +// syncExecutionService +// .SyncSendCommandToAgent( +// topicNameList, +// "AgentShutdown", +// null, +// false, +// null, +// true +// )); +// } +// +// @PostMapping("/function/bootUp") +// @ApiOperation("重新部署") +// public R> AgentBootUp( +// @RequestParam(value = "topicNameList") +// @ApiParam(name = "topicNameList", value = "目标机器列表") List topicNameList +// ) { +// +// return R.ok( +// asyncExecutionService +// .SyncSendCommandToAgent( +// topicNameList, +// "AgentBootUp", +// null, +// false, +// null, +// true +// )); +// } } diff --git a/server/src/main/java/io/wdd/rpc/controller/SchedulerController.java b/server/src/main/java/io/wdd/rpc/controller/SchedulerController.java index c06d1d1..6c50cd8 100644 --- a/server/src/main/java/io/wdd/rpc/controller/SchedulerController.java +++ b/server/src/main/java/io/wdd/rpc/controller/SchedulerController.java @@ -4,7 +4,7 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.rpc.scheduler.beans.ScriptSchedulerVO; import io.wdd.rpc.scheduler.service.QuartzSchedulerService; import org.quartz.JobDetail; diff --git a/server/src/main/java/io/wdd/rpc/controller/StatusController.java b/server/src/main/java/io/wdd/rpc/controller/StatusController.java index bcaf7b4..17f3c89 100644 --- a/server/src/main/java/io/wdd/rpc/controller/StatusController.java +++ b/server/src/main/java/io/wdd/rpc/controller/StatusController.java @@ -3,8 +3,9 @@ package io.wdd.rpc.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; -import io.wdd.common.beans.response.R; -import io.wdd.rpc.init.ServerCacheAgentStatus; +import io.wdd.common.response.R; +import io.wdd.rpc.scheduler.service.status.AgentAliveStatusMonitorService; +import io.wdd.rpc.status.service.AsyncStatusService; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PostMapping; import 
org.springframework.web.bind.annotation.RequestMapping; @@ -14,7 +15,7 @@ import javax.annotation.Resource; import java.util.List; import java.util.Map; -import static io.wdd.rpc.init.ServerCacheAgentStatus.*; +import static io.wdd.rpc.init.AgentStatusCacheService.*; @RestController @@ -23,11 +24,14 @@ import static io.wdd.rpc.init.ServerCacheAgentStatus.*; public class StatusController { @Resource - ServerCacheAgentStatus serverCacheAgentStatus; + AsyncStatusService asyncStatusService; + + @Resource + AgentAliveStatusMonitorService agentAliveStatusMonitorService; @ApiOperation("[ Agent-状态 ] Map") @GetMapping("/agent/status") - public R> GetAllAgentHealthyStatus() { + public R> GetAllAgentHealthyStatus() { return R.ok(ALL_AGENT_STATUS_MAP); } @@ -76,7 +80,9 @@ public class StatusController { public R>> ManualUpdateAgentStatus() { // 手动调用更新 - serverCacheAgentStatus.updateAgentStatusMapCache(); + Map agentAliveStatusMap = asyncStatusService.AsyncCollectAgentAliveStatus(ALL_AGENT_TOPIC_NAME_LIST, 5); + + agentAliveStatusMonitorService.updateAllAgentHealthyStatus(agentAliveStatusMap); return R.ok(STATUS_AGENT_LIST_MAP); } diff --git a/common/src/main/java/io/wdd/common/beans/executor/ExecutionMessage.java b/server/src/main/java/io/wdd/rpc/execute/ExecutionMessage.java similarity index 98% rename from common/src/main/java/io/wdd/common/beans/executor/ExecutionMessage.java rename to server/src/main/java/io/wdd/rpc/execute/ExecutionMessage.java index 74848f4..7b0e3e4 100644 --- a/common/src/main/java/io/wdd/common/beans/executor/ExecutionMessage.java +++ b/server/src/main/java/io/wdd/rpc/execute/ExecutionMessage.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.executor; +package io.wdd.rpc.execute; import com.fasterxml.jackson.annotation.JsonProperty; import io.wdd.common.utils.TimeUtils; diff --git a/server/src/main/java/io/wdd/rpc/execute/config/CommandReaderConfigBean.java b/server/src/main/java/io/wdd/rpc/execute/config/CommandReaderConfigBean.java index f4da32e..ac3ae3f 100644 --- a/server/src/main/java/io/wdd/rpc/execute/config/CommandReaderConfigBean.java +++ b/server/src/main/java/io/wdd/rpc/execute/config/CommandReaderConfigBean.java @@ -1,27 +1,26 @@ -package io.wdd.rpc.execute.config; - -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.REDIS_STREAM_LISTENER_CONSUMER_NAME; - -@Configuration -public class CommandReaderConfigBean { - - // todo must support for multi thread - // its not thread safe now - @Bean - public CommandReaderConfig commandReaderConfig() { - - return CommandReaderConfig - .builder() - .consumerName(REDIS_STREAM_LISTENER_CONSUMER_NAME) - .streamKey("ccc") - .consumerType(REDIS_STREAM_LISTENER_CONSUMER_NAME) - .group("ccc") - .ExecutionResult(null) - .build(); - } - - -} +//package io.wdd.rpc.execute.config; +// +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Configuration; +// +// +//@Configuration +//public class CommandReaderConfigBean { +// +// // todo must support for multi thread +// // its not thread safe now +// @Bean +// public CommandReaderConfig commandReaderConfig() { +// +// return CommandReaderConfig +// .builder() +// .consumerName(REDIS_STREAM_LISTENER_CONSUMER_NAME) +// .streamKey("ccc") +// .consumerType(REDIS_STREAM_LISTENER_CONSUMER_NAME) +// .group("ccc") +// .ExecutionResult(null) +// .build(); +// } +// +// +//} diff --git 
a/server/src/main/java/io/wdd/rpc/execute/result/BuildStreamReader.java b/server/src/main/java/io/wdd/rpc/execute/result/BuildStreamReader.java index 5775012..c6168a7 100644 --- a/server/src/main/java/io/wdd/rpc/execute/result/BuildStreamReader.java +++ b/server/src/main/java/io/wdd/rpc/execute/result/BuildStreamReader.java @@ -1,189 +1,189 @@ -package io.wdd.rpc.execute.result; - -import io.wdd.rpc.execute.config.CommandReaderConfig; -import io.wdd.server.utils.SpringUtils; -import lombok.SneakyThrows; -import lombok.extern.slf4j.Slf4j; -import org.springframework.data.redis.connection.stream.ReadOffset; -import org.springframework.data.redis.connection.stream.StreamOffset; -import org.springframework.data.redis.stream.StreamMessageListenerContainer; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.concurrent.TimeUnit; - -import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER; - - -@Component -@Slf4j -public class BuildStreamReader { - - private final HashMap REDIS_STREAM_LISTENER_CONTAINER_CACHE = new HashMap<>(16); - private RedisStreamReaderConfig redisStreamReaderConfig; - - private StreamMessageListenerContainer streamMessageListenerContainer; - - private CommandReaderConfig commandReaderConfig; - - public void buildStreamReader(CommandReaderConfig commandReaderConfig) { - - // prepare the environment - prepareExecutionEnv(); - - - // just modify the redis listener container and it's ok - modifyExecutionStreamReader(commandReaderConfig); - - } - - @SneakyThrows - private void modifyExecutionStreamReader(CommandReaderConfig commandReaderConfig) { - - // stop the old stream listener container - if (this.streamMessageListenerContainer.isRunning()) { - this.streamMessageListenerContainer.stop(); - } - - // modify container - this.streamMessageListenerContainer.receive( - StreamOffset.create( - commandReaderConfig.getStreamKey(), - ReadOffset.lastConsumed()), - - new CommandResultReader( - commandReaderConfig - ) - ); - - - // very important - TimeUnit.MILLISECONDS.sleep(500); - this.streamMessageListenerContainer.start(); - } - - private void prepareExecutionEnv() { - - getRedisStreamListenerContainer(); - - getRedisStreamReaderConfig(); - - } - - private void getRedisStreamReaderConfig() { - - this.commandReaderConfig = SpringUtils.getBean("commandReaderConfig", - CommandReaderConfig.class); - } - - private void getRedisStreamListenerContainer() { - - this.streamMessageListenerContainer = SpringUtils.getBean( - EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER, - StreamMessageListenerContainer.class - ); - } - - public void registerStreamReader(String redisStreamListenerContainerBeanName, String streamKey) { - registerStreamReader(redisStreamListenerContainerBeanName, - streamKey, - null); - } - - public void registerStreamReader(String redisStreamListenerContainerBeanName, String streamKey, ArrayList ExecutionResult) { - - // prepare the environment - prepareEnv(); - - // oldStreamKey equals streamKey don't need to do anything , just return - if (redisStreamReaderConfig.getStreamKey() - .equals(streamKey)) { - log.debug("redis listener container not change !"); - return; - } - - // destroy the old REDIS_STREAM_LISTENER_CONTAINER - destroyStreamReader(streamKey); - - // modify the configuration ==> streamKey - modifyStreamReader(streamKey, - ExecutionResult); - - // re-create the REDIS_STREAM_LISTENER_CONTAINER - 
createStreamReader(redisStreamListenerContainerBeanName, - streamKey); - - } - - private void prepareEnv() { - - getRedisStreamConfig(); - - } - - private void getRedisStreamConfig() { - - this.redisStreamReaderConfig = SpringUtils.getBean("redisStreamReaderConfig", - RedisStreamReaderConfig.class); - } - - - private void createStreamReader(String redisStreamListenerContainerBeanName, String streamKey) { - - log.debug("start to create the redis stream listener container"); - // create the lazy bean - - StreamMessageListenerContainer streamMessageListenerContainer = SpringUtils.getBean(redisStreamListenerContainerBeanName, - StreamMessageListenerContainer.class); - - REDIS_STREAM_LISTENER_CONTAINER_CACHE.put(streamKey, - streamMessageListenerContainer); - - // very important - log.debug("start the listener container"); - streamMessageListenerContainer.start(); - - - } - - private void modifyStreamReader(String streamKey, ArrayList executionResult) { - - log.debug("start to modify the redis stream listener container stream key"); - String oldStreamKey = redisStreamReaderConfig.getStreamKey(); - - log.debug("change stream key from [{}] to [{}]", - oldStreamKey, - streamKey); - - log.debug("start to set the Redis Stream Reader key"); - redisStreamReaderConfig.setStreamKey(streamKey); - - log.debug("start to set the Redis Stream Execution Result Container"); - redisStreamReaderConfig.setExecutionResult(executionResult); - - } - - - private void destroyStreamReader(String streamKey) { - - String oldStreamKey = redisStreamReaderConfig.getStreamKey(); - - if (REDIS_STREAM_LISTENER_CONTAINER_CACHE.containsKey(oldStreamKey)) { - - StreamMessageListenerContainer streamMessageListenerContainer = REDIS_STREAM_LISTENER_CONTAINER_CACHE.get(oldStreamKey); - - log.debug("destroyed old redis stream listener container is [ {} ]", - streamMessageListenerContainer); - - - // double destroy - SpringUtils.destroyBean(streamMessageListenerContainer); - streamMessageListenerContainer.stop(); - // help gc - streamMessageListenerContainer = null; - } - - - } -} +//package io.wdd.rpc.execute.result; +// +//import io.wdd.rpc.execute.config.CommandReaderConfig; +//import io.wdd.server.utils.SpringUtils; +//import lombok.SneakyThrows; +//import lombok.extern.slf4j.Slf4j; +//import org.springframework.data.redis.connection.stream.ReadOffset; +//import org.springframework.data.redis.connection.stream.StreamOffset; +//import org.springframework.data.redis.stream.StreamMessageListenerContainer; +//import org.springframework.stereotype.Component; +// +//import java.util.ArrayList; +//import java.util.HashMap; +//import java.util.concurrent.TimeUnit; +// +//import static io.wdd.rpc.execute.result.RedisStreamReaderConfig.EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER; +// +// +//@Component +//@Slf4j +//public class BuildStreamReader { +// +// private final HashMap REDIS_STREAM_LISTENER_CONTAINER_CACHE = new HashMap<>(16); +// private RedisStreamReaderConfig redisStreamReaderConfig; +// +// private StreamMessageListenerContainer streamMessageListenerContainer; +// +// private CommandReaderConfig commandReaderConfig; +// +// public void buildStreamReader(CommandReaderConfig commandReaderConfig) { +// +// // prepare the environment +// prepareExecutionEnv(); +// +// +// // just modify the redis listener container and it's ok +// modifyExecutionStreamReader(commandReaderConfig); +// +// } +// +// @SneakyThrows +// private void modifyExecutionStreamReader(CommandReaderConfig commandReaderConfig) { +// +// // stop the old stream 
listener container +// if (this.streamMessageListenerContainer.isRunning()) { +// this.streamMessageListenerContainer.stop(); +// } +// +// // modify container +// this.streamMessageListenerContainer.receive( +// StreamOffset.create( +// commandReaderConfig.getStreamKey(), +// ReadOffset.lastConsumed()), +// +// new CommandResultReader( +// commandReaderConfig +// ) +// ); +// +// +// // very important +// TimeUnit.MILLISECONDS.sleep(500); +// this.streamMessageListenerContainer.start(); +// } +// +// private void prepareExecutionEnv() { +// +// getRedisStreamListenerContainer(); +// +// getRedisStreamReaderConfig(); +// +// } +// +// private void getRedisStreamReaderConfig() { +// +// this.commandReaderConfig = SpringUtils.getBean("commandReaderConfig", +// CommandReaderConfig.class); +// } +// +// private void getRedisStreamListenerContainer() { +// +// this.streamMessageListenerContainer = SpringUtils.getBean( +// EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER, +// StreamMessageListenerContainer.class +// ); +// } +// +// public void registerStreamReader(String redisStreamListenerContainerBeanName, String streamKey) { +// registerStreamReader(redisStreamListenerContainerBeanName, +// streamKey, +// null); +// } +// +// public void registerStreamReader(String redisStreamListenerContainerBeanName, String streamKey, ArrayList ExecutionResult) { +// +// // prepare the environment +// prepareEnv(); +// +// // oldStreamKey equals streamKey don't need to do anything , just return +// if (redisStreamReaderConfig.getStreamKey() +// .equals(streamKey)) { +// log.debug("redis listener container not change !"); +// return; +// } +// +// // destroy the old REDIS_STREAM_LISTENER_CONTAINER +// destroyStreamReader(streamKey); +// +// // modify the configuration ==> streamKey +// modifyStreamReader(streamKey, +// ExecutionResult); +// +// // re-create the REDIS_STREAM_LISTENER_CONTAINER +// createStreamReader(redisStreamListenerContainerBeanName, +// streamKey); +// +// } +// +// private void prepareEnv() { +// +// getRedisStreamConfig(); +// +// } +// +// private void getRedisStreamConfig() { +// +// this.redisStreamReaderConfig = SpringUtils.getBean("redisStreamReaderConfig", +// RedisStreamReaderConfig.class); +// } +// +// +// private void createStreamReader(String redisStreamListenerContainerBeanName, String streamKey) { +// +// log.debug("start to create the redis stream listener container"); +// // create the lazy bean +// +// StreamMessageListenerContainer streamMessageListenerContainer = SpringUtils.getBean(redisStreamListenerContainerBeanName, +// StreamMessageListenerContainer.class); +// +// REDIS_STREAM_LISTENER_CONTAINER_CACHE.put(streamKey, +// streamMessageListenerContainer); +// +// // very important +// log.debug("start the listener container"); +// streamMessageListenerContainer.start(); +// +// +// } +// +// private void modifyStreamReader(String streamKey, ArrayList executionResult) { +// +// log.debug("start to modify the redis stream listener container stream key"); +// String oldStreamKey = redisStreamReaderConfig.getStreamKey(); +// +// log.debug("change stream key from [{}] to [{}]", +// oldStreamKey, +// streamKey); +// +// log.debug("start to set the Redis Stream Reader key"); +// redisStreamReaderConfig.setStreamKey(streamKey); +// +// log.debug("start to set the Redis Stream Execution Result Container"); +// redisStreamReaderConfig.setExecutionResult(executionResult); +// +// } +// +// +// private void destroyStreamReader(String streamKey) { +// +// String oldStreamKey = 
redisStreamReaderConfig.getStreamKey(); +// +// if (REDIS_STREAM_LISTENER_CONTAINER_CACHE.containsKey(oldStreamKey)) { +// +// StreamMessageListenerContainer streamMessageListenerContainer = REDIS_STREAM_LISTENER_CONTAINER_CACHE.get(oldStreamKey); +// +// log.debug("destroyed old redis stream listener container is [ {} ]", +// streamMessageListenerContainer); +// +// +// // double destroy +// SpringUtils.destroyBean(streamMessageListenerContainer); +// streamMessageListenerContainer.stop(); +// // help gc +// streamMessageListenerContainer = null; +// } +// +// +// } +//} diff --git a/server/src/main/java/io/wdd/rpc/execute/result/RedisStreamReaderConfig.java b/server/src/main/java/io/wdd/rpc/execute/result/RedisStreamReaderConfig.java index a57da80..8d51e9c 100644 --- a/server/src/main/java/io/wdd/rpc/execute/result/RedisStreamReaderConfig.java +++ b/server/src/main/java/io/wdd/rpc/execute/result/RedisStreamReaderConfig.java @@ -1,121 +1,121 @@ -package io.wdd.rpc.execute.result; - - -import io.wdd.rpc.scheduler.service.status.AgentStatusStreamReader; -import lombok.Getter; -import lombok.Setter; -import lombok.extern.slf4j.Slf4j; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Lazy; -import org.springframework.context.annotation.Scope; -import org.springframework.data.redis.connection.RedisConnectionFactory; -import org.springframework.data.redis.connection.stream.MapRecord; -import org.springframework.data.redis.connection.stream.ReadOffset; -import org.springframework.data.redis.connection.stream.StreamOffset; -import org.springframework.data.redis.stream.StreamMessageListenerContainer; - -import javax.annotation.Resource; -import java.time.Duration; -import java.util.ArrayList; - -@Configuration -@Slf4j -@Getter -@Setter -public class RedisStreamReaderConfig { - - @Resource - private RedisConnectionFactory redisConnectionFactory; - - public static final String COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER = "commandResultRedisStreamListenerContainer"; - - public static final String EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER = "executionResultRedisStreamListenerContainer"; - - public static final String AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER = "agentStatusRedisStreamListenerContainer"; - - public static final String REDIS_STREAM_LISTENER_CONSUMER_NAME = "OctopusServer"; - - /** - * used in old model - */ - private String streamKey = "cccc"; - - /** - * no use - */ - private ArrayList executionResult = null; - - - @Bean(value = EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER) - @Lazy - public StreamMessageListenerContainer> executionResultRedisStreamListenerContainer(){ - - StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions - .builder() - .pollTimeout(Duration.ofSeconds(2)) - .build(); - - StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); - - return listenerContainer; - } - - - @Bean(value = COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER) - @Scope("prototype") - @Lazy - public StreamMessageListenerContainer> commandResultRedisStreamListenerContainer(){ - - StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions - .builder() - .pollTimeout(Duration.ofSeconds(2)) - .build(); - - 
StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); - - // todo 此部分可以被移出到另外的位置,会更加方便,就不需要对此Bean进行创建和销毁了 - listenerContainer.receive( - - StreamOffset.create(streamKey, ReadOffset.lastConsumed()), - - new CommandResultReader( - REDIS_STREAM_LISTENER_CONSUMER_NAME, - streamKey, - REDIS_STREAM_LISTENER_CONSUMER_NAME, - executionResult - ) - - ); - - return listenerContainer; - } - - @Bean(value = AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER) - @Scope("prototype") - @Lazy - public StreamMessageListenerContainer> agentStatusRedisStreamListenerContainer(){ - - StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions - .builder() - .pollTimeout(Duration.ofSeconds(2)) - .build(); - - StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); - - listenerContainer.receive( - - StreamOffset.create(streamKey, ReadOffset.lastConsumed()), - - new AgentStatusStreamReader( - REDIS_STREAM_LISTENER_CONSUMER_NAME, - REDIS_STREAM_LISTENER_CONSUMER_NAME, - REDIS_STREAM_LISTENER_CONSUMER_NAME) - - ); - - return listenerContainer; - } - - -} +//package io.wdd.rpc.execute.result; +// +// +//import io.wdd.rpc.scheduler.service.status.AgentStatusStreamReader; +//import lombok.Getter; +//import lombok.Setter; +//import lombok.extern.slf4j.Slf4j; +//import org.springframework.context.annotation.Bean; +//import org.springframework.context.annotation.Configuration; +//import org.springframework.context.annotation.Lazy; +//import org.springframework.context.annotation.Scope; +//import org.springframework.data.redis.connection.RedisConnectionFactory; +//import org.springframework.data.redis.connection.stream.MapRecord; +//import org.springframework.data.redis.connection.stream.ReadOffset; +//import org.springframework.data.redis.connection.stream.StreamOffset; +//import org.springframework.data.redis.stream.StreamMessageListenerContainer; +// +//import javax.annotation.Resource; +//import java.time.Duration; +//import java.util.ArrayList; +// +//@Configuration +//@Slf4j +//@Getter +//@Setter +//public class RedisStreamReaderConfig { +// +// @Resource +// private RedisConnectionFactory redisConnectionFactory; +// +// public static final String COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER = "commandResultRedisStreamListenerContainer"; +// +// public static final String EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER = "executionResultRedisStreamListenerContainer"; +// +// public static final String AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER = "agentStatusRedisStreamListenerContainer"; +// +// public static final String REDIS_STREAM_LISTENER_CONSUMER_NAME = "OctopusServer"; +// +// /** +// * used in old model +// */ +// private String streamKey = "cccc"; +// +// /** +// * no use +// */ +// private ArrayList executionResult = null; +// +// +// @Bean(value = EXECUTION_RESULT_REDIS_STREAM_LISTENER_CONTAINER) +// @Lazy +// public StreamMessageListenerContainer> executionResultRedisStreamListenerContainer(){ +// +// StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions +// .builder() +// .pollTimeout(Duration.ofSeconds(2)) +// .build(); +// +// StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); +// +// return listenerContainer; +// } 
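RedisStreamReaderConfig, commented out above, was the old delivery path for command output: one StreamMessageListenerContainer per concern, stopped and re-pointed at whatever resultKey a command produced. Reduced to just the Spring Data Redis calls the old config itself used, the wiring looks like this; the stream key and the printing listener are placeholders.

    import java.time.Duration;
    import org.springframework.data.redis.connection.RedisConnectionFactory;
    import org.springframework.data.redis.connection.stream.MapRecord;
    import org.springframework.data.redis.connection.stream.ReadOffset;
    import org.springframework.data.redis.connection.stream.StreamOffset;
    import org.springframework.data.redis.stream.StreamMessageListenerContainer;

    // Tail a result stream: poll every 2 seconds from the last-consumed offset
    // and hand each MapRecord to a listener, as the retired beans did.
    public class ResultStreamTailSketch {

        public static StreamMessageListenerContainer<String, MapRecord<String, String, String>> tail(
                RedisConnectionFactory factory, String streamKey) {

            StreamMessageListenerContainer.StreamMessageListenerContainerOptions<String, MapRecord<String, String, String>> options =
                    StreamMessageListenerContainer.StreamMessageListenerContainerOptions
                            .builder()
                            .pollTimeout(Duration.ofSeconds(2))
                            .build();

            StreamMessageListenerContainer<String, MapRecord<String, String, String>> container =
                    StreamMessageListenerContainer.create(factory, options);

            container.receive(
                    StreamOffset.create(streamKey, ReadOffset.lastConsumed()),
                    message -> System.out.println(message.getValue()));

            container.start(); // polling begins; stop() before re-pointing the key
            return container;
        }
    }

The refactor retires this stop-and-re-point dance in favour of the replay-wait services, which read results without rebuilding listener containers per stream key.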
+// +// +// @Bean(value = COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER) +// @Scope("prototype") +// @Lazy +// public StreamMessageListenerContainer> commandResultRedisStreamListenerContainer(){ +// +// StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions +// .builder() +// .pollTimeout(Duration.ofSeconds(2)) +// .build(); +// +// StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); +// +// // todo this part could be moved elsewhere; that would be more convenient and remove the need to create and destroy this bean +// listenerContainer.receive( +// +// StreamOffset.create(streamKey, ReadOffset.lastConsumed()), +// +// new CommandResultReader( +// REDIS_STREAM_LISTENER_CONSUMER_NAME, +// streamKey, +// REDIS_STREAM_LISTENER_CONSUMER_NAME, +// executionResult +// ) +// +// ); +// +// return listenerContainer; +// } +// +// @Bean(value = AGENT_STATUS_REDIS_STREAM_LISTENER_CONTAINER) +// @Scope("prototype") +// @Lazy +// public StreamMessageListenerContainer> agentStatusRedisStreamListenerContainer(){ +// +// StreamMessageListenerContainer.StreamMessageListenerContainerOptions> options = StreamMessageListenerContainer.StreamMessageListenerContainerOptions +// .builder() +// .pollTimeout(Duration.ofSeconds(2)) +// .build(); +// +// StreamMessageListenerContainer> listenerContainer = StreamMessageListenerContainer.create(redisConnectionFactory, options); +// +// listenerContainer.receive( +// +// StreamOffset.create(streamKey, ReadOffset.lastConsumed()), +// +// new AgentStatusStreamReader( +// REDIS_STREAM_LISTENER_CONSUMER_NAME, +// REDIS_STREAM_LISTENER_CONSUMER_NAME, +// REDIS_STREAM_LISTENER_CONSUMER_NAME) +// +// ); +// +// return listenerContainer; +// } +// +// +//} diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java index dee0fa2..48dbf4c 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionService.java @@ -1,20 +1,23 @@ package io.wdd.rpc.execute.service; -import io.wdd.common.beans.rabbitmq.OctopusMessage; - +import java.util.ArrayList; import java.util.HashMap; import java.util.List; - +/** + * Core interface for asynchronous command execution: + * commands are dispatched without blocking, and each call returns the Redis result keys to read later + */ public interface AsyncExecutionService { - String SendCommandToAgent(String agentTopicName, String command); + /** + * ------------------------ Async Command Executor ------------------------------ + */ + ArrayList AsyncSendCommandToAgent(String agentTopicName, List commandList); - String SendCommandToAgent(String agentTopicName, List commandList); + ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList); - String SendCommandToAgent(String agentTopicName, String type, List commandList); - - List SendCommandToAgent(List agentTopicNameList, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask); + List> AsyncSendCommandToAgent(List agentTopicNameList, String type, List commandList); /** * Lowest-level function for dispatching a single-line command script * @@ -27,7 +30,7 @@ public interface AsyncExecutionService { * @param durationTask * @return */ - String SendCommandToAgent( + ArrayList AsyncSendCommandToAgent( String agentTopicName, String type, List commandList, @@ -41,14 +44,21 @@ * ------------------------------------------------- */ - String SendCommandToAgentComplete(String 
agentTopicName, String type, List commandList, List> commandListComplete); + ArrayList AsyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> completeCommandList); + List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> completeCommandList, boolean isDurationTask); - List SendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> commandListComplete, boolean isDurationTask); + /** + * 通常为 页面定时脚本任务调用 + * + * @param agentTopicNameList 目标Agent的TopicName列表 + * @param type 任务类型 + * @param completeCommandList 完整的类型 + * @return 每个Agent只返回一个 ResultKey(Script脚本的结果全部拼接到一起),全部的resultKey + */ + List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList); - List SendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList); - /** * 通常为 页面定时脚本任务调用 * @@ -58,10 +68,10 @@ public interface AsyncExecutionService { * @param atnFutureKey 由于脚本任务为延迟调用,故需要提前生成未来的ResultKey * @return 每个Agent只返回一个 ResultKey(Script脚本的结果全部拼接到一起),全部的resultKey */ - List SendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey); + List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey); - String SendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey); + ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey); /** * 调用 完整脚本的 最底层函数 @@ -74,30 +84,7 @@ public interface AsyncExecutionService { * @param durationTask * @return resultKey 本次操作在Redis中记录的结果Key */ - String SendCommandToAgent( - String agentTopicName, - String type, - List commandList, - List> commandListComplete, - boolean needResultReplay, - String futureKey, - boolean durationTask - ); - - - /** - * 同步命令调用的方法 - * - * @param agentTopicName - * @param type - * @param commandList - * @param commandListComplete - * @param needResultReplay - * @param futureKey - * @param durationTask - * @return - */ - OctopusMessage AsyncCallSendCommandToAgent( + ArrayList AsyncSendCommandToAgentComplete( String agentTopicName, String type, List commandList, diff --git a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java index 3d8bafc..d7b55cb 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/AsyncExecutionServiceImpl.java @@ -1,108 +1,124 @@ package io.wdd.rpc.execute.service; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.executor.ExecutionMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; -import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.execute.config.ExecutionLog; -import io.wdd.rpc.message.sender.OMessageToAgentSender; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; +import io.wdd.rpc.message.handler.async.AsyncWaitOctopusMessageResultService; +import io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import 
org.springframework.data.redis.core.RedisTemplate; import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.time.LocalDateTime; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_SET; - @Service @Slf4j public class AsyncExecutionServiceImpl implements AsyncExecutionService { - private static final String MANUAL_COMMAND_TYPE = "manual-command"; + private static final boolean COMMAND_EXEC_NEED_REPLAY = true; + + private static final OctopusMessageType CurrentAppOctopusMessageType = OctopusMessageType.EXECUTOR; @Resource - OMessageToAgentSender oMessageToAgentSender; + AsyncWaitOctopusMessageResultService asyncWaitOctopusMessageResultService; @Resource - ObjectMapper objectMapper; - @Resource - RedisTemplate redisTemplate; + SyncExecutionService asyncExecutionService; + + /** + * 一个命令执行的最长等待时间 + */ + int processMaxWaitSeconds = 10; @Override - public String SendCommandToAgent(String agentTopicName, String command) { - return this.SendCommandToAgent( - agentTopicName, - List.of(command) - ); - } + public ArrayList AsyncSendCommandToAgent(String agentTopicName, List commandList) { - @Override - public String SendCommandToAgent(String agentTopicName, List commandList) { - return this.SendCommandToAgent( + return this.AsyncSendCommandToAgentComplete( agentTopicName, - MANUAL_COMMAND_TYPE, - commandList - ); - } - - @Override - public String SendCommandToAgent(String agentTopicName, String type, List commandList) { - - return SendCommandToAgent( - agentTopicName, - type, - commandList, - false, null, - false - ); - - } - - @Override - public String SendCommandToAgent(String agentTopicName, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { - - return this.SendCommandToAgent( - agentTopicName, - type, commandList, null, - needResultReplay, - futureKey, - durationTask - ); - } - - @Override - public String SendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete) { - - return this.SendCommandToAgent( - agentTopicName, - type, - commandList, - commandListComplete, - false, + COMMAND_EXEC_NEED_REPLAY, null, false ); } @Override - public List SendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> commandListComplete, boolean isDurationTask) { + public ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList) { + + + return this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + null, + COMMAND_EXEC_NEED_REPLAY, + null, + false + ); + } + + @Override + public List> AsyncSendCommandToAgent(List agentTopicNameList, String type, List commandList) { + return agentTopicNameList .stream() .map( - agentTopicName -> this.SendCommandToAgent( + agentTopicName -> this.AsyncSendCommandToAgentComplete( agentTopicName, type, commandList, - commandListComplete, - false, + null, + COMMAND_EXEC_NEED_REPLAY, + null, + false + ) + ) + .collect(Collectors.toList()); + } + + @Override + public ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { + + return this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + null, + COMMAND_EXEC_NEED_REPLAY, + futureKey, + 
false + ); + } + + @Override + public ArrayList AsyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> completeCommandList) { + return this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + completeCommandList, + COMMAND_EXEC_NEED_REPLAY, + null, + false + ); + } + + @Override + public List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> completeCommandList, boolean isDurationTask) { + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + completeCommandList, + COMMAND_EXEC_NEED_REPLAY, null, isDurationTask ) @@ -111,31 +127,60 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { } @Override - public String SendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey) { + public List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList) { - return this.SendCommandToAgent( - agentTopicName, - type, - commandList, - commandListComplete, - false, - futureKey, - false - ); + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + null, + completeCommandList, + COMMAND_EXEC_NEED_REPLAY, + null, + false + ) + ) + .collect(Collectors.toList()); } @Override - public String SendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + public List> AsyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey) { + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + null, + completeCommandList, + COMMAND_EXEC_NEED_REPLAY, + atnFutureKey.get(agentTopicName), + false + ) + ) + .collect(Collectors.toList()); + } - String resultKey = futureKey; - // 判定是否是 FutureKey - if (null == futureKey) { - resultKey = ExecutionMessage.GetResultKey(agentTopicName); - } + @Override + public ArrayList AsyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey) { + return this.AsyncSendCommandToAgentComplete( + agentTopicName, + type, + commandList, + commandListComplete, + COMMAND_EXEC_NEED_REPLAY, + futureKey, + false + ); + } - // 调用最底层的方法 - this.AsyncCallSendCommandToAgent( + @Override + public ArrayList AsyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + + OctopusMessage octopusMessage = asyncExecutionService.AsyncCallSendCommandToAgent( agentTopicName, type, commandList, @@ -145,225 +190,67 @@ public class AsyncExecutionServiceImpl implements AsyncExecutionService { durationTask ); - return resultKey; - } + LocalDateTime initTime = octopusMessage.getInit_time(); - @Override - public OctopusMessage AsyncCallSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + // OM 中的result保存 + ArrayList result = new ArrayList<>(); - // 检查agentTopicName是否存在 - if (!ALL_AGENT_TOPIC_NAME_SET.contains(agentTopicName)) { - log.error( - "agentTopicName异常! 
输入为 => {}", - agentTopicName + // 构造消息等待对象 + int commandCount = 1; + if (null != commandListComplete) { + commandCount = Math.max( + commandListComplete.size(), + 1 ); - return null; - //throw new MyRuntimeException("agentTopicName异常!" + agentTopicName); } - // 归一化type - if (StringUtils.isEmpty(type)) { - type = MANUAL_COMMAND_TYPE; - } - - String resultKey = futureKey; - // 判定是否是 FutureKey - if (null == futureKey) { - resultKey = ExecutionMessage.GetResultKey(agentTopicName); - } - - // 构造 Execution Command对应的消息体 - ExecutionMessage executionMessage = this - .generateExecutionMessage( - type, - commandList, - resultKey, - commandListComplete, - needResultReplay, - durationTask - ); - OctopusMessage octopusMessage = this.generateOctopusMessage( - agentTopicName, - executionMessage + // 构造回复信息的内容 + OctopusMessageAsyncReplayContend executionReplayContent = OctopusMessageAsyncReplayContend.build( + commandCount, + CurrentAppOctopusMessageType, + initTime ); + CountDownLatch countDownLatch = executionReplayContent.getCountDownLatch(); - // send the message - oMessageToAgentSender.send(octopusMessage); - - // set up the stream read group - String group = redisTemplate - .opsForStream() - .createGroup( - resultKey, - resultKey - ); - - log.debug( - "set consumer group [{}] for the stream key with => [ {} ]", - group, - resultKey - ); - - // change the redis stream listener container - // createStreamReader.registerStreamReader(COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER, resultKey); - - // construct the persistent Bean - /*ExecutionLog executionLog = buildPersistentLogBeanFromOctopusMessage( - octopusMessage, - executionMessage - );*/ - // send resultKey to ExecutionResultDaemonHandler - // 当批量执行,产生大量的resultKey的时候,会出现线程爆炸,导致所有的全部失效 - /*WAIT_EXECUTION_RESULT_LIST.put( - resultKey, - executionLog - );*/ - - // help gc - executionMessage = null; - - return octopusMessage; - } - - private OctopusMessage generateOctopusMessage(String agentTopicName, ExecutionMessage executionMessage) { + // 开始等待结果 + asyncWaitOctopusMessageResultService.waitFor(executionReplayContent); + // 监听结果 try { + boolean await = countDownLatch.await( + processMaxWaitSeconds, + TimeUnit.SECONDS + ); - return OctopusMessage - .builder() - .type(OctopusMessageType.EXECUTOR) - .init_time(TimeUtils.currentFormatTime()) - .uuid(agentTopicName) - .content( - objectMapper.writeValueAsString(executionMessage) + } catch (InterruptedException e) { + throw new RuntimeException(e); + } finally { + + // 等待所有的结果返回 + // 停止等待结果 + asyncWaitOctopusMessageResultService.stopWaiting(executionReplayContent); + + // 解析结果 + executionReplayContent + .getReplayOMList() + .stream() + .map( + om -> { + log.debug( + "replay message is => {}", + om + ); + + return (ArrayList) om.getResult(); + } ) - .build(); + .forEachOrdered( + singleResult -> result.addAll(singleResult) + ); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); } + // 返回 执行的结果 + return result; } - - private ExecutionLog buildPersistentLogBeanFromOctopusMessage(OctopusMessage octopusMessage, ExecutionMessage executionMessage) { - ExecutionLog executionLog = new ExecutionLog(); - executionLog.setAgentTopicName(octopusMessage.getUuid()); - executionLog.setResultKey((String) octopusMessage.getContent()); - executionLog.setCommandList(String.valueOf(executionMessage.getSingleLineCommand())); - executionLog.setType(executionMessage.getType()); - executionLog.setResultKey(executionMessage.getResultKey()); - return executionLog; - } - - - @Override - public List 
SendCommandToAgent(List agentagentTopicNameList, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { - - return agentagentTopicNameList - .stream() - .map( - agentTopicName -> this - .SendCommandToAgent - ( - agentTopicName, - type, - commandList, - null, - needResultReplay, - futureKey, - durationTask - ) - ) - .collect(Collectors.toList()); - } - - /** - * @param agentTopicNameList 目标Agent的TopicName列表 - * @param type 任务类型 - * @param completeCommandList 完整的类型 - * @return - */ - @Override - public List SendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList) { - - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SendCommandToAgentComplete( - agentTopicName, - type, - null, - completeCommandList - ) - ) - .collect(Collectors.toList()); - - } - - @Override - public List SendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey) { - - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SendCommandToAgent( - agentTopicName, - type, - null, - completeCommandList, - atnFutureKey.getOrDefault( - agentTopicName, - null - ) - ) - ) - .collect(Collectors.toList()); - } - - - @Deprecated - private OctopusMessage generateOctopusMessage(String agentTopicName, String resultKey, String type, List commandList, List> commandListComplete) { - - - ExecutionMessage executionMessage = this.generateExecutionMessage( - type, - commandList, - resultKey, - commandListComplete, - false, - false - ); - - String executionMessageString; - - try { - executionMessageString = objectMapper.writeValueAsString(executionMessage); - - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } - - return OctopusMessage - .builder() - .type(OctopusMessageType.EXECUTOR) - .init_time(LocalDateTime.now()) - .content(executionMessageString) - .uuid(agentTopicName) - .build(); - } - - private ExecutionMessage generateExecutionMessage(String type, List commandList, String resultKey, List> commandListComplete, boolean needResultReplay, boolean durationTask) { - - return ExecutionMessage - .builder() - .resultKey(resultKey) - .type(type) - .singleLineCommand(commandList) - .multiLineCommand(commandListComplete) - .needResultReplay(needResultReplay) - .durationTask(durationTask) - .build(); - } - - } diff --git a/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java b/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java index 1295831..3ff7089 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/ExecutionResultDaemonHandler.java @@ -1,204 +1,203 @@ -package io.wdd.rpc.execute.service; - - -import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.execute.config.CommandReaderConfig; -import io.wdd.rpc.execute.config.ExecutionLog; -import io.wdd.rpc.execute.result.BuildStreamReader; -import io.wdd.server.service.ExecutionLogService; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.collections.CollectionUtils; -import org.springframework.context.annotation.Lazy; -import org.springframework.stereotype.Service; - -import javax.annotation.PostConstruct; -import javax.annotation.Resource; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.*; - -/** - * 1. [waiting strategy ] - * 2. [build the redis stream listener] - * 3. 
[call persistence] - */ -//@Service -@Slf4j -@Lazy -@Deprecated -public class ExecutionResultDaemonHandler { - - /** - * store all execution result key - *

- * which means there are execution running , waiting for their result to handle - */ - public static final ConcurrentHashMap WAIT_EXECUTION_RESULT_LIST = new ConcurrentHashMap<>(32); - private final int MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT = 70; - - @Resource - BuildStreamReader buildStreamReader; - - @Resource - CommandReaderConfig commandReaderConfig; - - @Resource - ExecutionLogService executionLogService; - - @PostConstruct - public void startExecutionDaemonHandler() { - - // 启动一个异步线程,运行 Execution结果处理守护进程 - CompletableFuture.runAsync( - () -> realStartExecutionDaemonHandler() - ); - - } - - private void realStartExecutionDaemonHandler() { - - while (true) { - - while (WAIT_EXECUTION_RESULT_LIST.size() == 0) { - try { - // no execution result need to handle - - // wait for 5 seconds - log.debug("realStartExecutionDaemonHandler start to sleep waiting for result !"); - TimeUnit.SECONDS.sleep(5); - - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - // has result to handle , just handle one result at one time - String resultKey = WAIT_EXECUTION_RESULT_LIST - .keys() - .nextElement(); - - log.debug( - "current result key is [{}]", - resultKey - ); - - - CompletableFuture> executionResultFuture = - CompletableFuture - .supplyAsync( - () -> { - // 修改相应的参数 - commandReaderConfig.setStreamKey(resultKey); - // listener container 实际上是根据这个绑定的 - commandReaderConfig.setGroup(resultKey); - // 必须归零 - commandReaderConfig.setExecutionResult(null); - - // 构造 resultKey对应的 Redis Stream Listener Container - buildStreamReader - .buildStreamReader(commandReaderConfig); - - // 获得结果 - ArrayList s = new ArrayList<>( - List.of("no no no") - ); - - try { - s = CompletableFuture - .supplyAsync( - () -> { - while (true) { - // todo 多条命令时,这里只能获取到一个结果 - if (CollectionUtils.isNotEmpty(commandReaderConfig.getExecutionResult())) { - return commandReaderConfig.getExecutionResult(); - } - - try { - TimeUnit.SECONDS.sleep(3); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - } - ) - // 获取相应的结果 - .get( - MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT, - TimeUnit.SECONDS - ); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } catch (ExecutionException e) { - throw new RuntimeException(e); - } catch (TimeoutException e) { - throw new RuntimeException(e); - } - - - return s; - } - ); - - CompletableFuture> falloutTimeFuture = CompletableFuture.supplyAsync( - () -> { - try { - TimeUnit.SECONDS.sleep(MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - - return null; - } - ); - - // 获取结果,然后销毁Stream Listener Container - CompletableFuture complete = CompletableFuture - .anyOf( - falloutTimeFuture, - executionResultFuture - ); - - complete - .whenComplete( - (result, e) -> { - - log.debug( - "execution result are => {}", - result - ); - - // 持久化存储对应的结果 - ExecutionLog executionLog = WAIT_EXECUTION_RESULT_LIST.get(resultKey); - executionLog.setAcTime(TimeUtils.currentTime()); - executionLog.setResultContent(String.valueOf(commandReaderConfig.getExecutionResult())); - executionLog.setResultCode( - CollectionUtils.isEmpty((Collection) result) ? 
1 : 0 - ); - executionLog.setRecordId(commandReaderConfig.getRecordId()); - - - // 保存操作 - executionLogService.save(executionLog); - - // 清除此次任务的内容 - WAIT_EXECUTION_RESULT_LIST.remove(resultKey); - log.info( - "[Execution] - command {} result are {} result code is {} ,whole process are complete !", - executionLog.getCommandList(), - executionLog.getResultContent(), - executionLog.getResultCode() - ); - } - ); - - // very important - // stuck the main thread , otherwise it will create a dead loop - complete.join(); - - } - - } - - -} +//package io.wdd.rpc.execute.service; +// +// +//import io.wdd.common.utils.TimeUtils; +//import io.wdd.rpc.execute.config.CommandReaderConfig; +//import io.wdd.rpc.execute.config.ExecutionLog; +//import io.wdd.rpc.execute.result.BuildStreamReader; +//import io.wdd.server.service.ExecutionLogService; +//import lombok.extern.slf4j.Slf4j; +//import org.apache.commons.collections.CollectionUtils; +//import org.springframework.context.annotation.Lazy; +// +//import javax.annotation.PostConstruct; +//import javax.annotation.Resource; +//import java.util.ArrayList; +//import java.util.Collection; +//import java.util.List; +//import java.util.concurrent.*; +// +///** +// * 1. [waiting strategy ] +// * 2. [build the redis stream listener] +// * 3. [call persistence] +// */ +////@Service +//@Slf4j +//@Lazy +//@Deprecated +//public class ExecutionResultDaemonHandler { +// +// /** +// * store all execution result key +// *

+// * which means there are execution running , waiting for their result to handle +// */ +// public static final ConcurrentHashMap WAIT_EXECUTION_RESULT_LIST = new ConcurrentHashMap<>(32); +// private final int MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT = 70; +// +// @Resource +// BuildStreamReader buildStreamReader; +// +// @Resource +// CommandReaderConfig commandReaderConfig; +// +// @Resource +// ExecutionLogService executionLogService; +// +// @PostConstruct +// public void startExecutionDaemonHandler() { +// +// // 启动一个异步线程,运行 Execution结果处理守护进程 +// CompletableFuture.runAsync( +// () -> realStartExecutionDaemonHandler() +// ); +// +// } +// +// private void realStartExecutionDaemonHandler() { +// +// while (true) { +// +// while (WAIT_EXECUTION_RESULT_LIST.size() == 0) { +// try { +// // no execution result need to handle +// +// // wait for 5 seconds +// log.debug("realStartExecutionDaemonHandler start to sleep waiting for result !"); +// TimeUnit.SECONDS.sleep(5); +// +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } +// } +// +// // has result to handle , just handle one result at one time +// String resultKey = WAIT_EXECUTION_RESULT_LIST +// .keys() +// .nextElement(); +// +// log.debug( +// "current result key is [{}]", +// resultKey +// ); +// +// +// CompletableFuture> executionResultFuture = +// CompletableFuture +// .supplyAsync( +// () -> { +// // 修改相应的参数 +// commandReaderConfig.setStreamKey(resultKey); +// // listener container 实际上是根据这个绑定的 +// commandReaderConfig.setGroup(resultKey); +// // 必须归零 +// commandReaderConfig.setExecutionResult(null); +// +// // 构造 resultKey对应的 Redis Stream Listener Container +// buildStreamReader +// .buildStreamReader(commandReaderConfig); +// +// // 获得结果 +// ArrayList s = new ArrayList<>( +// List.of("no no no") +// ); +// +// try { +// s = CompletableFuture +// .supplyAsync( +// () -> { +// while (true) { +// // todo 多条命令时,这里只能获取到一个结果 +// if (CollectionUtils.isNotEmpty(commandReaderConfig.getExecutionResult())) { +// return commandReaderConfig.getExecutionResult(); +// } +// +// try { +// TimeUnit.SECONDS.sleep(3); +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } +// } +// } +// ) +// // 获取相应的结果 +// .get( +// MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT, +// TimeUnit.SECONDS +// ); +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } catch (ExecutionException e) { +// throw new RuntimeException(e); +// } catch (TimeoutException e) { +// throw new RuntimeException(e); +// } +// +// +// return s; +// } +// ); +// +// CompletableFuture> falloutTimeFuture = CompletableFuture.supplyAsync( +// () -> { +// try { +// TimeUnit.SECONDS.sleep(MAX_TIMEOUT_WAITING_FOR_EXECUTION_RESULT); +// } catch (InterruptedException e) { +// throw new RuntimeException(e); +// } +// +// return null; +// } +// ); +// +// // 获取结果,然后销毁Stream Listener Container +// CompletableFuture complete = CompletableFuture +// .anyOf( +// falloutTimeFuture, +// executionResultFuture +// ); +// +// complete +// .whenComplete( +// (result, e) -> { +// +// log.debug( +// "execution result are => {}", +// result +// ); +// +// // 持久化存储对应的结果 +// ExecutionLog executionLog = WAIT_EXECUTION_RESULT_LIST.get(resultKey); +// executionLog.setAcTime(TimeUtils.currentTime()); +// executionLog.setResultContent(String.valueOf(commandReaderConfig.getExecutionResult())); +// executionLog.setResultCode( +// CollectionUtils.isEmpty((Collection) result) ? 
1 : 0 +// ); +// executionLog.setRecordId(commandReaderConfig.getRecordId()); +// +// +// // 保存操作 +// executionLogService.save(executionLog); +// +// // 清除此次任务的内容 +// WAIT_EXECUTION_RESULT_LIST.remove(resultKey); +// log.info( +// "[Execution] - command {} result are {} result code is {} ,whole process are complete !", +// executionLog.getCommandList(), +// executionLog.getResultContent(), +// executionLog.getResultCode() +// ); +// } +// ); +// +// // very important +// // stuck the main thread , otherwise it will create a dead loop +// complete.join(); +// +// } +// +// } +// +// +//} diff --git a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionService.java b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionService.java index ed1548d..fc52137 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionService.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionService.java @@ -1,23 +1,20 @@ package io.wdd.rpc.execute.service; -import java.util.ArrayList; +import io.wdd.rpc.message.OctopusMessage; + import java.util.HashMap; import java.util.List; -/** - * 同步命令执行的核心类 - * 需要等待命令执行完毕,完后返回相应的结果 - */ + public interface SyncExecutionService { - /** - * ------------------------ Sync Command Executor ------------------------------ - */ - ArrayList SyncSendCommandToAgent(String agentTopicName, List commandList); + String SyncSendCommandToAgent(String agentTopicName, String command); - ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList); + String SyncSendCommandToAgent(String agentTopicName, List commandList); - List> SyncSendCommandToAgent(List agentTopicNameList, String type, List commandList); + String SyncSendCommandToAgent(String agentTopicName, String type, List commandList); + + List SyncSendCommandToAgent(List agentTopicNameList, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask); /** * 调用 单行命令脚本的 最底层函数 @@ -30,7 +27,7 @@ public interface SyncExecutionService { * @param durationTask * @return */ - ArrayList SyncSendCommandToAgent( + String SyncSendCommandToAgent( String agentTopicName, String type, List commandList, @@ -44,21 +41,14 @@ public interface SyncExecutionService { * ------------------------------------------------- */ - ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> completeCommandList); + String SyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete); - List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> completeCommandList, boolean isDurationTask); - /** - * 通常为 页面定时脚本任务调用 - * - * @param agentTopicNameList 目标Agent的TopicName列表 - * @param type 任务类型 - * @param completeCommandList 完整的类型 - * @return 每个Agent只返回一个 ResultKey(Script脚本的结果全部拼接到一起),全部的resultKey - */ - List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList); + List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> commandListComplete, boolean isDurationTask); + List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList); + /** * 通常为 页面定时脚本任务调用 * @@ -68,10 +58,10 @@ public interface SyncExecutionService { * @param atnFutureKey 由于脚本任务为延迟调用,故需要提前生成未来的ResultKey * @return 每个Agent只返回一个 ResultKey(Script脚本的结果全部拼接到一起),全部的resultKey */ - List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> 
completeCommandList, HashMap atnFutureKey); + List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey); - ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey); + String SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey); /** * 调用 完整脚本的 最底层函数 @@ -84,7 +74,30 @@ public interface SyncExecutionService { * @param durationTask * @return resultKey 本次操作在Redis中记录的结果Key */ - ArrayList SyncSendCommandToAgent( + String SyncSendCommandToAgent( + String agentTopicName, + String type, + List commandList, + List> commandListComplete, + boolean needResultReplay, + String futureKey, + boolean durationTask + ); + + + /** + * 同步命令调用的方法 + * + * @param agentTopicName + * @param type + * @param commandList + * @param commandListComplete + * @param needResultReplay + * @param futureKey + * @param durationTask + * @return + */ + OctopusMessage AsyncCallSendCommandToAgent( String agentTopicName, String type, List commandList, diff --git a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java index 06cb143..8b18c60 100644 --- a/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java +++ b/server/src/main/java/io/wdd/rpc/execute/service/SyncExecutionServiceImpl.java @@ -1,115 +1,100 @@ package io.wdd.rpc.execute.service; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; -import io.wdd.rpc.message.handler.AsyncWaitOMResult; -import io.wdd.rpc.message.handler.OMReplayContend; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.execute.ExecutionMessage; +import io.wdd.rpc.execute.config.ExecutionLog; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; +import io.wdd.rpc.message.sender.OMessageToAgentSender; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.springframework.data.redis.core.RedisTemplate; import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.time.LocalDateTime; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_SET; + + @Service @Slf4j public class SyncExecutionServiceImpl implements SyncExecutionService { - private static final boolean COMMAND_EXEC_NEED_REPLAY = true; - - private static final OctopusMessageType CurrentAppOctopusMessageType = OctopusMessageType.EXECUTOR; + private static final String MANUAL_COMMAND_TYPE = "manual-command"; @Resource - AsyncWaitOMResult asyncWaitOMResult; + OMessageToAgentSender oMessageToAgentSender; @Resource - AsyncExecutionService asyncExecutionService; - - /** - * 一个命令执行的最长等待时间 - */ - int processMaxWaitSeconds = 10; + ObjectMapper objectMapper; + @Resource + RedisTemplate redisTemplate; @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, List commandList) { - + public String SyncSendCommandToAgent(String agentTopicName, String command) { return this.SyncSendCommandToAgent( agentTopicName, - null, - 
commandList, - null, - COMMAND_EXEC_NEED_REPLAY, - null, - false + List.of(command) ); } @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList) { + public String SyncSendCommandToAgent(String agentTopicName, List commandList) { + return this.SyncSendCommandToAgent( + agentTopicName, + MANUAL_COMMAND_TYPE, + commandList + ); + } + @Override + public String SyncSendCommandToAgent(String agentTopicName, String type, List commandList) { + + return SyncSendCommandToAgent( + agentTopicName, + type, + commandList, + false, + null, + false + ); + + } + + @Override + public String SyncSendCommandToAgent(String agentTopicName, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { return this.SyncSendCommandToAgent( agentTopicName, type, commandList, null, - COMMAND_EXEC_NEED_REPLAY, - null, - false - ); - } - - @Override - public List> SyncSendCommandToAgent(List agentTopicNameList, String type, List commandList) { - - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SyncSendCommandToAgent( - agentTopicName, - type, - commandList, - null, - COMMAND_EXEC_NEED_REPLAY, - null, - false - ) - ) - .collect(Collectors.toList()); - } - - @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { - - return this.SyncSendCommandToAgent( - agentTopicName, - type, - commandList, - null, - COMMAND_EXEC_NEED_REPLAY, + needResultReplay, futureKey, - false + durationTask ); } @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> completeCommandList) { + public String SyncSendCommandToAgentComplete(String agentTopicName, String type, List commandList, List> commandListComplete) { + return this.SyncSendCommandToAgent( agentTopicName, type, commandList, - completeCommandList, - COMMAND_EXEC_NEED_REPLAY, + commandListComplete, + false, null, false ); } @Override - public List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> completeCommandList, boolean isDurationTask) { + public List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List commandList, List> commandListComplete, boolean isDurationTask) { return agentTopicNameList .stream() .map( @@ -117,8 +102,8 @@ public class SyncExecutionServiceImpl implements SyncExecutionService { agentTopicName, type, commandList, - completeCommandList, - COMMAND_EXEC_NEED_REPLAY, + commandListComplete, + false, null, isDurationTask ) @@ -127,60 +112,31 @@ public class SyncExecutionServiceImpl implements SyncExecutionService { } @Override - public List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList) { + public String SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey) { - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SyncSendCommandToAgent( - agentTopicName, - type, - null, - completeCommandList, - COMMAND_EXEC_NEED_REPLAY, - null, - false - ) - ) - .collect(Collectors.toList()); - - } - - @Override - public List> SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey) { - return agentTopicNameList - .stream() - .map( - agentTopicName -> this.SyncSendCommandToAgent( - agentTopicName, - type, - null, - completeCommandList, - 
COMMAND_EXEC_NEED_REPLAY, - atnFutureKey.get(agentTopicName), - false - ) - ) - .collect(Collectors.toList()); - } - - @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, String futureKey) { return this.SyncSendCommandToAgent( agentTopicName, type, commandList, commandListComplete, - COMMAND_EXEC_NEED_REPLAY, + false, futureKey, false ); + } @Override - public ArrayList SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + public String SyncSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { - OctopusMessage octopusMessage = asyncExecutionService.AsyncCallSendCommandToAgent( + String resultKey = futureKey; + // 判定是否是 FutureKey + if (null == futureKey) { + resultKey = ExecutionMessage.GetResultKey(agentTopicName); + } + + // 调用最底层的方法 + this.AsyncCallSendCommandToAgent( agentTopicName, type, commandList, @@ -190,65 +146,225 @@ public class SyncExecutionServiceImpl implements SyncExecutionService { durationTask ); - LocalDateTime initTime = octopusMessage.getInit_time(); - - ArrayList result = new ArrayList<>(); - - // 构造消息等待对象 - int commandCount = 1; - if (null != commandListComplete) { - commandCount = Math.max( - commandListComplete.size(), - 1 - ); - } - - OMReplayContend omReplayContend = OMReplayContend.build( - commandCount, - CurrentAppOctopusMessageType, - initTime - ); - CountDownLatch countDownLatch = omReplayContend.getCountDownLatch(); - - // 开始等待结果 - asyncWaitOMResult.waitFor(omReplayContend); - - // 监听结果 - try { - boolean await = countDownLatch.await( - processMaxWaitSeconds, - TimeUnit.SECONDS - ); - - } catch (InterruptedException e) { - throw new RuntimeException(e); - } finally { - - // 等待所有的结果返回 - // 停止等待结果 - asyncWaitOMResult.stopWaiting(omReplayContend); - - // 解析结果 - omReplayContend - .getReplayOMList() - .stream() - .map( - om -> { - log.debug( - "replay message is => {}", - om - ); - - return (ArrayList) om.getResult(); - } - ) - .forEachOrdered( - singleResult -> result.addAll(singleResult) - ); - - } - - // 返回 - return result; + return resultKey; } + + @Override + public OctopusMessage AsyncCallSendCommandToAgent(String agentTopicName, String type, List commandList, List> commandListComplete, boolean needResultReplay, String futureKey, boolean durationTask) { + + // 检查agentTopicName是否存在 + if (!ALL_AGENT_TOPIC_NAME_SET.contains(agentTopicName)) { + log.error( + "agentTopicName异常! 输入为 => {}", + agentTopicName + ); + return null; + //throw new MyRuntimeException("agentTopicName异常!" 
+ agentTopicName); + } + + // 归一化type + if (StringUtils.isEmpty(type)) { + type = MANUAL_COMMAND_TYPE; + } + + String resultKey = futureKey; + // 判定是否是 FutureKey + if (null == futureKey) { + resultKey = ExecutionMessage.GetResultKey(agentTopicName); + } + + // 构造 Execution Command对应的消息体 + ExecutionMessage executionMessage = this + .generateExecutionMessage( + type, + commandList, + resultKey, + commandListComplete, + needResultReplay, + durationTask + ); + OctopusMessage octopusMessage = this.generateOctopusMessage( + agentTopicName, + executionMessage + ); + + // send the message + oMessageToAgentSender.send(octopusMessage); + + // set up the stream read group + String group = redisTemplate + .opsForStream() + .createGroup( + resultKey, + resultKey + ); + + log.debug( + "set consumer group [{}] for the stream key with => [ {} ]", + group, + resultKey + ); + + // change the redis stream listener container + // createStreamReader.registerStreamReader(COMMAND_RESULT_REDIS_STREAM_LISTENER_CONTAINER, resultKey); + + // construct the persistent Bean + /*ExecutionLog executionLog = buildPersistentLogBeanFromOctopusMessage( + octopusMessage, + executionMessage + );*/ + // send resultKey to ExecutionResultDaemonHandler + // 当批量执行,产生大量的resultKey的时候,会出现线程爆炸,导致所有的全部失效 + /*WAIT_EXECUTION_RESULT_LIST.put( + resultKey, + executionLog + );*/ + + // help gc + executionMessage = null; + + return octopusMessage; + } + + private OctopusMessage generateOctopusMessage(String agentTopicName, ExecutionMessage executionMessage) { + + try { + + return OctopusMessage + .builder() + .type(OctopusMessageType.EXECUTOR) + .init_time(TimeUtils.currentFormatTime()) + .uuid(agentTopicName) + .content( + objectMapper.writeValueAsString(executionMessage) + ) + .build(); + + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + + } + + private ExecutionLog buildPersistentLogBeanFromOctopusMessage(OctopusMessage octopusMessage, ExecutionMessage executionMessage) { + ExecutionLog executionLog = new ExecutionLog(); + executionLog.setAgentTopicName(octopusMessage.getUuid()); + executionLog.setResultKey((String) octopusMessage.getContent()); + executionLog.setCommandList(String.valueOf(executionMessage.getSingleLineCommand())); + executionLog.setType(executionMessage.getType()); + executionLog.setResultKey(executionMessage.getResultKey()); + return executionLog; + } + + + @Override + public List SyncSendCommandToAgent(List agentagentTopicNameList, String type, List commandList, boolean needResultReplay, String futureKey, boolean durationTask) { + + return agentagentTopicNameList + .stream() + .map( + agentTopicName -> this + .SyncSendCommandToAgent + ( + agentTopicName, + type, + commandList, + null, + needResultReplay, + futureKey, + durationTask + ) + ) + .collect(Collectors.toList()); + } + + /** + * @param agentTopicNameList 目标Agent的TopicName列表 + * @param type 任务类型 + * @param completeCommandList 完整的类型 + * @return + */ + @Override + public List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList) { + + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.SyncSendCommandToAgentComplete( + agentTopicName, + type, + null, + completeCommandList + ) + ) + .collect(Collectors.toList()); + + } + + @Override + public List SyncSendCommandToAgentComplete(List agentTopicNameList, String type, List> completeCommandList, HashMap atnFutureKey) { + + return agentTopicNameList + .stream() + .map( + agentTopicName -> this.SyncSendCommandToAgent( + 
agentTopicName, + type, + null, + completeCommandList, + atnFutureKey.getOrDefault( + agentTopicName, + null + ) + ) + ) + .collect(Collectors.toList()); + } + + + @Deprecated + private OctopusMessage generateOctopusMessage(String agentTopicName, String resultKey, String type, List commandList, List> commandListComplete) { + + + ExecutionMessage executionMessage = this.generateExecutionMessage( + type, + commandList, + resultKey, + commandListComplete, + false, + false + ); + + String executionMessageString; + + try { + executionMessageString = objectMapper.writeValueAsString(executionMessage); + + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + + return OctopusMessage + .builder() + .type(OctopusMessageType.EXECUTOR) + .init_time(LocalDateTime.now()) + .content(executionMessageString) + .uuid(agentTopicName) + .build(); + } + + private ExecutionMessage generateExecutionMessage(String type, List commandList, String resultKey, List> commandListComplete, boolean needResultReplay, boolean durationTask) { + + return ExecutionMessage + .builder() + .resultKey(resultKey) + .type(type) + .singleLineCommand(commandList) + .multiLineCommand(commandListComplete) + .needResultReplay(needResultReplay) + .durationTask(durationTask) + .build(); + } + + } diff --git a/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java b/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java index 26fabf3..501e4b7 100644 --- a/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java +++ b/server/src/main/java/io/wdd/rpc/init/AcceptAgentInitInfo.java @@ -1,13 +1,15 @@ package io.wdd.rpc.init; +import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.rabbitmq.client.Channel; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; -import io.wdd.common.beans.status.AgentStatus; import io.wdd.common.handler.MyRuntimeException; +import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import io.wdd.rpc.message.sender.OMessageToAgentSender; +import io.wdd.rpc.status.AgentStatus; import io.wdd.server.beans.vo.ServerInfoVO; import io.wdd.server.utils.DaemonDatabaseOperator; import lombok.SneakyThrows; @@ -22,10 +24,11 @@ import org.springframework.stereotype.Service; import javax.annotation.Resource; import java.io.IOException; -import java.time.LocalDateTime; import java.util.*; import java.util.concurrent.TimeUnit; +import static io.wdd.common.utils.OctopusObjectMapperConfig.OctopusObjectMapper; + /** * The type Accept boot up info message. */ @@ -108,8 +111,13 @@ public class AcceptAgentInitInfo { try { - serverInfoVO = objectMapper.readValue( + OctopusMessage initOctopusMessageFromAgent = OctopusObjectMapper.readValue( message.getBody(), + OctopusMessage.class + ); + + serverInfoVO = OctopusObjectMapper.readValue( + (String) initOctopusMessageFromAgent.getContent(), ServerInfoVO.class ); @@ -126,6 +134,7 @@ public class AcceptAgentInitInfo { // if (!checkAgentAlreadyRegister(agentQueueTopic)) { // log.info("[AGENT INIT] - agent not exist ! start to register !"); // } + // whether agent is registered already // save or update the octopus agent server info // 3. save the agent info into database @@ -135,7 +144,7 @@ public class AcceptAgentInitInfo { } // 4. 
generate the Octopus Agent Status Redis Stream Key & Consumer-Group - generateAgentStatusRedisStreamConsumerGroup(serverInfoVO.getTopicName()); + //generateAgentStatusRedisStreamConsumerGroup(serverInfoVO.getTopicName()); // 5. send InitMessage to agent sendInitMessageToAgent(serverInfoVO); @@ -169,7 +178,7 @@ public class AcceptAgentInitInfo { */ - throw new MyRuntimeException(" Octopus Server Initialization Error, please check !"); + throw new MyRuntimeException("Octopus Server Initialization Error, please check !"); } /** @@ -208,18 +217,6 @@ public class AcceptAgentInitInfo { ); } - // check for octopus-server consumer group - /*if (redisTemplate.opsForStream().groups(statusStreamKey) - .stream() - .filter( - group -> group.groupName().startsWith("Octopus") - ).collect(Collectors.toSet()).contains(Boolean.FALSE)) { - - - - redisTemplate.opsForStream().createGroup(statusStreamKey, "OctopusServer"); - }*/ - log.debug( "octopus agent [ {} ] status report stream key [ {} ] has been created !", agentTopicName, @@ -240,16 +237,24 @@ public class AcceptAgentInitInfo { private boolean sendInitMessageToAgent(ServerInfoVO serverInfoVO) { - OctopusMessage octopusMessage = OctopusMessage - .builder() - .type(OctopusMessageType.INIT) - // should be the OctopusExchange Name - .content(String.valueOf(initRabbitMQConfig.OCTOPUS_EXCHANGE)) - .init_time(LocalDateTime.now()) - .uuid(serverInfoVO.getTopicName()) - .build(); + try { + String serverInfoContent = OctopusObjectMapper.writeValueAsString(serverInfoVO); + + OctopusMessage octopusMessage = OctopusMessage + .builder() + .type(OctopusMessageType.INIT) + // should be the OctopusExchange Name + .content(serverInfoContent) + .init_time(TimeUtils.currentFormatTime()) + .uuid(serverInfoVO.getTopicName()) + .build(); + + oMessageToAgentSender.sendINIT(octopusMessage); + + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } - oMessageToAgentSender.sendINIT(octopusMessage); return true; } diff --git a/server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java b/server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java similarity index 70% rename from server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java rename to server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java index 23496c7..c4e5b09 100644 --- a/server/src/main/java/io/wdd/rpc/init/ServerCacheAgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/init/AgentStatusCacheService.java @@ -1,13 +1,12 @@ package io.wdd.rpc.init; -import io.wdd.common.beans.status.AgentHealthyStatusEnum; import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.status.AgentHealthyStatusEnum; import io.wdd.server.beans.vo.ServerInfoVO; import io.wdd.server.coreService.CoreServerService; import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections.CollectionUtils; -import org.springframework.data.redis.core.RedisTemplate; import org.springframework.stereotype.Service; import javax.annotation.PostConstruct; @@ -15,7 +14,7 @@ import javax.annotation.Resource; import java.util.*; import java.util.stream.Collectors; -import static io.wdd.common.beans.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; + /** * Server启动或者运行的时候,需要初 缓存一系列的信息 @@ -26,7 +25,7 @@ import static io.wdd.common.beans.status.OctopusStatusMessage.ALL_AGENT_STATUS_R */ @Service @Slf4j -public class ServerCacheAgentStatus { +public class AgentStatusCacheService { /** * 存储所有的AgentTopicName的缓存 @@ -49,9 +48,9 @@ public class ServerCacheAgentStatus { /** * 存储所有Agent状态的Map *

- * 内容为 agentTopicName-健康状态 + * 内容为 agentTopicName- True代表健康 False代表不健康 */ - public static final Map ALL_AGENT_STATUS_MAP = new HashMap<>(); + public static final Map ALL_AGENT_STATUS_MAP = new HashMap<>(); /** * 保存所有健康运行的Agent Topic Name @@ -66,8 +65,6 @@ public class ServerCacheAgentStatus { @Resource CoreServerService coreServerService; - @Resource - RedisTemplate redisTemplate; @PostConstruct public void GenerateAllCache() { @@ -76,7 +73,7 @@ public class ServerCacheAgentStatus { updateAllAgentTopicNameCache(); // Agent状态信息的两个Map - updateAgentStatusMapCache(); + // updateAgentStatusMapCache(agentAliveStatusMap); } @@ -117,7 +114,7 @@ public class ServerCacheAgentStatus { * 由定时任务或者初始化服务触发 * 2023-02-21 前端接口,手动更新 */ - public void updateAgentStatusMapCache() { + public void updateAgentStatusMapCache(Map agentAliveStatusMap) { // 检查,排除没有节点的情况 if (CollectionUtils.isEmpty(ALL_AGENT_TOPIC_NAME_LIST)) { @@ -125,47 +122,17 @@ public class ServerCacheAgentStatus { return; } - // 从redis中获取所有节点的当前状态 - List statusList = redisTemplate - .opsForHash() - .multiGet( - ALL_AGENT_STATUS_REDIS_KEY, - ALL_AGENT_TOPIC_NAME_LIST - ); - // 初始话 还没有状态的情况,直接return - if (CollectionUtils.isEmpty(statusList)) { - log.warn("agent status from redis is empty !"); - return; - } - - // 增加更新时间 2023年2月21日 - String timeString = TimeUtils.currentTimeString(); - - // 结构保存为agentStatusMap ==> agent-topic-name : STATUS(healthy, failed, unknown) - HashMap agentStatusMap = new HashMap<>(32); - for (int i = 0; i < ALL_AGENT_TOPIC_NAME_LIST.size(); i++) { - agentStatusMap.put( - ALL_AGENT_TOPIC_NAME_LIST.get(i), - uniformHealthyStatus(String.valueOf(statusList.get(i))) - ); - } - - - // 2023-01-16 + // 2023年6月15日 更新状态缓存 ALL_AGENT_STATUS_MAP.clear(); - ALL_AGENT_STATUS_MAP.putAll(agentStatusMap); - ALL_AGENT_STATUS_MAP.put( - STATUS_UPDATE_TIME_KEY, - timeString - ); + ALL_AGENT_STATUS_MAP.putAll(agentAliveStatusMap); // 2023-01-16 // 更新 状态-Agent容器 内容为 // HEALTHY -> ["agentTopicName-1", "agentTopicName-2"] // FAILED -> ["agentTopicName-1", "agentTopicName-2"] - Map> statusAgentListMap = agentStatusMap + Map> statusAgentListMap = agentAliveStatusMap .entrySet() .stream() .collect( @@ -177,7 +144,7 @@ public class ServerCacheAgentStatus { .stream() .collect( Collectors.toMap( - entry -> entry.getKey(), + entry -> entry.getKey() ? 
"HEALTHY" : "FAILED", entry -> entry .getValue() .stream() @@ -191,7 +158,9 @@ public class ServerCacheAgentStatus { // 2023-2-3 bug fix STATUS_AGENT_LIST_MAP.clear(); STATUS_AGENT_LIST_MAP.putAll(statusAgentListMap); + // 2023年2月21日,更新时间 + String timeString = TimeUtils.currentFormatTimeString(); STATUS_AGENT_LIST_MAP.put( STATUS_UPDATE_TIME_KEY, Collections.singletonList(timeString) @@ -199,22 +168,26 @@ public class ServerCacheAgentStatus { log.debug("Agent存活状态 状态-Agent名称-Map 已经更新了"); - - // Trigger调用Agent Metric 任务 - ArrayList allHealthyAgentTopicNames = new ArrayList<>(32); - for (int i = 0; i < statusList.size(); i++) { - if (null !=statusList.get(i) && statusList - .get(i) - .equals("1")) { - allHealthyAgentTopicNames.add(ALL_AGENT_TOPIC_NAME_LIST.get(i)); - } - } // 缓存相应的存活Agent + List allHealthyAgentTopicNames = agentAliveStatusMap + .entrySet() + .stream() + .filter( + entry -> entry + .getKey() + .equals(Boolean.TRUE) + ) + .map( + Map.Entry::getKey + ) + .collect(Collectors.toList()); + ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.clear(); ALL_HEALTHY_AGENT_TOPIC_NAME_LIST.addAll(allHealthyAgentTopicNames); + // help gc - agentStatusMap = null; + agentAliveStatusMap = null; statusAgentListMap = null; allHealthyAgentTopicNames = null; } diff --git a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java b/server/src/main/java/io/wdd/rpc/message/OctopusMessage.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java rename to server/src/main/java/io/wdd/rpc/message/OctopusMessage.java index 9dba47d..2027258 100644 --- a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java +++ b/server/src/main/java/io/wdd/rpc/message/OctopusMessage.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.rabbitmq; +package io.wdd.rpc.message; import com.fasterxml.jackson.annotation.JsonFormat; diff --git a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java b/server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java similarity index 86% rename from common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java rename to server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java index 7c69c68..ec7b52c 100644 --- a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java +++ b/server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.rabbitmq; +package io.wdd.rpc.message; public enum OctopusMessageType { diff --git a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java b/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java similarity index 63% rename from server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java rename to server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java index 0f29808..5fdd822 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java @@ -1,6 +1,6 @@ -package io.wdd.rpc.message.handler; +package io.wdd.rpc.message.handler.async; -import io.wdd.common.beans.rabbitmq.OctopusMessage; +import io.wdd.rpc.message.OctopusMessage; import io.wdd.server.config.ServerCommonPool; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; @@ -10,7 +10,7 @@ import java.util.HashMap; import java.util.concurrent.CompletableFuture; import 
diff --git a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java b/server/src/main/java/io/wdd/rpc/message/OctopusMessage.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java rename to server/src/main/java/io/wdd/rpc/message/OctopusMessage.java index 9dba47d..2027258 100644 --- a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessage.java +++ b/server/src/main/java/io/wdd/rpc/message/OctopusMessage.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.rabbitmq; +package io.wdd.rpc.message; import com.fasterxml.jackson.annotation.JsonFormat; diff --git a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java b/server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java similarity index 86% rename from common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java rename to server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java index 7c69c68..ec7b52c 100644 --- a/common/src/main/java/io/wdd/common/beans/rabbitmq/OctopusMessageType.java +++ b/server/src/main/java/io/wdd/rpc/message/OctopusMessageType.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.rabbitmq; +package io.wdd.rpc.message; public enum OctopusMessageType { diff --git a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java b/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java similarity index 63% rename from server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java rename to server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java index 0f29808..5fdd822 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/AsyncWaitOMResult.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/async/AsyncWaitOctopusMessageResultService.java @@ -1,6 +1,6 @@ -package io.wdd.rpc.message.handler; +package io.wdd.rpc.message.handler.async; -import io.wdd.common.beans.rabbitmq.OctopusMessage; +import io.wdd.rpc.message.OctopusMessage; import io.wdd.server.config.ServerCommonPool; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; @@ -10,7 +10,7 @@ import java.util.HashMap; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; -import static io.wdd.rpc.message.handler.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; +import static io.wdd.rpc.message.handler.sync.OMessageHandlerServer.OCTOPUS_MESSAGE_FROM_AGENT; /** * 从Agent收集返回信息的统一处理地点 @@ -20,31 +20,37 @@ import static io.wdd.rpc.message.handler.OMessageHandlerServer.OCTOPUS_MESSAGE_F @Service @Slf4j -public class AsyncWaitOMResult { +public class AsyncWaitOctopusMessageResultService { /** * 为了避免线程不安全的问题,增加一层缓存,仅仅由当前类操作此部分 * KEY -> replayMatchKey - * VALUE -> OMReplayContend - 包含countDownLatch 和 result + * VALUE -> OctopusMessageAsyncReplayContend - holds the countDownLatch and the result list */ - private static final HashMap REPLAY_WAITING_TARGET = new HashMap<>(); + private static final HashMap<String, OctopusMessageAsyncReplayContend> OM_REPLAY_WAITING_TARGET_MAP = new HashMap<>(); - public void waitFor(OMReplayContend omReplayContend) { + public void waitFor(OctopusMessageAsyncReplayContend replayContend) { // 向 REPLAY_CACHE_MAP中写入 Key - REPLAY_WAITING_TARGET.put( - omReplayContend.getReplayMatchKey(), - omReplayContend + OM_REPLAY_WAITING_TARGET_MAP.put( + replayContend.getReplayMatchKey(), + replayContend ); // 在调用线程的countDownLunch结束之后,关闭 // 清除 REPLAY_CACHE_MAP 中的队列 } - public void stopWaiting(OMReplayContend omReplayContend) { + public void stopWaiting(OctopusMessageAsyncReplayContend replayContend) { // 在调用线程的countDownLunch结束之后,关闭 清除 REPLAY_CACHE_MAP 中的队列 - REPLAY_WAITING_TARGET.remove(omReplayContend.getReplayMatchKey()); + OctopusMessageAsyncReplayContend contend = OM_REPLAY_WAITING_TARGET_MAP.get(replayContend.getReplayMatchKey()); + + // remove the finished entry + OM_REPLAY_WAITING_TARGET_MAP.remove(replayContend.getReplayMatchKey()); + + // help gc + contend = null; } @@ -82,24 +88,29 @@ OctopusMessage replayOMessage = OCTOPUS_MESSAGE_FROM_AGENT.poll(); // 构造 replayMatchKey - String matchKey = OMReplayContend.generateMatchKey( + String matchKey = OctopusMessageAsyncReplayContend.generateMatchKey( replayOMessage.getType(), replayOMessage.getInit_time() ); - if (!REPLAY_WAITING_TARGET.containsKey(matchKey)) { + if (!OM_REPLAY_WAITING_TARGET_MAP.containsKey(matchKey)) { // 没有这个Key,说明等待结果已经超时了,直接丢弃,然后继续循环 // todo 错误的数据需要放置于某处 + log.debug( + "no waiting entry for the replied match key => {}", + matchKey + ); continue; } // Map中包含有Key,那么放置进去 - OMReplayContend replayContend = REPLAY_WAITING_TARGET.get(matchKey); + OctopusMessageAsyncReplayContend replayContend = OM_REPLAY_WAITING_TARGET_MAP.get(matchKey); replayContend .getReplayOMList() .add(replayOMessage); + + // count this reply down on the latch replayContend .getCountDownLatch()
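Between them, the execution services and this class implement a request-replay handshake: the sender registers a CountDownLatch under a match key derived from the message type and init_time, the RabbitMQ handler thread counts that latch down as agent replies arrive, and the calling thread awaits with a timeout before unregistering. A minimal, self-contained sketch of the handshake; the class and member names here are illustrative stand-ins, not the project's API:

import java.time.LocalDateTime;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Hypothetical stand-in for OctopusMessageAsyncReplayContend plus the waiting map.
class ReplayWaiter {

    private static final Map<String, ReplayWaiter> WAITING = new ConcurrentHashMap<>();

    final String matchKey;
    final CountDownLatch latch;
    final List<String> replies = new CopyOnWriteArrayList<>();

    ReplayWaiter(String type, LocalDateTime initTime, int expectedReplies) {
        this.matchKey = type + initTime;      // same "type + init time" scheme as generateMatchKey
        this.latch = new CountDownLatch(expectedReplies);
        WAITING.put(matchKey, this);          // waitFor(...)
    }

    // Called from the message-handler thread for every reply that comes back.
    static void onReply(String type, LocalDateTime initTime, String result) {
        ReplayWaiter waiter = WAITING.get(type + initTime);
        if (waiter == null) {
            return;                           // waiter already timed out: drop the late reply
        }
        waiter.replies.add(result);
        waiter.latch.countDown();
    }

    // Called from the business thread; returns whatever arrived inside the window.
    List<String> await(int maxWaitSeconds) throws InterruptedException {
        try {
            // The boolean result would tell us whether all replies beat the timeout.
            latch.await(maxWaitSeconds, TimeUnit.SECONDS);
        } finally {
            WAITING.remove(matchKey);         // stopWaiting(...): no leak on timeout
        }
        return replies;
    }
}

The sketch uses a ConcurrentHashMap and a CopyOnWriteArrayList because registration, reply delivery and timeout cleanup run on different threads; the service above keeps a plain HashMap confined to one class, which is exactly the thread-safety concern its own class comment flags.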
diff --git a/server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java b/server/src/main/java/io/wdd/rpc/message/handler/async/OctopusMessageAsyncReplayContend.java similarity index 68% rename from server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java rename to server/src/main/java/io/wdd/rpc/message/handler/async/OctopusMessageAsyncReplayContend.java index 0abd5dc..5864190 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/OMReplayContend.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/async/OctopusMessageAsyncReplayContend.java @@ -1,10 +1,10 @@ -package io.wdd.rpc.message.handler; +package io.wdd.rpc.message.handler.async; import com.fasterxml.jackson.annotation.JsonFormat; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; @@ -19,7 +19,7 @@ import java.util.concurrent.CountDownLatch; @NoArgsConstructor @SuperBuilder(toBuilder = true) @ApiModel("many services call RPC asynchronously; the waiter needs to identify whose reply a returned message is") -public class OMReplayContend { +public class OctopusMessageAsyncReplayContend { @ApiModelProperty("type of the rpc message") OctopusMessageType type; @@ -37,17 +37,6 @@ public class OMReplayContend { @ApiModelProperty("list of reply results, kept temporarily") ArrayList<OctopusMessage> replayOMList; - protected static String generateMatchKey(OMReplayContend replayIdentifier) { - - String relayMatchKey = replayIdentifier - .getType() - .toString() + replayIdentifier - .getInitTime() - .toString(); - - return relayMatchKey; - } - /** * @param messageType * @param messageInitTime must use TimeUtils.currentFormatTime(); * @return */ @@ -61,21 +50,26 @@ public class OMReplayContend { } /** - * 方便使用的一个构造方法 + * Template used by the Execution module * * @return */ - public static OMReplayContend build(int waitForReplayNum, OctopusMessageType currentOMType, LocalDateTime currentTime) { + public static OctopusMessageAsyncReplayContend build(int waitForReplayNum, OctopusMessageType currentOMType, LocalDateTime currentTime) { - return new OMReplayContend( + CountDownLatch latch = null; + if (waitForReplayNum != 0) { + latch = new CountDownLatch(waitForReplayNum); + } + + return new OctopusMessageAsyncReplayContend( currentOMType, currentTime, generateMatchKey( currentOMType, currentTime ), - new CountDownLatch(waitForReplayNum), - new ArrayList<>() + latch, + new ArrayList<>(16) ); } diff --git a/server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java b/server/src/main/java/io/wdd/rpc/message/handler/sync/OMessageHandlerServer.java similarity index 97% rename from server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java rename to server/src/main/java/io/wdd/rpc/message/handler/sync/OMessageHandlerServer.java index 570ff21..35314c1 100644 --- a/server/src/main/java/io/wdd/rpc/message/handler/OMessageHandlerServer.java +++ b/server/src/main/java/io/wdd/rpc/message/handler/sync/OMessageHandlerServer.java @@ -1,9 +1,9 @@ -package io.wdd.rpc.message.handler; +package io.wdd.rpc.message.handler.sync; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.rabbitmq.OctopusMessage; import io.wdd.common.handler.MyRuntimeException; +import io.wdd.rpc.message.OctopusMessage; import lombok.extern.slf4j.Slf4j; import org.springframework.amqp.core.Message; import org.springframework.amqp.rabbit.annotation.RabbitHandler; diff --git a/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java b/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java index 777d208..56dd6e6 100644 --- a/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java +++ b/server/src/main/java/io/wdd/rpc/message/sender/OMessageToAgentSender.java @@ -2,10 +2,10 @@ package io.wdd.rpc.message.sender; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; import io.wdd.common.handler.MyRuntimeException; import io.wdd.rpc.init.InitRabbitMQConfig; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.springframework.amqp.rabbit.core.RabbitTemplate; @@
-44,32 +44,46 @@ public class OMessageToAgentSender { } // send to Queue -- InitFromServer - log.info("send INIT OrderCommand to Agent = {}", message); + log.info( + "send INIT OrderCommand to Agent = {}", + message + ); - rabbitTemplate.convertAndSend(initRabbitMQConfig.INIT_EXCHANGE, initRabbitMQConfig.INIT_FROM_SERVER_KEY, writeData(message)); + rabbitTemplate.convertAndSend( + initRabbitMQConfig.INIT_EXCHANGE, + initRabbitMQConfig.INIT_FROM_SERVER_KEY, + writeData(message) + ); } public void send(OctopusMessage octopusMessage) { - log.debug("OctopusMessage {} send to agent {}", octopusMessage, octopusMessage.getUuid()); + log.debug( + "OctopusMessage {} send to agent {}", + octopusMessage, + octopusMessage.getUuid() + ); rabbitTemplate.convertAndSend( initRabbitMQConfig.OCTOPUS_EXCHANGE, octopusMessage.getUuid() + "*", - writeData(octopusMessage)); + writeData(octopusMessage) + ); } public void send(List octopusMessageList) { - octopusMessageList.stream().forEach( - octopusMessage -> { - this.send(octopusMessage); - } - ); + octopusMessageList + .stream() + .forEach( + octopusMessage -> { + this.send(octopusMessage); + } + ); } diff --git a/server/src/main/java/io/wdd/rpc/scheduler/config/ExecutionJob.java b/server/src/main/java/io/wdd/rpc/scheduler/config/ExecutionJob.java index e273ab4..ab3c9dc 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/config/ExecutionJob.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/config/ExecutionJob.java @@ -6,7 +6,6 @@ import io.wdd.server.utils.SpringUtils; import org.apache.commons.lang3.StringUtils; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; -import org.springframework.scheduling.annotation.Async; import org.springframework.scheduling.quartz.QuartzJobBean; import org.springframework.util.ReflectionUtils; diff --git a/server/src/main/java/io/wdd/rpc/scheduler/config/QuartzSchedulerUtils.java b/server/src/main/java/io/wdd/rpc/scheduler/config/QuartzSchedulerUtils.java index 2e6e8b7..eb89986 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/config/QuartzSchedulerUtils.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/config/QuartzSchedulerUtils.java @@ -1,7 +1,7 @@ package io.wdd.rpc.scheduler.config; -import io.wdd.common.beans.executor.ExecutionMessage; import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.execute.ExecutionMessage; import io.wdd.rpc.scheduler.beans.ScriptSchedulerDTO; import org.quartz.Scheduler; import org.quartz.SchedulerException; diff --git a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentAliveStatusMonitorJob.java similarity index 73% rename from server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java rename to server/src/main/java/io/wdd/rpc/scheduler/job/AgentAliveStatusMonitorJob.java index efa60c1..a72a37b 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentStatusMonitorJob.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentAliveStatusMonitorJob.java @@ -1,17 +1,17 @@ package io.wdd.rpc.scheduler.job; import io.wdd.rpc.scheduler.config.QuartzLogOperator; -import io.wdd.rpc.scheduler.service.status.MonitorAllAgentStatus; +import io.wdd.rpc.scheduler.service.status.AgentAliveStatusMonitorService; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; import org.springframework.scheduling.quartz.QuartzJobBean; import javax.annotation.Resource; -public class AgentStatusMonitorJob extends QuartzJobBean { +public class 
AgentAliveStatusMonitorJob extends QuartzJobBean { @Resource - MonitorAllAgentStatus monitorAllAgentStatus; + AgentAliveStatusMonitorService agentAliveStatusMonitorService; @Resource QuartzLogOperator quartzLogOperator; @@ -23,7 +23,7 @@ public class AgentStatusMonitorJob extends QuartzJobBean { //JobDataMap jobDataMap = jobExecutionContext.getJobDetail().getJobDataMap(); // actually execute the monitor service - monitorAllAgentStatus.go(); + agentAliveStatusMonitorService.go(); // log to somewhere quartzLogOperator.save(); diff --git a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentRunMetricStatusJob.java b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentRunMetricStatusJob.java index 87445a9..e34ad2b 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/job/AgentRunMetricStatusJob.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/job/AgentRunMetricStatusJob.java @@ -1,6 +1,6 @@ package io.wdd.rpc.scheduler.job; -import io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus; +import io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService; import org.quartz.JobDataMap; import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; @@ -8,13 +8,13 @@ import org.springframework.scheduling.quartz.QuartzJobBean; import javax.annotation.Resource; -import static io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus.METRIC_REPORT_TIMES_COUNT; -import static io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus.METRIC_REPORT_TIME_PINCH; +import static io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService.METRIC_REPORT_TIMES_COUNT; +import static io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService.METRIC_REPORT_TIME_PINCH; public class AgentRunMetricStatusJob extends QuartzJobBean { @Resource - AgentRuntimeMetricStatus agentRuntimeMetricStatus; + AgentMetricStatusCollectService agentMetricStatusCollectService; @Override protected void executeInternal(JobExecutionContext jobExecutionContext) throws JobExecutionException { @@ -25,7 +25,7 @@ public class AgentRunMetricStatusJob extends QuartzJobBean { .getJobDataMap(); // 执行Agent Metric 状态收集任务 - agentRuntimeMetricStatus.collect( + agentMetricStatusCollectService.collect( (Integer) jobDataMap.get(METRIC_REPORT_TIMES_COUNT), (Integer) jobDataMap.get(METRIC_REPORT_TIME_PINCH) ); diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java b/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java index f8cc610..ae2353d 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/BuildStatusScheduleTask.java @@ -1,8 +1,8 @@ package io.wdd.rpc.scheduler.service; +import io.wdd.rpc.scheduler.job.AgentAliveStatusMonitorJob; import io.wdd.rpc.scheduler.job.AgentRunMetricStatusJob; -import io.wdd.rpc.scheduler.job.AgentStatusMonitorJob; import lombok.extern.slf4j.Slf4j; import org.quartz.CronExpression; import org.springframework.beans.factory.annotation.Value; @@ -14,8 +14,8 @@ import java.text.ParseException; import java.util.Date; import java.util.HashMap; -import static io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus.METRIC_REPORT_TIMES_COUNT; -import static io.wdd.rpc.scheduler.service.status.AgentRuntimeMetricStatus.METRIC_REPORT_TIME_PINCH; +import static io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService.METRIC_REPORT_TIMES_COUNT; +import static 
io.wdd.rpc.scheduler.service.status.AgentMetricStatusCollectService.METRIC_REPORT_TIME_PINCH; @Component @Slf4j @@ -43,7 +43,7 @@ public class BuildStatusScheduleTask { private void buildAll() { // agent alive/health status check - buildMonitorAllAgentStatusScheduleTask(); + buildMonitorAllAgentAliveStatusScheduleTask(); // agent runtime info check (Metric) @@ -99,12 +99,12 @@ public class BuildStatusScheduleTask { * initial trigger delay: healthyCheckStartDelaySeconds * schedule interval: healthyCronTimeExpress */ - private void buildMonitorAllAgentStatusScheduleTask() { + private void buildMonitorAllAgentAliveStatusScheduleTask() { // build the Job octopusQuartzService.addMission( - AgentStatusMonitorJob.class, - "monitorAllAgentStatusJob", + AgentAliveStatusMonitorJob.class, + "monitorAllAgentAliveStatusJob", JOB_GROUP_NAME, healthyCheckStartDelaySeconds, healthyCronTimeExpress, diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/script/AgentApplyScheduledScript.java b/server/src/main/java/io/wdd/rpc/scheduler/service/script/AgentApplyScheduledScript.java index 3329193..48682bb 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/script/AgentApplyScheduledScript.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/script/AgentApplyScheduledScript.java @@ -1,7 +1,7 @@ package io.wdd.rpc.scheduler.service.script; -import io.wdd.rpc.execute.service.AsyncExecutionService; +import io.wdd.rpc.execute.service.SyncExecutionService; import io.wdd.rpc.scheduler.beans.ScriptSchedulerDTO; import io.wdd.rpc.scheduler.config.QuartzSchedulerUtils; import lombok.extern.slf4j.Slf4j; @@ -20,7 +20,7 @@ import java.util.List; public class AgentApplyScheduledScript { @Resource - AsyncExecutionService asyncExecutionService; + SyncExecutionService asyncExecutionService; @Resource QuartzSchedulerUtils quartzSchedulerUtils; @@ -46,7 +46,7 @@ public class AgentApplyScheduledScript { // send the commands to the agents List<String> resultKeyList = asyncExecutionService - .SendCommandToAgentComplete( + .SyncSendCommandToAgentComplete( targetMachineList, scriptType, completeCommandList,
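`octopusQuartzService.addMission(...)` above is the project's own wrapper, whose signature is not shown in this diff. As a hedged sketch, a registration like `monitorAllAgentAliveStatusJob` plausibly wraps the plain Quartz builder API along these lines (job class and identity names are illustrative; the cron string mirrors the `healthy` cron in application.yml further down):

```java
import org.quartz.*;

public class QuartzMissionSketch {

    public static void schedule(Scheduler scheduler) throws SchedulerException {
        JobDetail job = JobBuilder.newJob(SomeJob.class)
                .withIdentity("monitorAllAgentAliveStatusJob", "STATUS_JOB_GROUP")
                .build();

        // fire per the cron expression, after an initial delay
        Trigger trigger = TriggerBuilder.newTrigger()
                .withIdentity("monitorAllAgentAliveStatusTrigger", "STATUS_JOB_GROUP")
                .startAt(DateBuilder.futureDate(30, DateBuilder.IntervalUnit.SECOND))
                .withSchedule(CronScheduleBuilder.cronSchedule("10 * * * * ? *"))
                .build();

        scheduler.scheduleJob(job, trigger);
    }

    public static class SomeJob implements Job {
        @Override
        public void execute(JobExecutionContext context) {
            // the real job delegates to agentAliveStatusMonitorService.go()
        }
    }
}
```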
diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java new file mode 100644 index 0000000..fa423df --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentAliveStatusMonitorService.java @@ -0,0 +1,157 @@ +package io.wdd.rpc.scheduler.service.status; + +import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.init.AgentStatusCacheService; +import io.wdd.rpc.scheduler.service.BuildStatusScheduleTask; +import io.wdd.rpc.status.service.AsyncStatusService; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections.CollectionUtils; +import org.springframework.context.annotation.Lazy; +import org.springframework.data.redis.core.RedisTemplate; +import org.springframework.stereotype.Service; + +import javax.annotation.Resource; +import java.util.HashMap; +import java.util.Map; + +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; + +/** + * The refresh frequency is controlled by BuildStatusScheduleTask.class + * <br> + * <br> + * Fetch all registered agents + * <br> + * Send the status-check message; each agent must update its own slot in the HashMap + * redis --> all-agent-health-map agent-topic-name : 1 + * todo distributed concern: under a weak network, could multiple threads updating the same hashMap conflict? + * <br> + * Sleep MAX_WAIT_AGENT_REPORT_STATUS_TIME seconds, waiting for the agents to report their status + * <br>
+ * Check the corresponding status HashMap, then reset every slot to zero + */ +@Service +@Slf4j +@Lazy +public class AgentAliveStatusMonitorService { + + private static final int MAX_WAIT_AGENT_REPORT_STATUS_TIME = 5; + @Resource + RedisTemplate redisTemplate; + + @Resource + AgentStatusCacheService agentStatusCacheService; + + @Resource + BuildStatusScheduleTask buildStatusScheduleTask; + + @Resource + AsyncStatusService asyncStatusService; + + private HashMap<String, Boolean> AGENT_HEALTHY_INIT_MAP; + + public void go() { + + // 1. fetch all registered agents (manual cache refresh) + agentStatusCacheService.updateAllAgentTopicNameCache(); + if (CollectionUtils.isEmpty(ALL_AGENT_TOPIC_NAME_LIST)) { + log.warn("[Scheduler] No Agent Registered ! End Up Status Monitor !"); + return; + } + + // 1.1 make sure the redis structure that stores agent status is in place + checkOrCreateRedisHealthyKey(); + + // 2. send the status-check message; each agent must update its slot in the HashMap + // 2023-06-14: send PING, wait for every agent to answer PONG, then update the status in redis + + // synchronous update strategy + Map<String, Boolean> agentAliveStatusMap = asyncStatusService.AsyncCollectAgentAliveStatus( + ALL_AGENT_TOPIC_NAME_LIST, + MAX_WAIT_AGENT_REPORT_STATUS_TIME + ); + + // update the agents' status + updateAllAgentHealthyStatus(agentAliveStatusMap); + } + + /** + * Initialize the redis cache of agent alive status: set every slot to FALSE, then wait for the alive check + */ + private void checkOrCreateRedisHealthyKey() { + + // at the start of a check, every agent's status is manually reset to 0 + // an agent that is alive can then flip its own status to 1 + + // build the redis all agent healthy map struct + HashMap<String, Boolean> initMap = new HashMap<>(32); + ALL_AGENT_TOPIC_NAME_LIST + .stream() + .forEach( + agentTopicName -> { + initMap.put( + agentTopicName, + Boolean.FALSE + ); + } + ); + + // cache this map struct + AGENT_HEALTHY_INIT_MAP = initMap; + + // create the healthy redis structure + redisTemplate + .opsForHash() + .putAll( + ALL_AGENT_STATUS_REDIS_KEY, + initMap + ); + + redisTemplate + .opsForHash() + .put( + ALL_AGENT_STATUS_REDIS_KEY, + "initTime", + TimeUtils.currentTimeString() + ); + + } + + public void updateAllAgentHealthyStatus(Map<String, Boolean> agentAliveStatusMap) { + + String currentTimeString = TimeUtils.currentTimeString(); + + // refresh all cached status + agentStatusCacheService.updateAgentStatusMapCache(agentAliveStatusMap); + + // run the metric-report scheduled task +// buildStatusScheduleTask.buildAgentMetricScheduleTask(); + + log.debug( + "[alive status] - current time [ {} ] , alive status of all agents => {}", + currentTimeString, + agentAliveStatusMap + ); + + // only the update time is refreshed here + redisTemplate + .opsForHash() + .put( + ALL_AGENT_STATUS_REDIS_KEY, + "updateTime", + currentTimeString + ); + + // write back every agent's status + redisTemplate + .opsForHash() + .putAll( + ALL_AGENT_STATUS_REDIS_KEY, + agentAliveStatusMap + ); + + } + + +}
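The monitor above persists the whole alive map into a single redis hash (one field per agent, plus `initTime`/`updateTime` fields). A minimal sketch of just that write path, assuming a `RedisTemplate<String, Object>` bean and reusing the `ALL_AGENT_HEALTHY_STATUS` key from the diff; the class and method names here are invented:

```java
import org.springframework.data.redis.core.RedisTemplate;
import java.util.Map;

public class AliveStatusRedisWriter {

    private final RedisTemplate<String, Object> redisTemplate;

    public AliveStatusRedisWriter(RedisTemplate<String, Object> redisTemplate) {
        this.redisTemplate = redisTemplate;
    }

    public void write(Map<String, Boolean> agentAliveStatusMap, String updateTime) {
        // HSET ALL_AGENT_HEALTHY_STATUS updateTime <time>
        redisTemplate.opsForHash().put("ALL_AGENT_HEALTHY_STATUS", "updateTime", updateTime);
        // one hash field per agent: topic name -> alive flag
        redisTemplate.opsForHash().putAll("ALL_AGENT_HEALTHY_STATUS", agentAliveStatusMap);
    }
}
```

Also fixed in the hunk above: the original `log.debug` used `%s` placeholders, which SLF4J ignores; it expects `{}`.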
diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java similarity index 51% rename from server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java rename to server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java index a461729..bcaec69 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentRuntimeMetricStatus.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentMetricStatusCollectService.java @@ -1,17 +1,22 @@ package io.wdd.rpc.scheduler.service.status; -import io.wdd.common.beans.status.OctopusStatusMessage; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.sender.OMessageToAgentSender; import lombok.extern.slf4j.Slf4j; import org.springframework.stereotype.Service; import org.springframework.util.CollectionUtils; import javax.annotation.Resource; +import java.time.LocalDateTime; import java.util.List; import java.util.stream.Collectors; -import static io.wdd.common.beans.status.OctopusStatusMessage.METRIC_STATUS_MESSAGE_TYPE; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_HEALTHY_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.status.OctopusStatusMessage.ConstructAgentStatusMessage; +import static io.wdd.rpc.status.OctopusStatusMessage.METRIC_STATUS_MESSAGE_TYPE; /** * Collect the runtime metric info of OctopusAgent @@ -20,13 +25,18 @@ import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_HEALTHY_AGENT_TOPIC_NAM */ @Service @Slf4j -public class AgentRuntimeMetricStatus { +public class AgentMetricStatusCollectService { public static final String METRIC_REPORT_TIME_PINCH = "metricRepeatPinch"; public static final String METRIC_REPORT_TIMES_COUNT = "metricRepeatCount"; + @Resource - CollectAgentStatus collectAgentStatus; + OMessageToAgentSender oMessageToAgentSender; + + @Resource + ObjectMapper objectMapper; + public void collect(int metricRepeatCount, int metricRepeatPinch) { @@ -34,35 +44,31 @@ public class AgentRuntimeMetricStatus { if (CollectionUtils.isEmpty(ALL_HEALTHY_AGENT_TOPIC_NAME_LIST)) { log.error("Metric Status Collect Failed ! no ALL_HEALTHY_AGENT_TOPIC_NAMES"); } - // 构建 OctopusMessage - // 只发送一次消息,让Agent循环定时执行任务 + buildMetricStatusMessageAndSend( metricRepeatCount, metricRepeatPinch ); - // } private void buildMetricStatusMessageAndSend(int metricRepeatCount, int metricRepeatPinch) { - List collect = ALL_HEALTHY_AGENT_TOPIC_NAME_LIST + LocalDateTime currentTime = TimeUtils.currentFormatTime(); + + List<OctopusMessage> octopusStatusMessageList = ALL_HEALTHY_AGENT_TOPIC_NAME_LIST .stream() .map( - agentTopicName -> { - return OctopusStatusMessage - .builder() - .type(METRIC_STATUS_MESSAGE_TYPE) - .metricRepeatCount(metricRepeatCount) - .metricRepeatPinch(metricRepeatPinch) - .agentTopicName(agentTopicName) - .build(); - } + agentTopicName -> ConstructAgentStatusMessage( + METRIC_STATUS_MESSAGE_TYPE, + agentTopicName, + currentTime + ) ) .collect(Collectors.toList()); - // send to the next level - collectAgentStatus.statusMessageToAgent(collect); + // batch send all messages to RabbitMQ + oMessageToAgentSender.send(octopusStatusMessageList); } diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentStatusStreamReader.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentStatusStreamReader.java index fee04e6..f0edfa4 100644 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentStatusStreamReader.java +++ b/server/src/main/java/io/wdd/rpc/scheduler/service/status/AgentStatusStreamReader.java @@ -4,7 +4,7 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.status.AgentStatus; +import io.wdd.rpc.status.AgentStatus; import lombok.Getter; import lombok.Setter; import lombok.extern.slf4j.Slf4j; diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java deleted file mode 100644 index 6e71a63..0000000 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/CollectAgentStatus.java +++ /dev/null @@ -1,72 +0,0 @@
-package io.wdd.rpc.scheduler.service.status; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import io.wdd.common.beans.rabbitmq.OctopusMessage; -import io.wdd.common.beans.rabbitmq.OctopusMessageType; -import io.wdd.common.beans.status.OctopusStatusMessage; -import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.message.sender.OMessageToAgentSender; -import org.springframework.stereotype.Service; - -import javax.annotation.Resource; -import java.util.List; -import java.util.stream.Collectors; - -/** - * 1. 定时任务 - * 2. 向RabbitMQ中发送消息,STATUS类型的消息 - * 3. 然后开始监听相应的Result StreamKey - */ -@Service -public class CollectAgentStatus { - - @Resource - OMessageToAgentSender oMessageToAgentSender; - - @Resource - ObjectMapper objectMapper; - - - public void collectAgentStatus(OctopusStatusMessage statusMessage) { - - this.statusMessageToAgent(List.of(statusMessage)); - } - - - public void statusMessageToAgent(List statusMessageList) { - - // build all the OctopusMessage - List octopusMessageList = statusMessageList.stream().map( - statusMessage -> { - OctopusMessage octopusMessage = buildOctopusMessageStatus(statusMessage); - return octopusMessage; - } - ).collect(Collectors.toList()); - - // batch send all messages to RabbitMQ - oMessageToAgentSender.send(octopusMessageList); - - // todo how to get result ? - } - - private OctopusMessage buildOctopusMessageStatus(OctopusStatusMessage octopusStatusMessage) { - - // must be like this or it will be deserialized as LinkedHashMap - String s; - try { - s = objectMapper.writeValueAsString(octopusStatusMessage); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } - - return OctopusMessage.builder() - .uuid(octopusStatusMessage.getAgentTopicName()) - .type(OctopusMessageType.STATUS) - .init_time(TimeUtils.currentTime()) - .content(s) - .build(); - } - - -} diff --git a/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java b/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java deleted file mode 100644 index 8614d1b..0000000 --- a/server/src/main/java/io/wdd/rpc/scheduler/service/status/MonitorAllAgentStatus.java +++ /dev/null @@ -1,159 +0,0 @@ -package io.wdd.rpc.scheduler.service.status; - -import io.wdd.common.beans.status.OctopusStatusMessage; -import io.wdd.common.utils.TimeUtils; -import io.wdd.rpc.init.ServerCacheAgentStatus; -import io.wdd.rpc.scheduler.service.BuildStatusScheduleTask; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.collections.CollectionUtils; -import org.springframework.context.annotation.Lazy; -import org.springframework.data.redis.core.RedisTemplate; -import org.springframework.stereotype.Service; - -import javax.annotation.Resource; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static io.wdd.common.beans.status.OctopusStatusMessage.ALL_AGENT_STATUS_REDIS_KEY; -import static io.wdd.common.beans.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE; -import static io.wdd.rpc.init.ServerCacheAgentStatus.ALL_AGENT_TOPIC_NAME_LIST; - -/** - * 更新频率被类 BuildStatusScheduleTask.class控制 - *
<br> - * <br> - * 获取所有注册的Agent - * <br> - * 发送状态检查信息, agent需要update相应的HashMap的值 - * redis --> all-agent-health-map agent-topic-name : 1 - * todo 分布式问题,弱网环境,多线程操作同一个hashMap会不会出现冲突 - * <br> - * 休眠 MAX_WAIT_AGENT_REPORT_STATUS_TIME 秒 等待agent的状态上报 - * <br>
- * 检查相应的 状态HashMap,然后全部置为零 - */ -@Service -@Slf4j -@Lazy -public class MonitorAllAgentStatus { - - private static final int MAX_WAIT_AGENT_REPORT_STATUS_TIME = 5; - @Resource - RedisTemplate redisTemplate; - @Resource - CollectAgentStatus collectAgentStatus; - - @Resource - ServerCacheAgentStatus serverCacheAgentStatus; - - @Resource - BuildStatusScheduleTask buildStatusScheduleTask; - - private HashMap AGENT_HEALTHY_INIT_MAP; - - public void go() { - - try { - // 1. 获取所有注册的Agent 手动更新 - serverCacheAgentStatus.updateAllAgentTopicNameCache(); - if (CollectionUtils.isEmpty(ALL_AGENT_TOPIC_NAME_LIST)) { - log.warn("[Scheduler] No Agent Registered ! End Up Status Monitor !"); - return; - } - - // 1.1 检查 Agent状态保存数据结构是否正常 - checkOrCreateRedisHealthyKey(); - - // 2.发送状态检查信息, agent需要update相应的HashMap的值 - buildAndSendAgentHealthMessage(); - - // 3. 休眠 MAX_WAIT_AGENT_REPORT_STATUS_TIME 秒 等待agent的状态上报 - TimeUnit.SECONDS.sleep(MAX_WAIT_AGENT_REPORT_STATUS_TIME); - - // 4.检查相应的 状态HashMap,然后全部置为零 - // todo 存储到某个地方,目前只是打印日志 - updateAllAgentHealthyStatus(); - - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - private void checkOrCreateRedisHealthyKey() { - - // 检查开始的时候 需要手动将所有Agent的状态置为0 - // Agent如果存活,那么就可以将其自身状态修改为1 - - // build the redis all agent healthy map struct - HashMap initMap = new HashMap<>(32); - ALL_AGENT_TOPIC_NAME_LIST - .stream() - .forEach( - agentTopicName -> { - initMap.put( - agentTopicName, - "0" - ); - } - ); - - initMap.put( - "updateTime", - TimeUtils.currentTimeString() - ); - - // cache this map struct - AGENT_HEALTHY_INIT_MAP = initMap; - - // create the healthy redis structure - redisTemplate - .opsForHash() - .putAll( - ALL_AGENT_STATUS_REDIS_KEY, - initMap - ); - - } - - private void buildAndSendAgentHealthMessage() { - - List collect = ALL_AGENT_TOPIC_NAME_LIST - .stream() - .map( - agentTopicName -> OctopusStatusMessage - .builder() - .agentTopicName(agentTopicName) - .type(HEALTHY_STATUS_MESSAGE_TYPE) - .build() - ) - .collect(Collectors.toList()); - - // 发送信息 - collectAgentStatus.statusMessageToAgent(collect); - } - - private void updateAllAgentHealthyStatus() { - - String currentTimeString = TimeUtils.currentTimeString(); - - // 更新所有的缓存状态 - serverCacheAgentStatus.updateAgentStatusMapCache(); - - // 执行Metric上报定时任务 - buildStatusScheduleTask.buildAgentMetricScheduleTask(); - - // 这里仅仅是更新时间 - redisTemplate - .opsForHash() - .put( - ALL_AGENT_STATUS_REDIS_KEY, - "updateTime", - currentTimeString - ); - - } - - -} diff --git a/common/src/main/java/io/wdd/common/beans/status/AgentHealthyStatusEnum.java b/server/src/main/java/io/wdd/rpc/status/AgentHealthyStatusEnum.java similarity index 89% rename from common/src/main/java/io/wdd/common/beans/status/AgentHealthyStatusEnum.java rename to server/src/main/java/io/wdd/rpc/status/AgentHealthyStatusEnum.java index 0494909..ef794e7 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AgentHealthyStatusEnum.java +++ b/server/src/main/java/io/wdd/rpc/status/AgentHealthyStatusEnum.java @@ -1,7 +1,6 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.Getter; -import lombok.Setter; /** * AgentHealthy状态描述实体类 diff --git a/common/src/main/java/io/wdd/common/beans/status/AgentStatus.java b/server/src/main/java/io/wdd/rpc/status/AgentStatus.java similarity index 95% rename from common/src/main/java/io/wdd/common/beans/status/AgentStatus.java rename to server/src/main/java/io/wdd/rpc/status/AgentStatus.java index 489a48d..83a6a90 100644 --- 
a/common/src/main/java/io/wdd/common/beans/status/AgentStatus.java +++ b/server/src/main/java/io/wdd/rpc/status/AgentStatus.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/status/AgentSystemInfo.java b/server/src/main/java/io/wdd/rpc/status/AgentSystemInfo.java similarity index 97% rename from common/src/main/java/io/wdd/common/beans/status/AgentSystemInfo.java rename to server/src/main/java/io/wdd/rpc/status/AgentSystemInfo.java index d9056e9..89f5abc 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AgentSystemInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/AgentSystemInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import io.wdd.common.utils.TimeUtils; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/status/AppStatusEnum.java b/server/src/main/java/io/wdd/rpc/status/AppStatusEnum.java similarity index 92% rename from common/src/main/java/io/wdd/common/beans/status/AppStatusEnum.java rename to server/src/main/java/io/wdd/rpc/status/AppStatusEnum.java index 2173f30..e627994 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AppStatusEnum.java +++ b/server/src/main/java/io/wdd/rpc/status/AppStatusEnum.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; public enum AppStatusEnum { diff --git a/common/src/main/java/io/wdd/common/beans/status/AppStatusInfo.java b/server/src/main/java/io/wdd/rpc/status/AppStatusInfo.java similarity index 90% rename from common/src/main/java/io/wdd/common/beans/status/AppStatusInfo.java rename to server/src/main/java/io/wdd/rpc/status/AppStatusInfo.java index b8f647f..fd3abb0 100644 --- a/common/src/main/java/io/wdd/common/beans/status/AppStatusInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/AppStatusInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/common/src/main/java/io/wdd/common/beans/status/CpuInfo.java b/server/src/main/java/io/wdd/rpc/status/CpuInfo.java similarity index 99% rename from common/src/main/java/io/wdd/common/beans/status/CpuInfo.java rename to server/src/main/java/io/wdd/rpc/status/CpuInfo.java index 8be8979..43cb652 100644 --- a/common/src/main/java/io/wdd/common/beans/status/CpuInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/CpuInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/common/src/main/java/io/wdd/common/beans/status/CpuTicks.java b/server/src/main/java/io/wdd/rpc/status/CpuTicks.java similarity index 97% rename from common/src/main/java/io/wdd/common/beans/status/CpuTicks.java rename to server/src/main/java/io/wdd/rpc/status/CpuTicks.java index 6d4d587..2065ee7 100644 --- a/common/src/main/java/io/wdd/common/beans/status/CpuTicks.java +++ b/server/src/main/java/io/wdd/rpc/status/CpuTicks.java @@ -1,8 +1,7 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.AllArgsConstructor; import lombok.Data; -import lombok.Getter; import lombok.NoArgsConstructor; import oshi.hardware.CentralProcessor; import oshi.util.Util; diff --git a/common/src/main/java/io/wdd/common/beans/status/DiskInfo.java b/server/src/main/java/io/wdd/rpc/status/DiskInfo.java similarity index 98% rename from 
common/src/main/java/io/wdd/common/beans/status/DiskInfo.java rename to server/src/main/java/io/wdd/rpc/status/DiskInfo.java index 085e0a2..291617a 100644 --- a/common/src/main/java/io/wdd/common/beans/status/DiskInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/DiskInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import io.wdd.common.utils.FormatUtils; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/status/MemoryInfo.java b/server/src/main/java/io/wdd/rpc/status/MemoryInfo.java similarity index 97% rename from common/src/main/java/io/wdd/common/beans/status/MemoryInfo.java rename to server/src/main/java/io/wdd/rpc/status/MemoryInfo.java index 528e6ea..1afe27b 100644 --- a/common/src/main/java/io/wdd/common/beans/status/MemoryInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/MemoryInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import io.wdd.common.utils.FormatUtils; import lombok.AllArgsConstructor; diff --git a/common/src/main/java/io/wdd/common/beans/status/MetricStatus.java b/server/src/main/java/io/wdd/rpc/status/MetricStatus.java similarity index 86% rename from common/src/main/java/io/wdd/common/beans/status/MetricStatus.java rename to server/src/main/java/io/wdd/rpc/status/MetricStatus.java index 7b980fd..2f7bae5 100644 --- a/common/src/main/java/io/wdd/common/beans/status/MetricStatus.java +++ b/server/src/main/java/io/wdd/rpc/status/MetricStatus.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import lombok.Data; diff --git a/common/src/main/java/io/wdd/common/beans/status/NetworkInfo.java b/server/src/main/java/io/wdd/rpc/status/NetworkInfo.java similarity index 99% rename from common/src/main/java/io/wdd/common/beans/status/NetworkInfo.java rename to server/src/main/java/io/wdd/rpc/status/NetworkInfo.java index 71a93ff..d540e28 100644 --- a/common/src/main/java/io/wdd/common/beans/status/NetworkInfo.java +++ b/server/src/main/java/io/wdd/rpc/status/NetworkInfo.java @@ -1,4 +1,4 @@ -package io.wdd.common.beans.status; +package io.wdd.rpc.status; import io.wdd.common.utils.FormatUtils; diff --git a/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java new file mode 100644 index 0000000..e6e1a14 --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/OctopusStatusMessage.java @@ -0,0 +1,65 @@ +package io.wdd.rpc.status; + +import com.fasterxml.jackson.core.JsonProcessingException; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; +import lombok.experimental.SuperBuilder; + +import java.time.LocalDateTime; + +import static io.wdd.common.utils.OctopusObjectMapperConfig.OctopusObjectMapper; + +@Data +@AllArgsConstructor +@NoArgsConstructor +@SuperBuilder(toBuilder = true) +public class OctopusStatusMessage { + + // below two will be used by both server and agent + // 存储所有Agent的实时健康状态, 1代表健康 0代表失败 + public static final String ALL_AGENT_STATUS_REDIS_KEY = "ALL_AGENT_HEALTHY_STATUS"; + public static final String HEALTHY_STATUS_MESSAGE_TYPE = "PING"; + public static final String ALL_STATUS_MESSAGE_TYPE = "ALL"; + public static final String METRIC_STATUS_MESSAGE_TYPE = "METRIC"; + public static final String APP_STATUS_MESSAGE_TYPE = "APP"; + + /** + * which kind of status should be return + * metric => short time 
message + * all => all agent status message + * healthy => check for healthy + */ + String statusType; + + int metricRepeatCount; + + int metricRepeatPinch; + + public static OctopusMessage ConstructAgentStatusMessage(String statusType, String agentTopicName, LocalDateTime currentTime) { + + OctopusStatusMessage statusMessage = OctopusStatusMessage + .builder() + .statusType(statusType) + .build(); + + String ops; + try { + ops = OctopusObjectMapper.writeValueAsString(statusMessage); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + + return OctopusMessage + .builder() + .type(OctopusMessageType.STATUS) + .uuid(agentTopicName) + .init_time(currentTime) + .content(ops) + .build(); + + } + +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java b/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java new file mode 100644 index 0000000..c6bad73 --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/AgentStatus.java @@ -0,0 +1,23 @@ +package io.wdd.rpc.status.beans; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.Data; +import lombok.NoArgsConstructor; + +@NoArgsConstructor +@Data +public class AgentStatus { + + @JsonProperty("CPUStatus") + private CPUStatus cpuStatus; + + @JsonProperty("MemoryStatus") + private MemoryStatus memoryStatus; + + @JsonProperty("NetworkStatus") + private NetworkStatus networkStatus; + + @JsonProperty("DiskStatus") + private DiskStatus diskStatus; + +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/CPUStatus.java b/server/src/main/java/io/wdd/rpc/status/beans/CPUStatus.java new file mode 100644 index 0000000..add5821 --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/CPUStatus.java @@ -0,0 +1,81 @@ +package io.wdd.rpc.status.beans; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; +import lombok.experimental.SuperBuilder; + +import java.util.List; + +@Data +@AllArgsConstructor +@NoArgsConstructor +@SuperBuilder(toBuilder = true) +public class CPUStatus { + + + @JsonProperty("NumCores") + private Integer numCores; + @JsonProperty("CPUStatus") + private List cPUInfo; + @JsonProperty("CPUPercent") + private Double cPUPercent; + @JsonProperty("CPULoads") + private CPULoadsDTO cPULoads; + @JsonProperty("SystemLoads") + private SystemLoadsDTO systemLoads; + + @NoArgsConstructor + @Data + public static class CPULoadsDTO { + @JsonProperty("load1") + private Integer load1; + @JsonProperty("load5") + private Integer load5; + @JsonProperty("load15") + private Integer load15; + } + + @NoArgsConstructor + @Data + public static class SystemLoadsDTO { + @JsonProperty("load1") + private Integer load1; + @JsonProperty("load5") + private Integer load5; + @JsonProperty("load15") + private Integer load15; + } + + @NoArgsConstructor + @Data + public static class CPUInfoDTO { + @JsonProperty("cpu") + private Integer cpu; + @JsonProperty("vendorId") + private String vendorId; + @JsonProperty("family") + private String family; + @JsonProperty("model") + private String model; + @JsonProperty("stepping") + private Integer stepping; + @JsonProperty("physicalId") + private String physicalId; + @JsonProperty("coreId") + private String coreId; + @JsonProperty("cores") + private Integer cores; + @JsonProperty("modelName") + private String modelName; + @JsonProperty("mhz") + private Integer mhz; + @JsonProperty("cacheSize") + private Integer cacheSize; + @JsonProperty("flags") + private 
List<String> flags; @JsonProperty("microcode") + private String microcode; + } +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/DiskStatus.java b/server/src/main/java/io/wdd/rpc/status/beans/DiskStatus.java new file mode 100644 index 0000000..2c728cf --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/DiskStatus.java @@ -0,0 +1,32 @@ +package io.wdd.rpc.status.beans; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.util.List; + +@NoArgsConstructor +@Data +public class DiskStatus { + + @JsonProperty("Total") + private Long total; + @JsonProperty("Used") + private Long used; + @JsonProperty("LogicalDisk") + private List<LogicalDiskDTO> logicalDisk; + + @NoArgsConstructor + @Data + public static class LogicalDiskDTO { + @JsonProperty("device") + private String device; + @JsonProperty("mountpoint") + private String mountpoint; + @JsonProperty("fstype") + private String fstype; + @JsonProperty("opts") + private List<String> opts; + } +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/MemoryStatus.java b/server/src/main/java/io/wdd/rpc/status/beans/MemoryStatus.java new file mode 100644 index 0000000..c16e8ba --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/MemoryStatus.java @@ -0,0 +1,21 @@ +package io.wdd.rpc.status.beans; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.Data; +import lombok.NoArgsConstructor; + +@NoArgsConstructor +@Data +public class MemoryStatus { + + @JsonProperty("TotalMemory") + private Long totalMemory; + @JsonProperty("UsedMemory") + private Long usedMemory; + @JsonProperty("AvailableMemory") + private Long availableMemory; + @JsonProperty("TotalVirtualMemory") + private Long totalVirtualMemory; + @JsonProperty("UsedVirtualMemory") + private Long usedVirtualMemory; +} diff --git a/server/src/main/java/io/wdd/rpc/status/beans/NetworkStatus.java b/server/src/main/java/io/wdd/rpc/status/beans/NetworkStatus.java new file mode 100644 index 0000000..c5a067d --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/beans/NetworkStatus.java @@ -0,0 +1,33 @@ +package io.wdd.rpc.status.beans; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; +import lombok.experimental.SuperBuilder; + +import java.util.List; + +@NoArgsConstructor +@Data +@AllArgsConstructor +@SuperBuilder(toBuilder = true) +public class NetworkStatus { + + @JsonProperty("name") + private String name; + @JsonProperty("internal_ip_v4") + private List<String> internalIpV4; + @JsonProperty("internal_ip_v6") + private List<String> internalIpV6; + @JsonProperty("mac") + private String mac; + @JsonProperty("sent") + private Long sent; + @JsonProperty("recv") + private Integer recv; + @JsonProperty("sent_rate") + private String sentRate; + @JsonProperty("recv_rate") + private String recvRate; +} diff --git a/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusService.java b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusService.java new file mode 100644 index 0000000..844035b --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusService.java @@ -0,0 +1,16 @@ +package io.wdd.rpc.status.service; + +import java.util.List; +import java.util.Map; + +public interface AsyncStatusService { + + /** + * Despite the Async name, this collects the alive status of the nodes in agentTopicNameList synchronously and returns every node's alive result + * + * @param agentTopicNameList + * @param aliveStatusWaitMaxTime + * @return + */ + Map<String, Boolean> AsyncCollectAgentAliveStatus(List<String> agentTopicNameList, int aliveStatusWaitMaxTime); +}
diff --git a/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java new file mode 100644 index 0000000..a9cb661 --- /dev/null +++ b/server/src/main/java/io/wdd/rpc/status/service/AsyncStatusServiceImpl.java @@ -0,0 +1,119 @@ +package io.wdd.rpc.status.service; + +import io.wdd.common.utils.TimeUtils; +import io.wdd.rpc.message.OctopusMessage; +import io.wdd.rpc.message.OctopusMessageType; +import io.wdd.rpc.message.handler.async.AsyncWaitOctopusMessageResultService; +import io.wdd.rpc.message.handler.async.OctopusMessageAsyncReplayContend; +import io.wdd.rpc.message.sender.OMessageToAgentSender; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import javax.annotation.Resource; +import java.time.LocalDateTime; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static io.wdd.rpc.init.AgentStatusCacheService.ALL_AGENT_TOPIC_NAME_LIST; +import static io.wdd.rpc.status.OctopusStatusMessage.ConstructAgentStatusMessage; +import static io.wdd.rpc.status.OctopusStatusMessage.HEALTHY_STATUS_MESSAGE_TYPE; + +@Slf4j +@Service +public class AsyncStatusServiceImpl implements AsyncStatusService { + + private static final OctopusMessageType CurrentAppOctopusMessageType = OctopusMessageType.STATUS; + + @Resource + OMessageToAgentSender oMessageToAgentSender; + + @Resource + AsyncWaitOctopusMessageResultService asyncWaitOctopusMessageResultService; + + @Override + public Map<String, Boolean> AsyncCollectAgentAliveStatus(List<String> agentTopicNameList, int aliveStatusWaitMaxTime) { + + // build the final result map, defaulting every agent to FALSE + Map<String, Boolean> agentAliveStatusMap = agentTopicNameList + .stream() + .collect( + Collectors.toMap( + agentTopicName -> agentTopicName, + agentTopicName -> Boolean.FALSE + )); + + LocalDateTime currentTime = TimeUtils.currentFormatTime(); + // build the OctopusMessage/StatusMessage payloads and send them all down + buildAndSendAgentAliveOctopusMessage(currentTime); + + // collect the replies asynchronously + OctopusMessageAsyncReplayContend statusAsyncReplayContend = OctopusMessageAsyncReplayContend.build( agentTopicNameList.size(), CurrentAppOctopusMessageType, currentTime ); + asyncWaitOctopusMessageResultService.waitFor(statusAsyncReplayContend); + + // parse the result + CountDownLatch countDownLatch = statusAsyncReplayContend.getCountDownLatch(); + + // wait for the status replies + boolean agentAliveStatusCollectResult = false; + try { + agentAliveStatusCollectResult = countDownLatch.await( aliveStatusWaitMaxTime, TimeUnit.SECONDS ); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } finally { + if (!agentAliveStatusCollectResult) { + log.debug("agent alive-status check: not every agent reported back!"); + } + + // remove the waiting entry + asyncWaitOctopusMessageResultService.stopWaiting(statusAsyncReplayContend); + + // process the replies + statusAsyncReplayContend .getReplayOMList() .stream() .forEach( statusOMessage -> { if (statusOMessage.getResult() != null) { agentAliveStatusMap.put( statusOMessage.getUuid(), Boolean.TRUE ); } } ); + } + + // return the alive status of every agent + return agentAliveStatusMap; + } + + private void buildAndSendAgentAliveOctopusMessage(LocalDateTime currentTime) { + + List<OctopusMessage> octopusStatusMessageList = ALL_AGENT_TOPIC_NAME_LIST .stream() .map( agentTopicName -> ConstructAgentStatusMessage( HEALTHY_STATUS_MESSAGE_TYPE, agentTopicName, currentTime ) ) .collect(Collectors.toList()); + + // send the messages + oMessageToAgentSender.send(octopusStatusMessageList); + + } + + +}
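`ConstructAgentStatusMessage` above serializes the status payload to a JSON `String` before embedding it in the `OctopusMessage`, echoing the note in the deleted `CollectAgentStatus` ("must be like this or it will be deserialized as LinkedHashMap"): a raw nested object comes back out of Jackson as a `LinkedHashMap` unless the consumer decodes it against an explicit type. A small round-trip sketch of that idea (`PayloadDemo` is an invented stand-in for `OctopusStatusMessage`):

```java
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonContentRoundTrip {

    public static class PayloadDemo {
        public String statusType;
        public PayloadDemo() {}
    }

    public static void main(String[] args) throws JsonProcessingException {
        ObjectMapper objectMapper = new ObjectMapper();

        PayloadDemo out = new PayloadDemo();
        out.statusType = "PING";

        // producer side: embed the payload as a plain JSON string
        String content = objectMapper.writeValueAsString(out);

        // consumer side: decode with an explicit target type, not a bare Object
        PayloadDemo in = objectMapper.readValue(content, PayloadDemo.class);
        System.out.println(in.statusType); // PING
    }
}
```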
diff --git a/server/src/main/java/io/wdd/server/config/OctopusObjectMapper.java b/server/src/main/java/io/wdd/server/config/OctopusObjectMapper.java index f939b6d..d22b3a7 100644 --- a/server/src/main/java/io/wdd/server/config/OctopusObjectMapper.java +++ b/server/src/main/java/io/wdd/server/config/OctopusObjectMapper.java @@ -1,19 +1,14 @@ package io.wdd.server.config; -import io.wdd.common.utils.OctopusObjectMapperConfig; -import org.springframework.boot.autoconfigure.jackson.Jackson2ObjectMapperBuilderCustomizer; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -@Configuration -public class OctopusObjectMapper { - - //注意:该段代码并未覆盖SpringBoot自动装配的ObjectMapper对象,而是加强其配置。 - // use the common config of object mapper - @Bean - public Jackson2ObjectMapperBuilderCustomizer customJackson() { - return OctopusObjectMapperConfig.common(); - } - -} +//@Configuration +//public class OctopusObjectMapper { +// +// //note: this block did not override the ObjectMapper auto-configured by Spring Boot; it only enhanced its configuration. +// // use the common config of object mapper +// @Bean +// public Jackson2ObjectMapperBuilderCustomizer customJackson() { +// return OctopusObjectMapperConfig.common(); +// } +// +//} diff --git a/server/src/main/java/io/wdd/server/controller/AppController.java b/server/src/main/java/io/wdd/server/controller/AppController.java index a7f4c08..f9a5d3a 100644 --- a/server/src/main/java/io/wdd/server/controller/AppController.java +++ b/server/src/main/java/io/wdd/server/controller/AppController.java @@ -1,9 +1,9 @@ package io.wdd.server.controller; +import io.wdd.common.response.R; import io.wdd.server.beans.vo.AppInfoVO; import io.wdd.server.coreService.CoreAppService; -import io.wdd.common.beans.response.R; import org.springframework.validation.annotation.Validated; import org.springframework.web.bind.annotation.*; diff --git a/server/src/main/java/io/wdd/server/controller/DomainController.java b/server/src/main/java/io/wdd/server/controller/DomainController.java index cd41c3c..21c00d3 100644 --- a/server/src/main/java/io/wdd/server/controller/DomainController.java +++ b/server/src/main/java/io/wdd/server/controller/DomainController.java @@ -1,9 +1,9 @@ package io.wdd.server.controller; +import io.wdd.common.response.R; import io.wdd.server.beans.po.DomainInfoPO; import io.wdd.server.beans.vo.DomainInfoVO; import io.wdd.server.coreService.CoreDomainService; -import io.wdd.common.beans.response.R; import org.springframework.validation.annotation.Validated; import org.springframework.web.bind.annotation.*; diff --git a/server/src/main/java/io/wdd/server/controller/ServerController.java b/server/src/main/java/io/wdd/server/controller/ServerController.java index 2a03dfa..411f0eb 100644 --- a/server/src/main/java/io/wdd/server/controller/ServerController.java +++ b/server/src/main/java/io/wdd/server/controller/ServerController.java @@ -4,7 +4,7 @@ package io.wdd.server.controller; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; -import io.wdd.common.beans.response.R; +import io.wdd.common.response.R; import io.wdd.server.beans.po.DomainInfoPO; import io.wdd.server.beans.po.ServerInfoPO; import io.wdd.server.beans.vo.AppInfoVO; diff --git a/server/src/main/java/io/wdd/server/核心功能设计.md b/server/src/main/java/io/wdd/server/核心功能设计.md new file mode 100644 index 0000000..cdf88e4 --- /dev/null +++ b/server/src/main/java/io/wdd/server/核心功能设计.md
@@ -0,0 +1,3 @@ +1. 使用Java实现WebShell的功能 + +2. \ No newline at end of file diff --git a/server/src/main/resources/application.yml b/server/src/main/resources/application.yml new file mode 100644 index 0000000..4cdbd16 --- /dev/null +++ b/server/src/main/resources/application.yml @@ -0,0 +1,161 @@ +server: + port: 9999 + +spring: + main: + allow-circular-references: true + allow-bean-definition-overriding: true + rabbitmq: + host: 42.192.52.227 + port: 20672 + username: boge + password: boge8tingH + virtual-host: /wdd + listener: + simple: + retry: + # ack failed will reentrant the Rabbit Listener + max-attempts: 2 + enabled: true + # retry interval unit ms + max-interval: 65000 + initial-interval: 65000 + redis: + host: 42.192.52.227 + port: 21370 + database: 0 + password: boge8tingH + # cluster: + # nodes: + # - 43.154.83.213:21370 + # - 43.154.83.213:21371 + # - 43.154.83.213:21372 + # - 43.154.83.213:21373 + # - 43.154.83.213:21374 + # - 43.154.83.213:21375 + # # 获取失败 最大重定向次数 + # max-redirects: 3 + # timeout: 50000 + #如果用以前的jedis,可以把下面的lettuce换成jedis即可 + lettuce: + pool: + # 连接池最大连接数默认值为8 + max-active: 16 + # 连接池最大阻塞时间(使用负值表示没有限制)默认值为-1 + max-wait: -1 + # 连接池中最大空闲连接数默认值为8 + max-idle: 10 + # 连接池中的最小空闲连接数,默认值为0 + min-idle: 10 + time-between-eviction-runs: 50000 + datasource: + driver-class-name: com.mysql.cj.jdbc.Driver + url: jdbc:mysql://42.192.52.227:21306/octopus_server?autoReconnect=true&useSSL=false&useUnicode=true&characterEncoding=UTF-8&serverTimezone=GMT%2B8 + username: boge + password: boge8tingH + type: com.zaxxer.hikari.HikariDataSource + hikari: + minimum-idle: 3 + # 空闲连接存活最大时间,默认600000(10分钟) + idle-timeout: 180000 + # 连接池最大连接数,默认是10 + maximum-pool-size: 5 + # 此属性控制从池返回的连接的默认自动提交行为,默认值:true + auto-commit: true + connection-test-query: SELECT 1 + # 最大文件上传 + servlet: + multipart: + max-file-size: 500MB + max-request-size: 500MB + +mybatis-plus: + type-aliases-package: io.wdd.server.beans.po + global-config: + db-column-underline: true + db-config: + # modify ethe id strategy + id-type: assign_id + # logic delete field globally + logicDeleteField: isDelete + logic-not-delete-value: 0 + logic-delete-value: 1 + banner: false + configuration: + # 希望知道所有的sql是怎么执行的, 配置输出日志 + #log-impl: org.apache.ibatis.logging.stdout.StdOutImpl + log-impl: org.apache.ibatis.logging.nologging.NoLoggingImpl + # 数据库下划线--实体类也是下划线 需要为false + map-underscore-to-camel-case: true + # 一级缓存的 缓存级别默认为 session,如果要关闭一级缓存可以设置为 statement + local-cache-scope: session + # 是否开启二级缓存 + cache-enabled: false + # 默认地址为 classpath*:/mapper/**/*.xml +# mapper-locations: classpath*:/real-mappers/**/*.xml + +octopus: + message: + # agent boot up default common exchange + init_exchange: InitExchange + # server will send message to agent using this common queue + init_to_server: InitToServer + # agent boot up default common exchange routing key + init_to_server_key: InitToServerKey + # server will receive message from agent using this common queue + init_from_server: InitFromServer + # agent boot up default common exchange routing key + init_from_server_key: InitFromServerKey + # initialization register time out (unit ms) default is 5 min + init_ttl: "3000000" + # Octopus Exchange Name == server comunicate with agent + octopus_exchange: OctopusExchange + # Octopus Message To Server == all agent send info to server queue and topic + octopus_to_server: OctopusToServer + executor: + name: executor-functions + status: + name: octopus-agent + healthy: + type: cron + cron: 10 * * * * ? 
* + start-delay: 30 + metric: + pinch: 20 + +oss: + # 这里只是因为需要一个层级,不一定下面的都是oracle + oracle: + seoul1: + namespace: cnk8d6fazu16 + region: ap-seoul-1 + key: aed62d24d85e2da809ce02bf272420ba4ed74820 + secret: rQdEcn69K049+JkA1IGoQmC1k8zma8zfWvZvVS0h144= + capacity: 10737418240 + seoul2: + namespace: cncvl8ro2rbf + region: ap-seoul-1 + key: 9e413c6e66269bc65d7ec951d93ba9c6a9781f6e + secret: dkXD7PysjrhsTKfNIbKupUmtxdfOvYCyLXf0MXa4hnU= + capacity: 10737418240 + tokyo1: + namespace: nrjcs6lwr9vy + region: ap-tokyo-1 + key: 0584c323d6c8d24cc2fc8c2d716a4ea35bb99ae6 + secret: +xicO9obeqzC5a/WY1rXvl5pMWSWbVIpMt3Qv691NtU= + capacity: 10737418240 + phoenix1: + namespace: axqr6x6t48wm + region: us-phoenix-1 + key: e87a121f1548b244c7bd649a1f0ca35195d46cf2 + secret: uT+NIgJiKPjSaPT8EVUw3xbLSCv/CFMFuebVauznafk= + capacity: 10737418240 + london1: + namespace: lrmzslyt8jzs + region: uk-london-1 + key: 57671886f9f1bcc5ac7235b5a0e6123f5ca271b3 + secret: ukWae6TXjID2Wqxh+7mAPAf4busZPGzwAh/WDKZ5MOQ= + capacity: 10737418240 + + + diff --git a/server/src/main/resources/bootstrap.yml b/server/src/main/resources/bootstrap.yml-back similarity index 61% rename from server/src/main/resources/bootstrap.yml rename to server/src/main/resources/bootstrap.yml-back index 47a0a39..c98f222 100644 --- a/server/src/main/resources/bootstrap.yml +++ b/server/src/main/resources/bootstrap.yml-back @@ -2,22 +2,22 @@ spring: application: name: octopus-server profiles: - active: k3s + active: local cloud: nacos: config: - group: k3s + group: local config-retry-time: 3000 file-extension: yaml max-retry: 3 - # server-addr: 43.154.83.213:21060 - server-addr: 150.230.198.103:21060 + server-addr: 42.192.52.227:21060 + # server-addr: 150.230.198.103:21060 # server-addr: https://nacos.107421.xyz:443 timeout: 5000 config-long-poll-timeout: 5000 extension-configs: - - group: k3s - data-id: common-k3s.yaml + - group: local + data-id: common-local.yaml server: port: 9999 \ No newline at end of file diff --git a/server/src/test/java/io/wdd/server/ServerApplicationTests.java b/server/src/test/java/io/wdd/server/ServerApplicationTests.java index 38cc180..bfdd25a 100644 --- a/server/src/test/java/io/wdd/server/ServerApplicationTests.java +++ b/server/src/test/java/io/wdd/server/ServerApplicationTests.java @@ -1,6 +1,6 @@ package io.wdd.server; -import io.wdd.rpc.execute.service.AsyncExecutionService; +import io.wdd.rpc.execute.service.SyncExecutionService; import org.junit.jupiter.api.Test; import org.springframework.boot.test.context.SpringBootTest; @@ -13,7 +13,7 @@ class ServerApplicationTests { @Resource - AsyncExecutionService asyncExecutionService; + SyncExecutionService asyncExecutionService; @Test void testCoreExecutionCompleteScript() { @@ -61,7 +61,7 @@ class ServerApplicationTests { ) ); - List resultList = asyncExecutionService.SendCommandToAgentComplete( + List resultList = asyncExecutionService.SyncSendCommandToAgentComplete( targetMachineList, "Scheduled Script", completeScript diff --git a/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml b/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml index 107a94e..2bb7487 100644 --- a/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml +++ b/source/src/main/java/io/wdd/source/octopus/run-enviroment-compose.yaml @@ -24,13 +24,13 @@ services: interval: 15s timeout: 5s retries: 6 - redis-master: + redis: # https://hub.docker.com/r/bitnami/redis image: bitnami/redis:6.2.11-debian-11-r1 networks: - app-tier environment: - - 
REDIS_PASSWORD=Superwmm.23 + - REDIS_PASSWORD=boge8tingH # Redis 6.0 features a new multi-threading model - REDIS_IO_THREADS=4 - REDIS_IO_THREADS_DO_READS=yes diff --git a/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml b/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml new file mode 100644 index 0000000..8b422b4 --- /dev/null +++ b/source/src/main/java/io/wdd/source/octopus/simple-middleware.yaml @@ -0,0 +1,75 @@ +version: '2.1' + +services: + mysql: + # https://hub.docker.com/r/bitnami/mysql + image: bitnami/mysql:8.0.32-debian-11-r12 + ports: + - '21306:3306' + volumes: + #- '/octopus-server/mysql/data:/bitnami/mysql/data' + - 'mysql_data:/bitnami/mysql/data' + #- '/octopus-server/mysql/nacos-2.2.0-initdb.sql:/docker-entrypoint-initdb.d/nacos-2.2.0-initdb.sql' + # - '/octopus-server/mysql/wdd-server.sql:/docker-entrypoint-initdb.d/wdd-server.sql' + networks: + - app-tier + environment: + - MYSQL_ROOT_USER=boge + - MYSQL_ROOT_PASSWORD=boge8tingH + - MYSQL_CHARACTER_SET=utf8mb4 + - MYSQL_COLLATE:utf8_general_ci + - MYSQL_AUTHENTICATION_PLUGIN=mysql_native_password + healthcheck: + test: [ 'CMD', '/opt/bitnami/scripts/mysql/healthcheck.sh' ] + interval: 15s + timeout: 5s + retries: 6 + rabbitmq: + # https://hub.docker.com/r/bitnami/rabbitmq + image: docker.io/bitnami/rabbitmq:3.11.10-debian-11-r0 + ports: + - '4369' + - '5551' + - '5552' + - '20672:5672' + - '25672' + - '20678:15672' + environment: + - RABBITMQ_VHOST=/ + - RABBITMQ_VHOSTS=/dev + - RABBITMQ_USERNAME=boge + - RABBITMQ_PASSWORD=boge8tingH + - RABBITMQ_SECURE_PASSWORD=no + - RABBITMQ_LOAD_DEFINITIONS=no + - RABBITMQ_NODE_PORT_NUMBER=5672 + - RABBITMQ_NODE_SSL_PORT_NUMBER=5671 + - RABBITMQ_MANAGEMENT_PORT_NUMBER=15672 + volumes: + - 'rabbitmq_data:/bitnami/rabbitmq/mnesia' + redis: + # https://hub.docker.com/r/bitnami/redis + image: bitnami/redis:6.2.11-debian-11-r1 + networks: + - app-tier + environment: + - REDIS_PASSWORD=boge8tingH + # Redis 6.0 features a new multi-threading model + - REDIS_IO_THREADS=4 + - REDIS_IO_THREADS_DO_READS=yes + - REDIS_PORT_NUMBER=6379 + - REDIS_REPLICATION_MODE=master + #- REDIS_DISABLE_COMMANDS=FLUSHDB,FLUSHALL + ports: + - '21370:6379' + volumes: + - 'redis_data:/bitnami/redis/data' + +volumes: + mysql_data: + driver: local + rabbitmq_data: + driver: local + +networks: + app-tier: + driver: bridge \ No newline at end of file