Remove unused gRPC and JWT related code, including Woodpecker service definitions and JWT token management.
This commit is contained in:
62
internal/transport/grpc/agent_lifecycle.go
Normal file
62
internal/transport/grpc/agent_lifecycle.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
proto "stream.api/internal/api/proto/agent/v1"
|
||||
"stream.api/internal/dto"
|
||||
)
|
||||
|
||||
func (s *Server) RegisterAgent(ctx context.Context, req *proto.RegisterAgentRequest) (*proto.RegisterAgentResponse, error) {
|
||||
if req.Info == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "connection info is required")
|
||||
}
|
||||
id, _, ok := s.getAgentIDFromContext(ctx)
|
||||
if !ok {
|
||||
return nil, status.Error(codes.Unauthenticated, "invalid session")
|
||||
}
|
||||
hostname := ""
|
||||
if req.Info.CustomLabels != nil {
|
||||
hostname = req.Info.CustomLabels["hostname"]
|
||||
}
|
||||
name := hostname
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("agent-%s", id)
|
||||
}
|
||||
s.agentManager.Register(id, name, req.Info.Platform, req.Info.Backend, req.Info.Version, req.Info.Capacity)
|
||||
if s.onAgentEvent != nil {
|
||||
s.onAgentEvent("agent_update", s.getAgentWithStats(id))
|
||||
}
|
||||
return &proto.RegisterAgentResponse{AgentId: id}, nil
|
||||
}
|
||||
|
||||
func (s *Server) UnregisterAgent(ctx context.Context, _ *proto.Empty) (*proto.Empty, error) {
|
||||
agentID, token, ok := s.getAgentIDFromContext(ctx)
|
||||
if !ok {
|
||||
return nil, status.Error(codes.Unauthenticated, "invalid session")
|
||||
}
|
||||
for _, jobID := range s.getAgentJobs(agentID) {
|
||||
_ = s.jobService.UpdateJobStatus(ctx, jobID, dto.JobStatusFailure)
|
||||
s.untrackJobAssignment(agentID, jobID)
|
||||
}
|
||||
s.sessions.Delete(token)
|
||||
s.agentJobs.Delete(agentID)
|
||||
agent := s.getAgentWithStats(agentID)
|
||||
s.agentManager.Unregister(agentID)
|
||||
if s.onAgentEvent != nil {
|
||||
s.onAgentEvent("agent_update", agent)
|
||||
}
|
||||
return &proto.Empty{}, nil
|
||||
}
|
||||
|
||||
func (s *Server) ReportHealth(ctx context.Context, _ *proto.ReportHealthRequest) (*proto.Empty, error) {
|
||||
agentID, _, ok := s.getAgentIDFromContext(ctx)
|
||||
if !ok {
|
||||
return nil, status.Error(codes.Unauthenticated, "invalid session")
|
||||
}
|
||||
s.agentManager.UpdateHeartbeat(agentID)
|
||||
return &proto.Empty{}, nil
|
||||
}
|
||||
111
internal/transport/grpc/agent_manager.go
Normal file
111
internal/transport/grpc/agent_manager.go
Normal file
@@ -0,0 +1,111 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"stream.api/internal/dto"
|
||||
)
|
||||
|
||||
// AgentInfo is the server-side record for a single connected agent.
type AgentInfo struct {
	ID            string
	Name          string
	Platform      string
	Backend       string
	Version       string
	Capacity      int32       // concurrency capacity the agent advertised at registration
	CPU           float64     // last CPU figure reported via UpdateResources
	RAM           float64     // last RAM figure reported via UpdateResources
	LastHeartbeat time.Time   // refreshed by UpdateHeartbeat/UpdateResources
	ConnectedAt   time.Time   // set once when the agent first registers
	CommandCh     chan string // buffered queue of pending server->agent commands
}
|
||||
|
||||
// AgentManager is a concurrency-safe registry of connected agents,
// keyed by agent ID.
type AgentManager struct {
	mu     sync.RWMutex // guards agents
	agents map[string]*AgentInfo
}
|
||||
|
||||
func NewAgentManager() *AgentManager {
|
||||
return &AgentManager{agents: make(map[string]*AgentInfo)}
|
||||
}
|
||||
|
||||
func (am *AgentManager) Register(id string, name, platform, backend, version string, capacity int32) {
|
||||
am.mu.Lock()
|
||||
defer am.mu.Unlock()
|
||||
now := time.Now()
|
||||
if existing, ok := am.agents[id]; ok {
|
||||
existing.Name = name
|
||||
existing.Platform = platform
|
||||
existing.Backend = backend
|
||||
existing.Version = version
|
||||
existing.Capacity = capacity
|
||||
existing.LastHeartbeat = now
|
||||
return
|
||||
}
|
||||
am.agents[id] = &AgentInfo{ID: id, Name: name, Platform: platform, Backend: backend, Version: version, Capacity: capacity, LastHeartbeat: now, ConnectedAt: now, CommandCh: make(chan string, 10)}
|
||||
}
|
||||
|
||||
func (am *AgentManager) GetCommandChannel(id string) (chan string, bool) {
|
||||
am.mu.RLock()
|
||||
defer am.mu.RUnlock()
|
||||
agent, ok := am.agents[id]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
return agent.CommandCh, true
|
||||
}
|
||||
|
||||
func (am *AgentManager) SendCommand(id string, cmd string) bool {
|
||||
am.mu.RLock()
|
||||
defer am.mu.RUnlock()
|
||||
agent, ok := am.agents[id]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
select {
|
||||
case agent.CommandCh <- cmd:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (am *AgentManager) UpdateHeartbeat(id string) {
|
||||
am.mu.Lock()
|
||||
defer am.mu.Unlock()
|
||||
if agent, ok := am.agents[id]; ok {
|
||||
agent.LastHeartbeat = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func (am *AgentManager) UpdateResources(id string, cpu, ram float64) {
|
||||
am.mu.Lock()
|
||||
defer am.mu.Unlock()
|
||||
if agent, ok := am.agents[id]; ok {
|
||||
agent.CPU = cpu
|
||||
agent.RAM = ram
|
||||
agent.LastHeartbeat = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func (am *AgentManager) Unregister(id string) {
|
||||
am.mu.Lock()
|
||||
defer am.mu.Unlock()
|
||||
delete(am.agents, id)
|
||||
}
|
||||
|
||||
func (am *AgentManager) ListAll() []*dto.Agent {
|
||||
am.mu.RLock()
|
||||
defer am.mu.RUnlock()
|
||||
now := time.Now()
|
||||
all := make([]*dto.Agent, 0, len(am.agents))
|
||||
for _, info := range am.agents {
|
||||
status := dto.AgentStatusOnline
|
||||
if now.Sub(info.LastHeartbeat) >= 60*time.Second {
|
||||
status = dto.AgentStatusOffline
|
||||
}
|
||||
all = append(all, &dto.Agent{ID: info.ID, Name: info.Name, Platform: info.Platform, Backend: info.Backend, Version: info.Version, Capacity: info.Capacity, Status: status, CPU: info.CPU, RAM: info.RAM, LastHeartbeat: info.LastHeartbeat, CreatedAt: info.ConnectedAt, UpdatedAt: info.LastHeartbeat})
|
||||
}
|
||||
return all
|
||||
}
|
||||
63
internal/transport/grpc/agent_runtime_server.go
Normal file
63
internal/transport/grpc/agent_runtime_server.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
grpcpkg "google.golang.org/grpc"
|
||||
proto "stream.api/internal/api/proto/agent/v1"
|
||||
"stream.api/internal/dto"
|
||||
"stream.api/internal/service"
|
||||
)
|
||||
|
||||
// Server implements the Woodpecker and WoodpeckerAuth gRPC services on
// top of the job service and an in-memory agent registry.
type Server struct {
	proto.UnimplementedWoodpeckerServer
	proto.UnimplementedWoodpeckerAuthServer
	jobService   *service.JobService
	agentManager *AgentManager
	agentSecret  string   // shared secret agents present in Auth; empty disables the check
	sessions     sync.Map // access token -> agent ID (string)
	agentJobs    sync.Map // agent ID -> *sync.Map used as a set of assigned job IDs
	onAgentEvent func(string, *dto.AgentWithStats) // optional agent lifecycle callback
}
|
||||
|
||||
func NewServer(jobService *service.JobService, agentSecret string) *Server {
|
||||
return &Server{jobService: jobService, agentManager: NewAgentManager(), agentSecret: agentSecret}
|
||||
}
|
||||
|
||||
func (s *Server) SetAgentEventHandler(handler func(string, *dto.AgentWithStats)) {
|
||||
s.onAgentEvent = handler
|
||||
}
|
||||
|
||||
func (s *Server) Register(grpcServer grpcpkg.ServiceRegistrar) {
|
||||
proto.RegisterWoodpeckerServer(grpcServer, s)
|
||||
proto.RegisterWoodpeckerAuthServer(grpcServer, s)
|
||||
}
|
||||
|
||||
func (s *Server) SendCommand(agentID string, cmd string) bool {
|
||||
return s.agentManager.SendCommand(agentID, cmd)
|
||||
}
|
||||
|
||||
// ListAgents returns a snapshot of every currently registered agent.
func (s *Server) ListAgents() []*dto.Agent {
	return s.agentManager.ListAll()
}
|
||||
|
||||
func (s *Server) ListAgentsWithStats() []*dto.AgentWithStats {
|
||||
agents := s.agentManager.ListAll()
|
||||
result := make([]*dto.AgentWithStats, 0, len(agents))
|
||||
for _, agent := range agents {
|
||||
result = append(result, &dto.AgentWithStats{Agent: agent, ActiveJobCount: int64(len(s.getAgentJobs(agent.ID)))})
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *Server) getAgentWithStats(agentID string) *dto.AgentWithStats {
|
||||
for _, agent := range s.ListAgentsWithStats() {
|
||||
if agent != nil && agent.Agent != nil && agent.Agent.ID == agentID {
|
||||
return agent
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) Version(context.Context, *proto.Empty) (*proto.VersionResponse, error) {
|
||||
return &proto.VersionResponse{GrpcVersion: 15, ServerVersion: "stream.api"}, nil
|
||||
}
|
||||
43
internal/transport/grpc/assignments.go
Normal file
43
internal/transport/grpc/assignments.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package grpc
|
||||
|
||||
import "sync"
|
||||
|
||||
func (s *Server) trackJobAssignment(agentID, jobID string) {
|
||||
jobSetInterface, _ := s.agentJobs.LoadOrStore(agentID, &sync.Map{})
|
||||
if jobSet, ok := jobSetInterface.(*sync.Map); ok {
|
||||
jobSet.Store(jobID, true)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) untrackJobAssignment(agentID, jobID string) {
|
||||
if jobSetInterface, ok := s.agentJobs.Load(agentID); ok {
|
||||
if jobSet, ok := jobSetInterface.(*sync.Map); ok {
|
||||
jobSet.Delete(jobID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) isJobAssigned(agentID, jobID string) bool {
|
||||
if jobSetInterface, ok := s.agentJobs.Load(agentID); ok {
|
||||
if jobSet, ok := jobSetInterface.(*sync.Map); ok {
|
||||
_, found := jobSet.Load(jobID)
|
||||
return found
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Server) getAgentJobs(agentID string) []string {
|
||||
jobs := []string{}
|
||||
if jobSetInterface, ok := s.agentJobs.Load(agentID); ok {
|
||||
if jobSet, ok := jobSetInterface.(*sync.Map); ok {
|
||||
jobSet.Range(func(key, _ any) bool {
|
||||
if jobID, ok := key.(string); ok {
|
||||
jobs = append(jobs, jobID)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
return jobs
|
||||
}
|
||||
56
internal/transport/grpc/auth.go
Normal file
56
internal/transport/grpc/auth.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
proto "stream.api/internal/api/proto/agent/v1"
|
||||
)
|
||||
|
||||
// generateToken returns a 128-bit random session token encoded as 32
// lowercase hex characters.
func generateToken() string {
	var raw [16]byte
	// crypto/rand.Read failing is effectively impossible; error ignored
	// to match the original best-effort behavior.
	_, _ = rand.Read(raw[:])
	return hex.EncodeToString(raw[:])
}
|
||||
|
||||
// generateAgentID derives a unique-enough agent ID from the current
// wall-clock time in nanoseconds, rendered in decimal.
func generateAgentID() string {
	nanos := time.Now().UnixNano()
	return strconv.FormatInt(nanos, 10)
}
|
||||
|
||||
func (s *Server) getAgentIDFromContext(ctx context.Context) (string, string, bool) {
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
return "", "", false
|
||||
}
|
||||
tokens := md.Get("token")
|
||||
if len(tokens) == 0 {
|
||||
return "", "", false
|
||||
}
|
||||
token := tokens[0]
|
||||
if id, ok := s.sessions.Load(token); ok {
|
||||
return id.(string), token, true
|
||||
}
|
||||
return "", "", false
|
||||
}
|
||||
|
||||
func (s *Server) Auth(ctx context.Context, req *proto.AuthRequest) (*proto.AuthResponse, error) {
|
||||
if s.agentSecret != "" && req.AgentToken != s.agentSecret {
|
||||
return nil, status.Error(codes.Unauthenticated, "invalid agent secret")
|
||||
}
|
||||
agentID := req.AgentId
|
||||
if len(agentID) > 6 && agentID[:6] == "agent-" {
|
||||
agentID = agentID[6:]
|
||||
}
|
||||
if agentID == "" {
|
||||
agentID = generateAgentID()
|
||||
}
|
||||
accessToken := generateToken()
|
||||
s.sessions.Store(accessToken, agentID)
|
||||
return &proto.AuthResponse{Status: "ok", AgentId: agentID, AccessToken: accessToken}, nil
|
||||
}
|
||||
@@ -8,44 +8,39 @@ import (
|
||||
"gorm.io/gorm"
|
||||
redisadapter "stream.api/internal/adapters/redis"
|
||||
"stream.api/internal/config"
|
||||
"stream.api/internal/dto"
|
||||
"stream.api/internal/service"
|
||||
"stream.api/internal/video"
|
||||
runtime "stream.api/internal/video/runtime"
|
||||
runtimegrpc "stream.api/internal/video/runtime/grpc"
|
||||
"stream.api/internal/video/runtime/services"
|
||||
"stream.api/internal/transport/mqtt"
|
||||
"stream.api/pkg/logger"
|
||||
)
|
||||
|
||||
type GRPCModule struct {
|
||||
jobService *services.JobService
|
||||
healthService *services.HealthService
|
||||
agentRuntime *runtimegrpc.Server
|
||||
mqttPublisher *runtime.MQTTBootstrap
|
||||
jobService *service.JobService
|
||||
agentRuntime *Server
|
||||
mqttPublisher *mqtt.MQTTBootstrap
|
||||
grpcServer *grpcpkg.Server
|
||||
cfg *config.Config
|
||||
}
|
||||
|
||||
func NewGRPCModule(ctx context.Context, cfg *config.Config, db *gorm.DB, rds *redisadapter.RedisAdapter, appLogger logger.Logger) (*GRPCModule, error) {
|
||||
jobService := services.NewJobService(rds, rds)
|
||||
healthService := services.NewHealthService(db, rds.Client(), cfg.Render.ServiceName)
|
||||
agentRuntime := runtimegrpc.NewServer(jobService, cfg.Render.AgentSecret)
|
||||
videoService := video.NewService(db, jobService)
|
||||
jobService := service.NewJobService(rds, rds)
|
||||
agentRuntime := NewServer(jobService, cfg.Render.AgentSecret)
|
||||
videoService := service.NewService(db, jobService)
|
||||
grpcServer := grpcpkg.NewServer()
|
||||
|
||||
module := &GRPCModule{
|
||||
jobService: jobService,
|
||||
healthService: healthService,
|
||||
agentRuntime: agentRuntime,
|
||||
grpcServer: grpcServer,
|
||||
cfg: cfg,
|
||||
jobService: jobService,
|
||||
agentRuntime: agentRuntime,
|
||||
grpcServer: grpcServer,
|
||||
cfg: cfg,
|
||||
}
|
||||
|
||||
if publisher, err := runtime.NewMQTTBootstrap(jobService, agentRuntime, appLogger); err != nil {
|
||||
if publisher, err := mqtt.NewMQTTBootstrap(jobService, agentRuntime, appLogger); err != nil {
|
||||
appLogger.Error("Failed to initialize MQTT publisher", "error", err)
|
||||
} else {
|
||||
module.mqttPublisher = publisher
|
||||
agentRuntime.SetAgentEventHandler(func(eventType string, agent *services.AgentWithStats) {
|
||||
runtime.PublishAgentMQTTEvent(publisher.Client(), appLogger, eventType, agent)
|
||||
agentRuntime.SetAgentEventHandler(func(eventType string, agent *dto.AgentWithStats) {
|
||||
mqtt.PublishAgentMQTTEvent(publisher.Client(), appLogger, eventType, agent)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -58,8 +53,8 @@ func NewGRPCModule(ctx context.Context, cfg *config.Config, db *gorm.DB, rds *re
|
||||
return module, nil
|
||||
}
|
||||
|
||||
func (m *GRPCModule) JobService() *services.JobService { return m.jobService }
|
||||
func (m *GRPCModule) AgentRuntime() *runtimegrpc.Server { return m.agentRuntime }
|
||||
func (m *GRPCModule) JobService() *service.JobService { return m.jobService }
|
||||
func (m *GRPCModule) AgentRuntime() *Server { return m.agentRuntime }
|
||||
func (m *GRPCModule) GRPCServer() *grpcpkg.Server { return m.grpcServer }
|
||||
func (m *GRPCModule) GRPCAddress() string { return ":" + m.cfg.Server.GRPCPort }
|
||||
func (m *GRPCModule) ServeGRPC(listener net.Listener) error { return m.grpcServer.Serve(listener) }
|
||||
|
||||
170
internal/transport/grpc/stream_handlers.go
Normal file
170
internal/transport/grpc/stream_handlers.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
grpcpkg "google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
proto "stream.api/internal/api/proto/agent/v1"
|
||||
"stream.api/internal/dto"
|
||||
)
|
||||
|
||||
func (s *Server) Next(context.Context, *proto.NextRequest) (*proto.NextResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "use StreamJobs")
|
||||
}
|
||||
|
||||
func (s *Server) StreamJobs(_ *proto.StreamOptions, stream grpcpkg.ServerStreamingServer[proto.Workflow]) error {
|
||||
ctx := stream.Context()
|
||||
agentID, _, ok := s.getAgentIDFromContext(ctx)
|
||||
if !ok {
|
||||
return status.Error(codes.Unauthenticated, "invalid or missing token")
|
||||
}
|
||||
s.agentManager.UpdateHeartbeat(agentID)
|
||||
cancelCh, _ := s.jobService.SubscribeCancel(ctx, agentID)
|
||||
commandCh, _ := s.agentManager.GetCommandChannel(agentID)
|
||||
ticker := time.NewTicker(2 * time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case cmd := <-commandCh:
|
||||
payload, _ := json.Marshal(map[string]any{"image": "alpine", "commands": []string{"echo 'System Command'"}, "environment": map[string]string{}, "action": cmd})
|
||||
if err := stream.Send(&proto.Workflow{Id: fmt.Sprintf("cmd-%s-%d", agentID, time.Now().UnixNano()), Timeout: 300, Payload: payload}); err != nil {
|
||||
return err
|
||||
}
|
||||
case jobID := <-cancelCh:
|
||||
if s.isJobAssigned(agentID, jobID) {
|
||||
if err := stream.Send(&proto.Workflow{Id: jobID, Cancel: true}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
s.agentManager.UpdateHeartbeat(agentID)
|
||||
jobCtx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
job, err := s.jobService.GetNextJob(jobCtx)
|
||||
cancel()
|
||||
if err != nil || job == nil {
|
||||
continue
|
||||
}
|
||||
s.trackJobAssignment(agentID, job.ID)
|
||||
if err := s.jobService.AssignJob(ctx, job.ID, agentID); err != nil {
|
||||
s.untrackJobAssignment(agentID, job.ID)
|
||||
continue
|
||||
}
|
||||
var config map[string]any
|
||||
if err := json.Unmarshal([]byte(*job.Config), &config); err != nil {
|
||||
_ = s.jobService.UpdateJobStatus(ctx, job.ID, dto.JobStatusFailure)
|
||||
s.untrackJobAssignment(agentID, job.ID)
|
||||
continue
|
||||
}
|
||||
image, _ := config["image"].(string)
|
||||
if image == "" {
|
||||
image = "alpine"
|
||||
}
|
||||
commands := []string{"echo 'No commands specified'"}
|
||||
if raw, ok := config["commands"].([]any); ok && len(raw) > 0 {
|
||||
commands = commands[:0]
|
||||
for _, item := range raw {
|
||||
if text, ok := item.(string); ok {
|
||||
commands = append(commands, text)
|
||||
}
|
||||
}
|
||||
if len(commands) == 0 {
|
||||
commands = []string{"echo 'No commands specified'"}
|
||||
}
|
||||
}
|
||||
payload, _ := json.Marshal(map[string]any{"image": image, "commands": commands, "environment": map[string]string{}})
|
||||
if err := stream.Send(&proto.Workflow{Id: job.ID, Timeout: 60 * 60 * 1000, Payload: payload}); err != nil {
|
||||
_ = s.jobService.UpdateJobStatus(ctx, job.ID, dto.JobStatusPending)
|
||||
s.untrackJobAssignment(agentID, job.ID)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) SubmitStatus(stream grpcpkg.ClientStreamingServer[proto.StatusUpdate, proto.Empty]) error {
|
||||
ctx := stream.Context()
|
||||
agentID, _, ok := s.getAgentIDFromContext(ctx)
|
||||
if !ok {
|
||||
return status.Error(codes.Unauthenticated, "invalid or missing token")
|
||||
}
|
||||
for {
|
||||
update, err := stream.Recv()
|
||||
if err != nil {
|
||||
return stream.SendAndClose(&proto.Empty{})
|
||||
}
|
||||
switch update.Type {
|
||||
case 0, 1:
|
||||
_ = s.jobService.ProcessLog(ctx, update.StepUuid, update.Data)
|
||||
case 4:
|
||||
var progress float64
|
||||
fmt.Sscanf(string(update.Data), "%f", &progress)
|
||||
_ = s.jobService.UpdateJobProgress(ctx, update.StepUuid, progress)
|
||||
case 5:
|
||||
var stats struct {
|
||||
CPU float64 `json:"cpu"`
|
||||
RAM float64 `json:"ram"`
|
||||
}
|
||||
if json.Unmarshal(update.Data, &stats) == nil {
|
||||
s.agentManager.UpdateResources(agentID, stats.CPU, stats.RAM)
|
||||
if s.onAgentEvent != nil {
|
||||
s.onAgentEvent("agent_update", s.getAgentWithStats(agentID))
|
||||
}
|
||||
}
|
||||
_ = s.jobService.PublishSystemResources(ctx, agentID, update.Data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) Init(ctx context.Context, req *proto.InitRequest) (*proto.Empty, error) {
|
||||
if err := s.jobService.UpdateJobStatus(ctx, req.Id, dto.JobStatusRunning); err != nil {
|
||||
return nil, status.Error(codes.Internal, "failed to update job status")
|
||||
}
|
||||
return &proto.Empty{}, nil
|
||||
}
|
||||
|
||||
func (s *Server) Wait(context.Context, *proto.WaitRequest) (*proto.WaitResponse, error) {
|
||||
return &proto.WaitResponse{Canceled: false}, nil
|
||||
}
|
||||
|
||||
func (s *Server) Done(ctx context.Context, req *proto.DoneRequest) (*proto.Empty, error) {
|
||||
agentID, _, ok := s.getAgentIDFromContext(ctx)
|
||||
if !ok {
|
||||
return nil, status.Error(codes.Unauthenticated, "invalid session")
|
||||
}
|
||||
jobStatus := dto.JobStatusSuccess
|
||||
if req.State != nil && req.State.Error != "" {
|
||||
jobStatus = dto.JobStatusFailure
|
||||
}
|
||||
if err := s.jobService.UpdateJobStatus(ctx, req.Id, jobStatus); err != nil {
|
||||
return nil, status.Error(codes.Internal, "failed to update job status")
|
||||
}
|
||||
s.untrackJobAssignment(agentID, req.Id)
|
||||
return &proto.Empty{}, nil
|
||||
}
|
||||
|
||||
func (s *Server) Update(context.Context, *proto.UpdateRequest) (*proto.Empty, error) {
|
||||
return &proto.Empty{}, nil
|
||||
}
|
||||
|
||||
func (s *Server) Log(ctx context.Context, req *proto.LogRequest) (*proto.Empty, error) {
|
||||
if _, _, ok := s.getAgentIDFromContext(ctx); !ok {
|
||||
return nil, status.Error(codes.Unauthenticated, "invalid session")
|
||||
}
|
||||
for _, entry := range req.LogEntries {
|
||||
if entry.StepUuid != "" {
|
||||
_ = s.jobService.ProcessLog(ctx, entry.StepUuid, entry.Data)
|
||||
}
|
||||
}
|
||||
return &proto.Empty{}, nil
|
||||
}
|
||||
|
||||
func (s *Server) Extend(context.Context, *proto.ExtendRequest) (*proto.Empty, error) {
|
||||
return &proto.Empty{}, nil
|
||||
}
|
||||
Reference in New Issue
Block a user