Merge pull request #36 from kubescape/refactor-logs
Refactor logs
rcohencyberarmor authored Mar 23, 2023
2 parents 11615c8 + 80dd70a commit 3bff5a0
Showing 8 changed files with 53 additions and 61 deletions.
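
The pattern repeated across these files: log calls that previously passed an empty message and packed the real information into a fmt.Sprintf'd helpers.String field now carry a descriptive message plus structured details, with errors passed through helpers.Error. A minimal before/after sketch of that pattern, using only the logger and helpers calls that appear in this diff (err, containerID and wlid are illustrative placeholders, not values from the commit):

// Before: empty message, error flattened into a string field.
logger.L().Ctx(context.GetBackgroundContext()).Fatal("", helpers.String("error during getting configuration data: ", fmt.Sprintf("%v", err)))

// After: descriptive message, structured error detail.
logger.L().Ctx(context.GetBackgroundContext()).Fatal("error during getting configuration data", helpers.Error(err))

// Multiple details go through a variadic slice of helpers.IDetails.
logger.L().Info("filtered SBOM has stored successfully in the storage",
	[]helpers.IDetails{
		helpers.String("containerID", containerID),
		helpers.String("k8s resource", wlid),
	}...)
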
2 changes: 1 addition & 1 deletion internal/validator/validator.go
@@ -33,7 +33,7 @@ func checkKernelVersion() error {
return fmt.Errorf("checkKernelVersion: fail to detect the kernel version")
}
kernelVersion := int8ToStr(uname.Sysname[:]) + "," + int8ToStr(uname.Release[:]) + "," + int8ToStr(uname.Version[:])
logger.L().Debug("", helpers.String("kernelVersion: ", kernelVersion))
logger.L().Debug("kernelVersion", helpers.String("is", kernelVersion))

if int8ToStr(uname.Release[:]) < minKernelVersion {
return fmt.Errorf("checkKernelVersion: the current kernel version %s is less than the min kernel version support %s", int8ToStr(uname.Release[:]), minKernelVersion)
13 changes: 6 additions & 7 deletions main.go
@@ -1,7 +1,6 @@
package main

import (
"fmt"
"sniffer/internal/validator"
"sniffer/pkg/config"
v1 "sniffer/pkg/config/v1"
@@ -18,15 +17,15 @@ func main() {
cfg := config.GetConfigurationConfigContext()
configData, err := cfg.GetConfigurationReader()
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Fatal("", helpers.String("error during getting configuration data: ", fmt.Sprintf("%v", err)))
logger.L().Ctx(context.GetBackgroundContext()).Fatal("error during getting configuration data", helpers.Error(err))
}
err = cfg.ParseConfiguration(v1.CreateConfigData(), configData)
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Fatal("", helpers.String("error during parsing configuration: ", fmt.Sprintf("%v", err)))
logger.L().Ctx(context.GetBackgroundContext()).Fatal("error during parsing configuration", helpers.Error(err))
}
err = validator.CheckPrerequisites()
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Fatal("", helpers.String("error during validation: ", fmt.Sprintf("%v", err)))
logger.L().Ctx(context.GetBackgroundContext()).Fatal("error during validation", helpers.Error(err))
}

context.SetBackgroundContext()
@@ -35,16 +34,16 @@ func main() {
acc := accumulator.GetAccumulator()
err = acc.StartAccumulator(accumulatorChannelError)
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Fatal("", helpers.String("error during start accumulator: ", fmt.Sprintf("%v", err)))
logger.L().Ctx(context.GetBackgroundContext()).Fatal("error during start accumulator", helpers.Error(err))
}

k8sAPIServerClient, err := conthandler.CreateContainerClientK8SAPIServer()
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Fatal("", helpers.String("error during create the container client: ", fmt.Sprintf("%v", err)))
logger.L().Ctx(context.GetBackgroundContext()).Fatal("error during create the container client", helpers.Error(err))
}
storageClient, err := storageclient.CreateSBOMStorageK8SAggregatedAPIClient()
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Fatal("", helpers.Error(err))
logger.L().Ctx(context.GetBackgroundContext()).Fatal("error during create the storage client", helpers.Error(err))
}
mainHandler, err := conthandler.CreateContainerHandler(k8sAPIServerClient, storageClient)
mainHandler.StartMainHandler()
24 changes: 12 additions & 12 deletions pkg/conthandler/container_main_handler.go
@@ -72,7 +72,7 @@ func (ch *ContainerHandler) afterTimerActions() error {
afterTimerActionsData := <-ch.afterTimerActionsChannel
containerDataInterface, exist := ch.watchedContainers.Load(afterTimerActionsData.containerID)
if !exist {
logger.L().Ctx(context.GetBackgroundContext()).Warning("afterTimerActions: failed to get container data of containerID ", []helpers.IDetails{helpers.String("", afterTimerActionsData.containerID)}...)
logger.L().Ctx(context.GetBackgroundContext()).Warning("afterTimerActions: failed to get container data of container ID", []helpers.IDetails{helpers.String("container ID", afterTimerActionsData.containerID)}...)
continue
}
containerData := containerDataInterface.(watchedContainerData)
@@ -81,22 +81,22 @@ func (ch *ContainerHandler) afterTimerActions() error {
fileList := containerData.containerAggregator.GetContainerRealtimeFileList()

if err = <-containerData.syncChannel[StepGetSBOM]; err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Warning("failed to get SBOM of containerID ", []helpers.IDetails{helpers.String(" ", afterTimerActionsData.containerID), helpers.String("of k8s resource ", containerData.event.GetK8SWorkloadID()), helpers.Error(err)}...)
logger.L().Ctx(context.GetBackgroundContext()).Warning("failed to get SBOM", []helpers.IDetails{helpers.String("container ID", afterTimerActionsData.containerID), helpers.String("container name", containerData.event.GetContainerName()), helpers.String("k8s resource ", containerData.event.GetK8SWorkloadID()), helpers.Error(err)}...)
continue
}
if err = containerData.sbomClient.FilterSBOM(fileList); err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Warning("failed to filter SBOM of containerID ", []helpers.IDetails{helpers.String(" ", afterTimerActionsData.containerID), helpers.String("of k8s resource ", containerData.event.GetK8SWorkloadID()), helpers.Error(err)}...)
logger.L().Ctx(context.GetBackgroundContext()).Warning("failed to filter SBOM", []helpers.IDetails{helpers.String("container ID", afterTimerActionsData.containerID), helpers.String("container name", containerData.event.GetContainerName()), helpers.String("k8s resource", containerData.event.GetK8SWorkloadID()), helpers.Error(err)}...)
continue
}
if err = containerData.sbomClient.StoreFilterSBOM(containerData.event.GetInstanceIDHash()); err != nil {
if errors.Is(err, sbom.IsAlreadyExist()) {
logger.L().Info("SBOM of containerID ", []helpers.IDetails{helpers.String(" ", afterTimerActionsData.containerID), helpers.String("of k8s resource already reported ", containerData.event.GetK8SWorkloadID())}...)
logger.L().Info("SBOM already reported", []helpers.IDetails{helpers.String("container ID", afterTimerActionsData.containerID), helpers.String("container name", containerData.event.GetContainerName()), helpers.String("k8s resource", containerData.event.GetK8SWorkloadID())}...)
} else {
logger.L().Ctx(context.GetBackgroundContext()).Warning("failed to store filter SBOM of containerID ", []helpers.IDetails{helpers.String(" ", afterTimerActionsData.containerID), helpers.String("of k8s resource ", containerData.event.GetK8SWorkloadID()), helpers.Error(err)}...)
logger.L().Ctx(context.GetBackgroundContext()).Warning("failed to store filter SBOM", []helpers.IDetails{helpers.String("container ID", afterTimerActionsData.containerID), helpers.String("k8s resource", containerData.event.GetK8SWorkloadID()), helpers.Error(err)}...)
}
continue
}
logger.L().Info("filtered SBOM of containerID ", []helpers.IDetails{helpers.String(" ", afterTimerActionsData.containerID), helpers.String("of k8s resource has stored successfully in the storage", containerData.event.GetK8SWorkloadID())}...)
logger.L().Info("filtered SBOM has stored successfully in the storage", []helpers.IDetails{helpers.String("containerID", afterTimerActionsData.containerID), helpers.String("k8s resource", containerData.event.GetK8SWorkloadID())}...)
}
}
}
@@ -128,7 +128,7 @@ func createTicker() *time.Ticker {
func (ch *ContainerHandler) startRelevancyProcess(contEvent v1.ContainerEventData) {
containerDataInterface, exist := ch.watchedContainers.Load(contEvent.GetContainerID())
if !exist {
logger.L().Ctx(context.GetBackgroundContext()).Error("startRelevancyProcess: failed to get container data of ", helpers.String("containerID: ", contEvent.GetContainerID()))
logger.L().Ctx(context.GetBackgroundContext()).Error("startRelevancyProcess: failed to get container data", helpers.String("container ID", contEvent.GetContainerID()), helpers.String("container name", contEvent.GetContainerName()), helpers.String("k8s resources", contEvent.GetK8SWorkloadID()))
return
}
watchedContainer := containerDataInterface.(watchedContainerData)
@@ -145,10 +145,10 @@ func (ch *ContainerHandler) startRelevancyProcess(contEvent v1.ContainerEventDat
go ch.getSBOM(contEvent)
err = ch.startTimer(watchedContainer, contEvent.GetContainerID())
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Warning("", helpers.Error(err))
logger.L().Ctx(context.GetBackgroundContext()).Warning("timer of containerID stop before expected", helpers.String("container ID", contEvent.GetContainerID()), helpers.String("container name", contEvent.GetContainerName()), helpers.String("k8s resources", contEvent.GetK8SWorkloadID()), helpers.Error(err))
err = watchedContainer.containerAggregator.StopAggregate()
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Warning("we have failed to stop to aggregate data for container ID: ", helpers.String("", contEvent.GetContainerID()))
logger.L().Ctx(context.GetBackgroundContext()).Warning("we have failed to stop to aggregate data", helpers.String("container ID", contEvent.GetContainerID()), helpers.String("container name", contEvent.GetContainerName()), helpers.String("k8s resources", contEvent.GetK8SWorkloadID()))
}
ch.watchedContainers.Delete(contEvent.GetContainerID())
break
@@ -164,7 +164,7 @@ func getShortContainerID(containerID string) string {
func (ch *ContainerHandler) getSBOM(contEvent v1.ContainerEventData) {
containerDataInterface, exist := ch.watchedContainers.Load(contEvent.GetContainerID())
if !exist {
logger.L().Ctx(context.GetBackgroundContext()).Error("getSBOM: failed to get container data of ", helpers.String("containerID: ", contEvent.GetContainerID()))
logger.L().Ctx(context.GetBackgroundContext()).Error("getSBOM: failed to get container data of ContainerID, not exist in memory", helpers.String("containerID", contEvent.GetContainerID()))
return
}
watchedContainer := containerDataInterface.(watchedContainerData)
@@ -205,10 +205,10 @@ func (ch *ContainerHandler) StartMainHandler() error {

for {
contEvent := <-ch.containersEventChan
logger.L().Info("", []helpers.IDetails{helpers.String("new container ", contEvent.GetContainerID()), helpers.String("has loaded in microservice ", contEvent.GetK8SWorkloadID())}...)
logger.L().Info("", []helpers.IDetails{helpers.String("new container has loaded", contEvent.GetContainerID()), helpers.String("ContainerID", contEvent.GetContainerID()), helpers.String("Container name", contEvent.GetContainerID()), helpers.String("k8s workload", contEvent.GetK8SWorkloadID())}...)
err := ch.handleNewContainerEvent(contEvent)
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Warning("fail to handle new container" + contEvent.GetK8SWorkloadID() + "was loaded, start monitor on it's container " + contEvent.GetContainerID())
logger.L().Ctx(context.GetBackgroundContext()).Warning("fail to handle new container", helpers.String("ContainerID", contEvent.GetContainerID()), helpers.String("Container name", contEvent.GetContainerID()), helpers.String("k8s workload", contEvent.GetK8SWorkloadID()), helpers.Error(err))
}
}
}
8 changes: 4 additions & 4 deletions pkg/conthandler/container_watcher.go
@@ -112,11 +112,11 @@ func (containerWatcher *ContainerWatcher) parsePodData(pod *core.Pod, containerI
}

func (containerWatcher *ContainerWatcher) StartWatchedOnContainers(containerEventChannel chan conthandlerV1.ContainerEventData) error {
logger.L().Info("", helpers.String("Ready to watch over node", containerWatcher.nodeName))
logger.L().Info("Ready to watch over node", helpers.String("node name", containerWatcher.nodeName))
for {
watcher, err := containerWatcher.ContainerClient.GetWatcher()
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("GetWatcher err: ", helpers.Error(err))
logger.L().Ctx(context.GetBackgroundContext()).Error("GetWatcher err", helpers.Error(err))
continue
}
for {
@@ -139,13 +139,13 @@ func (containerWatcher *ContainerWatcher) StartWatchedOnContainers(containerEven
case watch.Modified:
for i := range pod.Status.ContainerStatuses {
if pod.Status.ContainerStatuses[i].Started != nil && *pod.Status.ContainerStatuses[i].Started {
logger.L().Info("container started: ", helpers.String("container name: ", pod.Status.ContainerStatuses[i].ContainerID))
logger.L().Info("container has started", helpers.String("namespace", pod.GetNamespace()), helpers.String("Pod name", pod.GetName()), helpers.String("ContainerID", pod.Status.ContainerStatuses[i].ContainerID), helpers.String("Container name", pod.Status.ContainerStatuses[i].Name))
if pod.GetNamespace() == config.GetConfigurationConfigContext().GetNamespace() && pod.GetName() == config.GetConfigurationConfigContext().GetContainerName() {
continue
}
containerEventData, err := containerWatcher.parsePodData(pod, i)
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("parsePodData failed with error: ", helpers.Error(err))
logger.L().Ctx(context.GetBackgroundContext()).Error("failed to parse container data", helpers.String("namespace", pod.GetNamespace()), helpers.String("Pod name", pod.GetName()), helpers.String("ContainerID", pod.Status.ContainerStatuses[i].ContainerID), helpers.String("Container name", pod.Status.ContainerStatuses[i].Name), helpers.Error(err))
continue
}
containerEventChannel <- *containerEventData
30 changes: 17 additions & 13 deletions pkg/conthandler/v1/container_handler_data.go
@@ -17,22 +17,22 @@ const (
type ContainerEventType string

type ContainerEventData struct {
imageID string
containerID string
podName string
wlid string
instanceID string
eventType ContainerEventType
imageID string
containerID string
containerName string
wlid string
instanceID string
eventType ContainerEventType
}

func CreateNewContainerEvent(imageID, containerID, podName, wlid, instanceID string, eventType ContainerEventType) *ContainerEventData {
func CreateNewContainerEvent(imageID, containerID, containerName, wlid, instanceID string, eventType ContainerEventType) *ContainerEventData {
return &ContainerEventData{
imageID: imageID,
containerID: containerID,
podName: podName,
wlid: wlid,
instanceID: instanceID,
eventType: eventType,
imageID: imageID,
containerID: containerID,
containerName: containerName,
wlid: wlid,
instanceID: instanceID,
eventType: eventType,
}
}

@@ -48,6 +48,10 @@ func (event *ContainerEventData) GetK8SWorkloadID() string {
return event.wlid
}

func (event *ContainerEventData) GetContainerName() string {
return event.containerName
}

func (event *ContainerEventData) GetImageHash() (string, error) {
imageIDSplit := strings.Split(event.imageID, "@sha256:")
if len(imageIDSplit) == 2 {
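
The ContainerEventData struct above renames podName to containerName and adds a GetContainerName accessor, which is what lets the handler and watcher log the container name alongside its ID. A hedged usage sketch — the constructor and getter signatures come from this file (using the conthandlerV1 import alias seen in container_watcher.go), while every literal value and the event-type value are hypothetical:

// All literal values below are made up for illustration.
eventType := conthandlerV1.ContainerEventType("running") // real constants live in this package's (truncated) const block
event := conthandlerV1.CreateNewContainerEvent(
	"docker.io/library/nginx@sha256:0000",                    // imageID
	"containerd://1234abcd",                                  // containerID
	"nginx",                                                  // containerName (formerly podName)
	"wlid://cluster-demo/namespace-default/deployment-nginx", // wlid
	"instance-id-demo",                                       // instanceID
	eventType,
)
logger.L().Info("new container has loaded",
	helpers.String("ContainerID", event.GetContainerID()),
	helpers.String("Container name", event.GetContainerName()),
	helpers.String("k8s workload", event.GetK8SWorkloadID()))
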
21 changes: 7 additions & 14 deletions pkg/ebpfeng/falco_sniffer_engine.go
@@ -84,14 +84,14 @@ func (FalcoEbpfEngine *FalcoEbpfEngine) StartEbpfEngine() error {

fullEbpfEngineCMD := FalcoEbpfEngine.ebpfEngineCMDWithParams()
cmd := exec.Command(ebpfEngineLoaderPath, fullEbpfEngineCMD...)
logger.L().Debug("", helpers.String("cmd.Args ", fmt.Sprintf("%v", cmd.Args)))
logger.L().Debug("start ebpf engine process", helpers.String("cmd.Args", fmt.Sprintf("%v", cmd.Args)))
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
err = cmd.Start()
if err != nil {
logger.L().Debug("", helpers.String("StartEbpfEngine: fail with err ", fmt.Sprintf("%v", err)))
logger.L().Error("failed to start ebpf engine process", helpers.Error(err))
return err
}
FalcoEbpfEngine.cmd = cmd
Expand All @@ -107,42 +107,35 @@ func convertStringTimeToTimeOBJ(timestamp string) (*time.Time, error) {

year, err := strconv.Atoi(date[0])
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("fail strconv", helpers.Error(err))
return nil, err
}
month, err := strconv.Atoi(date[1])
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("fail strconv", helpers.Error(err))
return nil, err
}
day, err := strconv.Atoi(date[2])
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("fail strconv", helpers.Error(err))
return nil, err
}

hour, err := strconv.Atoi(tm[0])
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("fail strconv", helpers.Error(err))
return nil, err
}
minute, err := strconv.Atoi(tm[1])
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("fail strconv", helpers.Error(err))
return nil, err
}
seconds := strings.Split(tm[2], "+")
secs := strings.Split(seconds[0], ".")

sec, err := strconv.Atoi(secs[0])
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("fail strconv", helpers.Error(err))
return nil, err
}

nsec, err := strconv.Atoi(secs[1])
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("fail strconv", helpers.Error(err))
return nil, err
}

Expand All @@ -157,15 +150,15 @@ func parseLine(line string) (*ebpfev.EventData, error) {
}
lineParts := strings.Split(line, "]::[")
if len(lineParts) != 8 {
logger.L().Ctx(context.GetBackgroundContext()).Error("", helpers.String("we have got unknown line format, line is ", fmt.Sprintf("%s", line)))
logger.L().Ctx(context.GetBackgroundContext()).Error("failed to parse data from ebpf engine", helpers.String("we have got unknown line format, line is", line))
return nil, fmt.Errorf("we have got unknown line format, line is %s", line)
}
Timestamp, err := convertStringTimeToTimeOBJ(lineParts[0])
timestamp, err := convertStringTimeToTimeOBJ(lineParts[0])
if err != nil {
logger.L().Ctx(context.GetBackgroundContext()).Error("", helpers.String("parseLine Timestamp fail line is ", fmt.Sprintf("%s, err %v", line, err)))
logger.L().Ctx(context.GetBackgroundContext()).Warning("fail to parse timestamp from ebpf engine", []helpers.IDetails{helpers.String("timestamp", lineParts[0]), helpers.Error(err)}...)
return nil, fmt.Errorf("parseLine Timestamp fail line is %s, err %v", line, err)
}
return ebpfev.CreateKernelEvent(Timestamp, lineParts[1], lineParts[2], lineParts[3], lineParts[4], lineParts[5], lineParts[6], lineParts[7]), nil
return ebpfev.CreateKernelEvent(timestamp, lineParts[1], lineParts[2], lineParts[3], lineParts[4], lineParts[5], lineParts[6], lineParts[7]), nil
}

func (FalcoEbpfEngine *FalcoEbpfEngine) GetData(ebpfEngineDataChannel chan *ebpfev.EventData) {
@@ -181,7 +174,7 @@ func (FalcoEbpfEngine *FalcoEbpfEngine) GetData(ebpfEngineDataChannel chan *ebpf
ebpfEngineDataChannel <- data
}
}
logger.L().Info("", helpers.String("CacheAccumulator accumulateEbpfEngineData scanner.Err(): ", fmt.Sprintf("%v", scanner.Err())))
logger.L().Error("failed to get data from ebpf engine process", helpers.Error(scanner.Err()))
break
}
}
