falco: Failure to find a BPF probe on Container Optimized OS

I’m having issues running Falco on GCP using Google Kubernetes Engine with Container-Optimized OS.

Initially, I assumed the error was caused by the host /etc not being mounted into the container, or by the SYSDIG_BPF_PROBE environment variable not being exported, but I confirmed that both are correctly in place in the container.

To spin up this infrastructure, I converted the YAML files from the examples to Terraform:

resource "kubernetes_service_account" "falco_sa" {
  metadata {
    name = "falco-account"
    labels = {
      app  = "falco"
      role = "security"
    }
  }
}

resource "kubernetes_cluster_role" "falco_cr" {
  metadata {
    name = "falco-cluster-role"
    labels = {
      app  = "falco"
      role = "security"
    }
  }
  rule {
    api_groups = ["extensions", ""]
    resources  = ["nodes", "namespaces", "pods", "replicationcontrollers", "replicasets", "services", "daemonsets", "deployments", "events", "configmaps"]
    verbs      = ["get", "list", "watch"]
  }
  rule {
    non_resource_urls = ["/healthz", "/healthz/*"]
    verbs             = ["get"]
  }
}

resource "kubernetes_cluster_role_binding" "falco_crb" {
  metadata {
    name = "falco-cluster-role-bind"
    labels = {
      app  = "falco"
      role = "security"
    }
  }

  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.falco_sa.metadata.0.name
    namespace = "default"
  }

  role_ref {
    kind      = "ClusterRole"
    name      = kubernetes_cluster_role.falco_cr.metadata.0.name
    api_group = "rbac.authorization.k8s.io"
  }
}

resource "kubernetes_config_map" "falco_cfgmap" {
  metadata {
    name = "falco-cfgmap"
    labels = {
      app  = "falco"
      role = "security"
    }
  }

  data = {
    "application_rules.yaml" = file("configs/falco/application_rules.yaml")
    "falco_rules.local.yaml" = file("configs/falco/falco_rules.local.yaml")
    "falco_rules.yaml"       = file("configs/falco/falco_rules.yaml")
    "k8s_audit_rules.yaml"   = file("configs/falco/k8s_audit_rules.yaml")
  }
}

resource "kubernetes_daemonset" "falco_ds" {
  metadata {
    name = "falco-daemonset"
    labels = {
      app  = "falco"
      role = "security"
    }
  }

  spec {

    selector {
      match_labels = {
        app  = "falco"
        role = "security"
      }
    }


    template {
      metadata {
        labels = {
          app  = "falco"
          role = "security"
        }
      }

      spec {
        service_account_name = kubernetes_service_account.falco_sa.metadata.0.name
        volume {
          name = "docker-socket"
          host_path {
            path = "/var/run/docker.socket"
          }
        }
        volume {
          name = "containerd-socket"
          host_path {
            path = "/run/containerd/containerd.sock"
          }
        }
        volume {
          name = "dev-fs"
          host_path {
            path = "/dev"
          }
        }
        volume {
          name = "proc-fs"
          host_path {
            path = "/proc"
          }
        }
        volume {
          name = "boot-fs"
          host_path {
            path = "/boot"
          }
        }
        volume {
          name = "lib-modules"
          host_path {
            path = "/lib/modules"
          }
        }
        volume {
          name = "usr-fs"
          host_path {
            path = "/usr"
          }
        }
        volume {
          name = "etc-fs"
          host_path {
            path = "/etc"
          }
        }
        volume {
          name = "falco-config"
          config_map {
            name = kubernetes_config_map.falco_cfgmap.metadata.0.name
          }
        }

        container {
          name  = "falco"
          image = "falcosecurity/falco:latest"
          args  = [
            "/usr/bin/falco",
            "--cri", "/host/run/containerd/containerd.sock",
            "-K", "/var/run/secrets/kubernetes.io/serviceaccount/token",
            "-k", "https://$(KUBERNETES_SERVICE_HOST)",
            "-pk",
          ]
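          # privileged is required so falco can load its kernel module or
          # eBPF probe and inspect the host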
          security_context {
            privileged = true
          }
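          # setting SYSDIG_BPF_PROBE (even to an empty value) tells the
          # container entrypoint to use the eBPF probe instead of the
          # kernel module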
          env {
            name  = "SYSDIG_BPF_PROBE"
            value = ""
          }
          volume_mount {
            name       = "docker-socket"
            mount_path = "/host/var/run/docker.sock"
          }
          volume_mount {
            name       = "containerd-socket"
            mount_path = "/host/run/containerd/containerd.sock"
          }
          volume_mount {
            name       = "dev-fs"
            mount_path = "/host/dev"
          }
          volume_mount {
            name       = "proc-fs"
            mount_path = "/host/proc"
            read_only = true
          }
          volume_mount {
            name       = "boot-fs"
            mount_path = "/host/boot"
            read_only = true
          }
          volume_mount {
            name       = "lib-modules"
            mount_path = "/host/lib/modules"
            read_only = true
          }
          volume_mount {
            name       = "usr-fs"
            mount_path = "/host/usr"
            read_only = true
          }
          volume_mount {
            name       = "etc-fs"
            mount_path = "/host/etc"
            read_only = true
          }
          volume_mount {
            name       = "falco-config"
            mount_path = "/etc/falco"
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "falco_svc" {
  metadata {
    name = kubernetes_daemonset.falco_ds.metadata.0.name
    labels = {
      app  = "falco"
      role = "security"
    }
  }
  spec {
    type = "ClusterIP"

    port {
      protocol = "TCP"
      port     = 8765
    }

    selector = {
      app  = "falco"
      role = "security"
    }
  }
}

The output of kubectl describe daemonset looks correct:

Name:           falco-daemonset
Selector:       app=falco,role=security
Node-Selector:  <none>
Labels:         app=falco
                role=security
Annotations:    deprecated.daemonset.template.generation: 4
Desired Number of Nodes Scheduled: 3
Current Number of Nodes Scheduled: 3
Number of Nodes Scheduled with Up-to-date Pods: 3
Number of Nodes Scheduled with Available Pods: 0
Number of Nodes Misscheduled: 0
Pods Status:  3 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:           app=falco
                    role=security
  Service Account:  falco-account
  Containers:
   falco:
    Image:      falcosecurity/falco:latest
    Port:       <none>
    Host Port:  <none>
    Args:
      /usr/bin/falco
      --cri
      /host/run/containerd/containerd.sock
      -K
      /var/run/secrets/kubernetes.io/serviceaccount/token
      -k
      https://$(KUBERNETES_SERVICE_HOST)
      -pk
    Environment:
      SYSDIG_BPF_PROBE:  
    Mounts:
      /etc/falco from falco-config (rw)
      /host/boot from boot-fs (ro)
      /host/dev from dev-fs (rw)
      /host/etc from etc-fs (ro)
      /host/lib/modules from lib-modules (ro)
      /host/proc from proc-fs (ro)
      /host/run/containerd/containerd.sock from containerd-socket (rw)
      /host/usr from usr-fs (ro)
      /host/var/run/docker.sock from docker-socket (rw)
  Volumes:
   docker-socket:
    Type:          HostPath (bare host directory volume)
    Path:          /var/run/docker.socket
    HostPathType:  
   containerd-socket:
    Type:          HostPath (bare host directory volume)
    Path:          /run/containerd/containerd.sock
    HostPathType:  
   dev-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /dev
    HostPathType:  
   proc-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /proc
    HostPathType:  
   boot-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /boot
    HostPathType:  
   lib-modules:
    Type:          HostPath (bare host directory volume)
    Path:          /lib/modules
    HostPathType:  
   usr-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /usr
    HostPathType:  
   etc-fs:
    Type:          HostPath (bare host directory volume)
    Path:          /etc
    HostPathType:  
   falco-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      falco-cfgmap
    Optional:  false
Events:
  Type    Reason            Age    From                  Message
  ----    ------            ----   ----                  -------
  Normal  SuccessfulCreate  45m    daemonset-controller  Created pod: falco-daemonset-vvfm4
  Normal  SuccessfulCreate  45m    daemonset-controller  Created pod: falco-daemonset-r6xx2
  Normal  SuccessfulCreate  45m    daemonset-controller  Created pod: falco-daemonset-rk64p
  Normal  SuccessfulDelete  34m    daemonset-controller  Deleted pod: falco-daemonset-vvfm4
  Normal  SuccessfulDelete  34m    daemonset-controller  Deleted pod: falco-daemonset-r6xx2
  Normal  SuccessfulDelete  34m    daemonset-controller  Deleted pod: falco-daemonset-rk64p
  Normal  SuccessfulCreate  34m    daemonset-controller  Created pod: falco-daemonset-ghpbt
  Normal  SuccessfulCreate  34m    daemonset-controller  Created pod: falco-daemonset-6n7fr
  Normal  SuccessfulCreate  34m    daemonset-controller  Created pod: falco-daemonset-dz42d
  Normal  SuccessfulDelete  19m    daemonset-controller  Deleted pod: falco-daemonset-dz42d
  Normal  SuccessfulDelete  19m    daemonset-controller  Deleted pod: falco-daemonset-ghpbt
  Normal  SuccessfulDelete  19m    daemonset-controller  Deleted pod: falco-daemonset-6n7fr
  Normal  SuccessfulCreate  19m    daemonset-controller  Created pod: falco-daemonset-7l9wr
  Normal  SuccessfulCreate  19m    daemonset-controller  Created pod: falco-daemonset-4p9xg
  Normal  SuccessfulCreate  19m    daemonset-controller  Created pod: falco-daemonset-hn24r
  Normal  SuccessfulDelete  8m33s  daemonset-controller  Deleted pod: falco-daemonset-hn24r
  Normal  SuccessfulDelete  8m33s  daemonset-controller  Deleted pod: falco-daemonset-4p9xg
  Normal  SuccessfulDelete  8m33s  daemonset-controller  Deleted pod: falco-daemonset-7l9wr
  Normal  SuccessfulCreate  8m23s  daemonset-controller  Created pod: falco-daemonset-t7xgr
  Normal  SuccessfulCreate  8m23s  daemonset-controller  Created pod: falco-daemonset-ptpvj
  Normal  SuccessfulCreate  8m23s  daemonset-controller  Created pod: falco-daemonset-dkctn

And the pod logs:

* Setting up /usr/src links from host
* Mounting debugfs
Found kernel config at /proc/config.gz
* COS detected (build 11647.163.0), downloading and setting up kernel headers
* Downloading https://storage.googleapis.com/cos-tools/11647.163.0/kernel-src.tar.gz
* Extracting kernel sources
* Configuring kernel
scripts/sign-file.c:25:30: fatal error: openssl/opensslv.h: No such file or directory
compilation terminated.
make[1]: *** [scripts/Makefile.host:102: scripts/sign-file] Error 1
make: *** [Makefile:572: scripts] Error 2
* Trying to compile BPF probe falco-probe-bpf (falco-probe-bpf-0.15.0-x86_64-4.14.94+-d7aaf2e0f41bfe30d6d84fe8e754c0d9.o)
In file included from /usr/src/falco-0.15.0/bpf/probe.c:23:
/usr/src/falco-0.15.0/bpf/fillers.h:2017:26: error: no member named 'loginuid' in 'struct task_struct'
                loginuid = _READ(task->loginuid);
                                 ~~~~  ^
/usr/src/falco-0.15.0/bpf/plumbing_helpers.h:18:28: note: expanded from macro '_READ'
#define _READ(P) ({ typeof(P) _val;                             \
                           ^
In file included from /usr/src/falco-0.15.0/bpf/probe.c:23:
/usr/src/falco-0.15.0/bpf/fillers.h:2017:26: error: no member named 'loginuid' in 'struct task_struct'
                loginuid = _READ(task->loginuid);
                                 ~~~~  ^
/usr/src/falco-0.15.0/bpf/plumbing_helpers.h:20:44: note: expanded from macro '_READ'
                    bpf_probe_read(&_val, sizeof(_val), &P);    \
                                                         ^
In file included from /usr/src/falco-0.15.0/bpf/probe.c:23:
/usr/src/falco-0.15.0/bpf/fillers.h:2017:12: error: assigning to 'kuid_t' from incompatible type 'void'
                loginuid = _READ(task->loginuid);
                         ^ ~~~~~~~~~~~~~~~~~~~~~
3 errors generated.
make[2]: *** [/usr/src/falco-0.15.0/bpf/Makefile:33: /usr/src/falco-0.15.0/bpf/probe.o] Error 1
make[1]: *** [Makefile:1541: _module_/usr/src/falco-0.15.0/bpf] Error 2
make: *** [Makefile:18: all] Error 2
mv: cannot stat '/usr/src/falco-0.15.0/bpf/probe.o': No such file or directory
* Trying to download precompiled BPF probe from https://s3.amazonaws.com/download.draios.com/stable/sysdig-probe-binaries/falco-probe-bpf-0.15.0-x86_64-4.14.94%2B-d7aaf2e0f41bfe30d6d84fe8e754c0d9.o
curl: (22) The requested URL returned error: 404 Not Found
* Failure to find a BPF probe

About this issue

  • State: closed
  • Created 5 years ago
  • Reactions: 5
  • Comments: 20 (11 by maintainers)

Most upvoted comments

@caquino If you run this on your GKE cluster, let me know whether it solves your problem. I believe it will (it solves it on my end), but unfortunately COS is a bit of a moving target, so it will be helpful to hear that it fixes things for you as well 😃

@mfdii I can confirm that with KBUILD_EXTRA_CPPFLAGS set, the build works without issues.

It also required a change to my Terraform to work: automounting the service account token (see the service account resource below).

I’m sharing the final configuration here in case anyone else is interested.

resource "kubernetes_service_account" "falco_sa" {
  metadata {
    name = "falco-account"
    labels = {
      app  = "falco"
      role = "security"
    }
  }
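  # without this the pod does not get the token mounted at
  # /var/run/secrets/kubernetes.io/serviceaccount/token, which falco
  # reads via the -K flag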
  automount_service_account_token = true
}

resource "kubernetes_cluster_role" "falco_cr" {
  metadata {
    name = "falco-cluster-role"
    labels = {
      app  = "falco"
      role = "security"
    }
  }
  rule {
    api_groups = ["extensions", ""]
    resources  = ["nodes", "namespaces", "pods", "replicationcontrollers", "replicasets", "services", "daemonsets", "deployments", "events", "configmaps"]
    verbs      = ["get", "list", "watch"]
  }
  rule {
    non_resource_urls = ["/healthz", "/healthz/*"]
    verbs             = ["get"]
  }
}

resource "kubernetes_cluster_role_binding" "falco_crb" {
  metadata {
    name = "falco-cluster-role-bind"
    labels = {
      app  = "falco"
      role = "security"
    }
  }

  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.falco_sa.metadata.0.name
    namespace = "default"
  }

  role_ref {
    kind      = "ClusterRole"
    name      = kubernetes_cluster_role.falco_cr.metadata.0.name
    api_group = "rbac.authorization.k8s.io"
  }
}

resource "kubernetes_config_map" "falco_cfgmap" {
  metadata {
    name = "falco-cfgmap"
    labels = {
      app  = "falco"
      role = "security"
    }
  }

  data = {
    "application_rules.yaml" = file("configs/falco/application_rules.yaml")
    "falco_rules.local.yaml" = file("configs/falco/falco_rules.local.yaml")
    "falco_rules.yaml"       = file("configs/falco/falco_rules.yaml")
    "k8s_audit_rules.yaml"   = file("configs/falco/k8s_audit_rules.yaml")
    "falco.yaml"             = file("configs/falco/falco.yaml")
  }
}

resource "kubernetes_daemonset" "falco_ds" {
  metadata {
    name = "falco-daemonset"
    labels = {
      app  = "falco"
      role = "security"
    }
  }

  spec {

    selector {
      match_labels = {
        app  = "falco"
        role = "security"
      }
    }

    template {
      metadata {
        labels = {
          app  = "falco"
          role = "security"
        }
      }

      spec {
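        # with hostNetwork enabled, ClusterFirstWithHostNet is required for
        # the pod to keep resolving cluster-internal DNS names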
        host_network         = true
        service_account_name = kubernetes_service_account.falco_sa.metadata.0.name
        dns_policy           = "ClusterFirstWithHostNet"

        volume {
          name = "docker-socket"
          host_path {
            # Docker's socket on the host is docker.sock (matching the
            # /host/var/run/docker.sock mount below)
            path = "/var/run/docker.sock"
          }
        }
        volume {
          name = "containerd-socket"
          host_path {
            path = "/run/containerd/containerd.sock"
          }
        }
        volume {
          name = "dev-fs"
          host_path {
            path = "/dev"
          }
        }
        volume {
          name = "proc-fs"
          host_path {
            path = "/proc"
          }
        }
        volume {
          name = "boot-fs"
          host_path {
            path = "/boot"
          }
        }
        volume {
          name = "lib-modules"
          host_path {
            path = "/lib/modules"
          }
        }
        volume {
          name = "usr-fs"
          host_path {
            path = "/usr"
          }
        }
        volume {
          name = "etc-fs"
          host_path {
            path = "/etc"
          }
        }
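        # memory-backed emptyDir, mounted at /dev/shm in the container below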
        volume {
          name = "dshm"
          empty_dir {
            medium = "Memory"
          }
        }
        volume {
          name = "falco-config"
          config_map {
            name = kubernetes_config_map.falco_cfgmap.metadata.0.name
          }
        }

        container {
          name  = "falco"
          image = "falcosecurity/falco:latest"
          args = [
            "/usr/bin/falco",
            "--cri", "/host/run/containerd/containerd.sock",
            "-K", "/var/run/secrets/kubernetes.io/serviceaccount/token",
            "-k", "https://$(KUBERNETES_SERVICE_HOST)",
            "-pk",
          ]
          security_context {
            privileged = true
          }
          env {
            name  = "SYSDIG_BPF_PROBE"
            value = ""
          }
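          # COS_73_WORKAROUND compensates for the patched COS 4.14 kernel,
          # where loginuid is no longer a member of task_struct (the cause
          # of the compile errors in the logs above)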
          env {
            name  = "KBUILD_EXTRA_CPPFLAGS"
            value = "-DCOS_73_WORKAROUND"
          }
          volume_mount {
            name       = "docker-socket"
            mount_path = "/host/var/run/docker.sock"
          }
          volume_mount {
            name       = "containerd-socket"
            mount_path = "/host/run/containerd/containerd.sock"
          }
          volume_mount {
            name       = "dev-fs"
            mount_path = "/host/dev"
          }
          volume_mount {
            name       = "proc-fs"
            mount_path = "/host/proc"
            read_only  = true
          }
          volume_mount {
            name       = "boot-fs"
            mount_path = "/host/boot"
            read_only  = true
          }
          volume_mount {
            name       = "lib-modules"
            mount_path = "/host/lib/modules"
            read_only  = true
          }
          volume_mount {
            name       = "usr-fs"
            mount_path = "/host/usr"
            read_only  = true
          }
          volume_mount {
            name       = "etc-fs"
            mount_path = "/host/etc"
            read_only  = true
          }
          volume_mount {
            name       = "dshm"
            mount_path = "/dev/shm"
          }
          volume_mount {
            name       = "falco-config"
            mount_path = "/etc/falco"
          }
        }
      }
    }
  }
}

resource "kubernetes_service" "falco_svc" {
  metadata {
    name = kubernetes_daemonset.falco_ds.metadata.0.name
    labels = {
      app  = "falco"
      role = "security"
    }
  }
  spec {
    type = "ClusterIP"

    port {
      protocol = "TCP"
      port     = 8765
    }

    selector = {
      app  = "falco"
      role = "security"
    }
  }
}

@mfdii When using the kernel module instead, the instance shuts down with a kernel panic: it reports module signature verification failed, then probe not found, then the kernel panics and the system reboots.

Hi folks. I was unable to get this to work on my GKE cluster until I switched the image tag from 0.15.3 to dev. Prior to that, I was still seeing the compilation issue regarding loginuid. I had been (and still am) applying COS_73_WORKAROUND, so I’m not sure what changed between 0.15.3 and dev, but whatever it was, it made Falco work for me!
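In the Terraform above, that amounts to changing the image reference in the container block; a minimal sketch (the dev tag tracks unreleased builds, so pin a release tag again once the fix ships):

container {
  name  = "falco"
  # dev carries the not-yet-released BPF probe fix for COS; switch back
  # to a pinned release tag once it is published
  image = "falcosecurity/falco:dev"
}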

We are cutting a new release to incorporate the fix from sysdig. Hopefully it will be out today.