From 66cb0281c4307d88406af02adfb7ae0d857aaab6 Mon Sep 17 00:00:00 2001 From: Chad Kittel Date: Mon, 23 Aug 2021 16:02:16 -0500 Subject: [PATCH] Update to latest Falco (#36) * Update to latest falco release * Config map is now expanded (no longer a single string) * Adjust limits to prevent DS from being killed (upstream is 1 core, so going part way there) --- cluster-manifests/falco-system/configmap.yaml | 5531 +++++++++++------ cluster-manifests/falco-system/daemonset.yaml | 8 +- .../falco-system/resource-quota.yaml | 2 +- docs/deploy/10-pre-bootstrap.md | 4 +- 4 files changed, 3725 insertions(+), 1820 deletions(-) diff --git a/cluster-manifests/falco-system/configmap.yaml b/cluster-manifests/falco-system/configmap.yaml index 095bee17..605283bc 100644 --- a/cluster-manifests/falco-system/configmap.yaml +++ b/cluster-manifests/falco-system/configmap.yaml @@ -75,6 +75,7 @@ data: syscall_event_drops: actions: - log + - alert rate: 0.03333 max_burst: 10 @@ -147,6 +148,7 @@ data: enabled: false listen_port: 8765 k8s_audit_endpoint: /k8s-audit + k8s_healthz_endpoint: /healthz ssl_enabled: false ssl_certificate: /etc/falco/certs/server.pem @@ -404,1816 +406,3719 @@ data: # tags: [users, container] # Or override/append to any rule, macro, or list from the Default Rules - falco_rules.yaml: "#\n# Copyright (C) 2020 The Falco Authors.\n#\n#\n# Licensed under - the Apache License, Version 2.0 (the \"License\");\n# you may not use this file - except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# - \ http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable - law or agreed to in writing, software\n# distributed under the License is distributed - on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - or implied.\n# See the License for the specific language governing permissions and\n# - limitations under the License.\n#\n\n# See xxx for details on falco engine and rules - versioning. 
Currently,\n# this specific rules file is compatible with engine version - 0\n# (e.g. falco releases <= 0.13.1), so we'll keep the\n# required_engine_version - lines commented out, so maintain\n# compatibility with older falco releases. With - the first incompatible\n# change to this rules file, we'll uncomment this line and - set it to\n# the falco engine version in use at the time.\n#\n- required_engine_version: - 7\n\n# Currently disabled as read/write are ignored syscalls. The nearly\n# similar - open_write/open_read check for files being opened for\n# reading/writing.\n# - macro: - write\n# condition: (syscall.type=write and fd.type in (file, directory))\n# - - macro: read\n# condition: (syscall.type=read and evt.dir=> and fd.type in (file, - directory))\n\n- macro: open_write\n condition: (evt.type=open or evt.type=openat) - and evt.is_open_write=true and fd.typechar='f' and fd.num>=0\n\n- macro: open_read\n - \ condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='f' - and fd.num>=0\n\n- macro: open_directory\n condition: (evt.type=open or evt.type=openat) - and evt.is_open_read=true and fd.typechar='d' and fd.num>=0\n\n- macro: never_true\n - \ condition: (evt.num=0)\n\n- macro: always_true\n condition: (evt.num>=0)\n\n# - In some cases, such as dropped system call events, information about\n# the process - name may be missing. 
For some rules that really depend\n# on the identity of the - process performing an action such as opening\n# a file, etc., we require that the - process name be known.\n- macro: proc_name_exists\n condition: (proc.name!=\"\")\n\n- - macro: rename\n condition: evt.type in (rename, renameat, renameat2)\n\n- macro: - mkdir\n condition: evt.type in (mkdir, mkdirat)\n\n- macro: remove\n condition: - evt.type in (rmdir, unlink, unlinkat)\n\n- macro: modify\n condition: rename or - remove\n\n- macro: spawned_process\n condition: evt.type = execve and evt.dir=<\n\n- - macro: create_symlink\n condition: evt.type in (symlink, symlinkat) and evt.dir=<\n\n- - macro: chmod\n condition: (evt.type in (chmod, fchmod, fchmodat) and evt.dir=<)\n\n# - File categories\n- macro: bin_dir\n condition: fd.directory in (/bin, /sbin, /usr/bin, - /usr/sbin)\n\n- macro: bin_dir_mkdir\n condition: >\n (evt.arg.path startswith - /bin/ or\n evt.arg.path startswith /sbin/ or\n evt.arg.path startswith /usr/bin/ - or\n evt.arg.path startswith /usr/sbin/)\n\n- macro: bin_dir_rename\n condition: - >\n (evt.arg.path startswith /bin/ or\n evt.arg.path startswith /sbin/ or\n - \ evt.arg.path startswith /usr/bin/ or\n evt.arg.path startswith /usr/sbin/ - or\n evt.arg.name startswith /bin/ or\n evt.arg.name startswith /sbin/ or\n - \ evt.arg.name startswith /usr/bin/ or\n evt.arg.name startswith /usr/sbin/ - or\n evt.arg.oldpath startswith /bin/ or\n evt.arg.oldpath startswith /sbin/ - or\n evt.arg.oldpath startswith /usr/bin/ or\n evt.arg.oldpath startswith - /usr/sbin/ or\n evt.arg.newpath startswith /bin/ or\n evt.arg.newpath startswith - /sbin/ or\n evt.arg.newpath startswith /usr/bin/ or\n evt.arg.newpath startswith - /usr/sbin/)\n\n- macro: etc_dir\n condition: fd.name startswith /etc/\n\n# This - detects writes immediately below / or any write anywhere below /root\n- macro: root_dir\n - \ condition: (fd.directory=/ or fd.name startswith /root/)\n\n- list: shell_binaries\n - \ items: [ash, bash, 
csh, ksh, sh, tcsh, zsh, dash]\n\n- list: ssh_binaries\n items: - [\n sshd, sftp-server, ssh-agent,\n ssh, scp, sftp,\n ssh-keygen, ssh-keysign, - ssh-keyscan, ssh-add\n ]\n\n- list: shell_mgmt_binaries\n items: [add-shell, - remove-shell]\n\n- macro: shell_procs\n condition: proc.name in (shell_binaries)\n\n- - list: coreutils_binaries\n items: [\n truncate, sha1sum, numfmt, fmt, fold, - uniq, cut, who,\n groups, csplit, sort, expand, printf, printenv, unlink, tee, - chcon, stat,\n basename, split, nice, \"yes\", whoami, sha224sum, hostid, users, - stdbuf,\n base64, unexpand, cksum, od, paste, nproc, pathchk, sha256sum, wc, - test,\n comm, arch, du, factor, sha512sum, md5sum, tr, runcon, env, dirname,\n - \ tsort, join, shuf, install, logname, pinky, nohup, expr, pr, tty, timeout,\n - \ tail, \"[\", seq, sha384sum, nl, head, id, mkfifo, sum, dircolors, ptx, shred,\n - \ tac, link, chroot, vdir, chown, touch, ls, dd, uname, \"true\", pwd, date,\n - \ chgrp, chmod, mktemp, cat, mknod, sync, ln, \"false\", rm, mv, cp, echo,\n readlink, - sleep, stty, mkdir, df, dir, rmdir, touch\n ]\n\n# dpkg -L login | grep bin | - xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr \"\\\\n\" - \",\"\n- list: login_binaries\n items: [\n login, systemd, '\"(systemd)\"', - systemd-logind, su,\n nologin, faillog, lastlog, newgrp, sg\n ]\n\n# dpkg - -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L - 1 basename | tr \"\\\\n\" \",\"\n- list: passwd_binaries\n items: [\n shadowconfig, - grpck, pwunconv, grpconv, pwck,\n groupmod, vipw, pwconv, useradd, newusers, - cppw, chpasswd, usermod,\n groupadd, groupdel, grpunconv, chgpasswd, userdel, - chage, chsh,\n gpasswd, chfn, expiry, passwd, vigr, cpgr, adduser, addgroup, - deluser, delgroup\n ]\n\n# repoquery -l shadow-utils | grep bin | xargs ls -ld - | grep -v '^d' |\n# awk '{print $9}' | xargs -L 1 basename | tr \"\\\\n\" \",\"\n- - list: shadowutils_binaries\n items: [\n chage, 
gpasswd, lastlog, newgrp, sg, - adduser, deluser, chpasswd,\n groupadd, groupdel, addgroup, delgroup, groupmems, - groupmod, grpck, grpconv, grpunconv,\n newusers, pwck, pwconv, pwunconv, useradd, - userdel, usermod, vigr, vipw, unix_chkpwd\n ]\n\n- list: sysdigcloud_binaries\n - \ items: [setup-backend, dragent, sdchecks]\n\n- list: docker_binaries\n items: - [docker, dockerd, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current, - dockerd-current]\n\n- list: k8s_binaries\n items: [hyperkube, skydns, kube2sky, - exechealthz, weave-net, loopback, bridge, openshift-sdn, openshift]\n\n- list: lxd_binaries\n - \ items: [lxd, lxcfs]\n\n- list: http_server_binaries\n items: [nginx, httpd, httpd-foregroun, - lighttpd, apache, apache2]\n\n- list: db_server_binaries\n items: [mysqld, postgres, - sqlplus]\n\n- list: mysql_mgmt_binaries\n items: [mysql_install_d, mysql_ssl_rsa_s]\n\n- - list: postgres_mgmt_binaries\n items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster]\n\n- - list: db_mgmt_binaries\n items: [mysql_mgmt_binaries, postgres_mgmt_binaries]\n\n- - list: nosql_server_binaries\n items: [couchdb, memcached, redis-server, rabbitmq-server, - mongod]\n\n- list: gitlab_binaries\n items: [gitlab-shell, gitlab-mon, gitlab-runner-b, - git]\n\n- list: interpreted_binaries\n items: [lua, node, perl, perl5, perl6, php, - python, python2, python3, ruby, tcl]\n\n- macro: interpreted_procs\n condition: - >\n (proc.name in (interpreted_binaries))\n\n- macro: server_procs\n condition: - proc.name in (http_server_binaries, db_server_binaries, docker_binaries, sshd)\n\n# - The explicit quotes are needed to avoid the - characters being\n# interpreted by - the filter expression.\n- list: rpm_binaries\n items: [dnf, rpm, rpmkey, yum, '\"75-system-updat\"', - rhsmcertd-worke, subscription-ma,\n repoquery, rpmkeys, rpmq, yum-cron, - yum-config-mana, yum-debug-dump,\n abrt-action-sav, rpmdb_stat, microdnf, - rhn_check, yumdb]\n\n- list: openscap_rpm_binaries\n items: 
[probe_rpminfo, probe_rpmverify, - probe_rpmverifyfile, probe_rpmverifypackage]\n\n- macro: rpm_procs\n condition: - (proc.name in (rpm_binaries, openscap_rpm_binaries) or proc.name in (salt-minion))\n\n- - list: deb_binaries\n items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, - apt, apt-get, aptitude,\n frontend, preinst, add-apt-reposit, apt-auto-remova, - apt-key,\n apt-listchanges, unattended-upgr, apt-add-reposit, apt-config, apt-cache\n - \ ]\n\n# The truncated dpkg-preconfigu is intentional, process names are\n# truncated - at the sysdig level.\n- list: package_mgmt_binaries\n items: [rpm_binaries, deb_binaries, - update-alternat, gem, pip, pip3, sane-utils.post, alternatives, chef-client, apk, - snapd]\n\n- macro: package_mgmt_procs\n condition: proc.name in (package_mgmt_binaries)\n\n- - macro: package_mgmt_ancestor_procs\n condition: proc.pname in (package_mgmt_binaries) - or\n proc.aname[2] in (package_mgmt_binaries) or\n proc.aname[3] - in (package_mgmt_binaries) or\n proc.aname[4] in (package_mgmt_binaries)\n\n- - macro: coreos_write_ssh_dir\n condition: (proc.name=update-ssh-keys and fd.name - startswith /home/core/.ssh)\n\n- macro: run_by_package_mgmt_binaries\n condition: - proc.aname in (package_mgmt_binaries, needrestart)\n\n- list: ssl_mgmt_binaries\n - \ items: [ca-certificates]\n\n- list: dhcp_binaries\n items: [dhclient, dhclient-script, - 11-dhclient]\n\n# A canonical set of processes that run other programs with different\n# - privileges or as a different user.\n- list: userexec_binaries\n items: [sudo, su, - suexec, critical-stack, dzdo]\n\n- list: known_setuid_binaries\n items: [\n sshd, - dbus-daemon-lau, ping, ping6, critical-stack-, pmmcli,\n filemng, PassengerAgent, - bwrap, osdetect, nginxmng, sw-engine-fpm,\n start-stop-daem\n ]\n\n- list: - user_mgmt_binaries\n items: [login_binaries, passwd_binaries, shadowutils_binaries]\n\n- - list: dev_creation_binaries\n items: [blkid, rename_device, update_engine, sgdisk]\n\n- - 
list: hids_binaries\n items: [aide, aide.wrapper, update-aide.con, logcheck, syslog-summary, - osqueryd, ossec-syscheckd]\n\n- list: vpn_binaries\n items: [openvpn]\n\n- list: - nomachine_binaries\n items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin]\n\n- - macro: system_procs\n condition: proc.name in (coreutils_binaries, user_mgmt_binaries)\n\n- - list: mail_binaries\n items: [\n sendmail, sendmail-msp, postfix, procmail, - exim4,\n pickup, showq, mailq, dovecot, imap-login, imap,\n mailmng-core, - pop3-login, dovecot-lda, pop3\n ]\n\n- list: mail_config_binaries\n items: [\n - \ update_conf, parse_mc, makemap_hash, newaliases, update_mk, update_tlsm4,\n - \ update_db, update_mc, ssmtp.postinst, mailq, postalias, postfix.config.,\n postfix.config, - postfix-script, postconf\n ]\n\n- list: sensitive_file_names\n items: [/etc/shadow, - /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf]\n\n- list: sensitive_directory_names\n - \ items: [/, /etc, /etc/, /root, /root/]\n\n- macro: sensitive_files\n condition: - >\n fd.name startswith /etc and\n (fd.name in (sensitive_file_names)\n or - fd.directory in (/etc/sudoers.d, /etc/pam.d))\n\n# Indicates that the process is - new. 
Currently detected using time\n# since process was started, using a threshold - of 5 seconds.\n- macro: proc_is_new\n condition: proc.duration <= 5000000000\n\n# - Network\n- macro: inbound\n condition: >\n (((evt.type in (accept,listen) and - evt.dir=<) or\n (evt.type in (recvfrom,recvmsg) and evt.dir=< and\n fd.l4proto - != tcp and fd.connected=false and fd.name_changed=true)) and\n (fd.typechar - = 4 or fd.typechar = 6) and\n (fd.ip != \"0.0.0.0\" and fd.net != \"127.0.0.0/8\") - and\n (evt.rawres >= 0 or evt.res = EINPROGRESS))\n\n# RFC1918 addresses were - assigned for private network usage\n- list: rfc_1918_addresses\n items: ['\"10.0.0.0/8\"', - '\"172.16.0.0/12\"', '\"192.168.0.0/16\"']\n\n- macro: outbound\n condition: >\n - \ (((evt.type = connect and evt.dir=<) or\n (evt.type in (sendto,sendmsg) - and evt.dir=< and\n fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) - and\n (fd.typechar = 4 or fd.typechar = 6) and\n (fd.ip != \"0.0.0.0\" and - fd.net != \"127.0.0.0/8\" and not fd.snet in (rfc_1918_addresses)) and\n (evt.rawres - >= 0 or evt.res = EINPROGRESS))\n\n# Very similar to inbound/outbound, but combines - the tests together\n# for efficiency.\n- macro: inbound_outbound\n condition: >\n - \ ((((evt.type in (accept,listen,connect) and evt.dir=<)) or\n (fd.typechar - = 4 or fd.typechar = 6)) and\n (fd.ip != \"0.0.0.0\" and fd.net != \"127.0.0.0/8\") - and\n (evt.rawres >= 0 or evt.res = EINPROGRESS))\n\n- macro: ssh_port\n condition: - fd.sport=22\n\n# In a local/user rules file, you could override this macro to\n# - enumerate the servers for which ssh connections are allowed. For\n# example, you - might have a ssh gateway host for which ssh connections\n# are allowed.\n#\n# In - the main falco rules file, there isn't any way to know the\n# specific hosts for - which ssh access is allowed, so this macro just\n# repeats ssh_port, which effectively - allows ssh from all hosts. 
In\n# the overridden macro, the condition would look - something like\n# \"fd.sip=\"a.b.c.d\" or fd.sip=\"e.f.g.h\" or ...\"\n- macro: - allowed_ssh_hosts\n condition: ssh_port\n\n- rule: Disallowed SSH Connection\n - \ desc: Detect any new ssh connection to a host other than those in an allowed group - of hosts\n condition: (inbound_outbound) and ssh_port and not allowed_ssh_hosts\n - \ output: Disallowed SSH Connection (command=%proc.cmdline connection=%fd.name user=%user.name - user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository)\n - \ priority: NOTICE\n tags: [network, mitre_remote_service]\n\n# These rules and - supporting macros are more of an example for how to\n# use the fd.*ip and fd.*ip.name - fields to match connection\n# information against ips, netmasks, and complete domain - names.\n#\n# To use this rule, you should modify consider_all_outbound_conns and\n# - populate allowed_{source,destination}_{ipaddrs,networks,domains} with the\n# values - that make sense for your environment.\n- macro: consider_all_outbound_conns\n condition: - (never_true)\n\n# Note that this can be either individual IPs or netmasks\n- list: - allowed_outbound_destination_ipaddrs\n items: ['\"127.0.0.1\"', '\"8.8.8.8\"']\n\n- - list: allowed_outbound_destination_networks\n items: ['\"127.0.0.1/8\"']\n\n- list: - allowed_outbound_destination_domains\n items: [google.com, www.yahoo.com]\n\n- - rule: Unexpected outbound connection destination\n desc: Detect any outbound connection - to a destination outside of an allowed set of ips, networks, or domain names\n condition: - >\n consider_all_outbound_conns and outbound and not\n ((fd.sip in (allowed_outbound_destination_ipaddrs)) - or\n (fd.snet in (allowed_outbound_destination_networks)) or\n (fd.sip.name - in (allowed_outbound_destination_domains)))\n output: Disallowed outbound connection - destination (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid - 
container_id=%container.id image=%container.image.repository)\n priority: NOTICE\n - \ tags: [network]\n\n- macro: consider_all_inbound_conns\n condition: (never_true)\n\n- - list: allowed_inbound_source_ipaddrs\n items: ['\"127.0.0.1\"']\n\n- list: allowed_inbound_source_networks\n - \ items: ['\"127.0.0.1/8\"', '\"10.0.0.0/8\"']\n\n- list: allowed_inbound_source_domains\n - \ items: [google.com]\n\n- rule: Unexpected inbound connection source\n desc: Detect - any inbound connection from a source outside of an allowed set of ips, networks, - or domain names\n condition: >\n consider_all_inbound_conns and inbound and - not\n ((fd.cip in (allowed_inbound_source_ipaddrs)) or\n (fd.cnet in (allowed_inbound_source_networks)) - or\n (fd.cip.name in (allowed_inbound_source_domains)))\n output: Disallowed - inbound connection source (command=%proc.cmdline connection=%fd.name user=%user.name - user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository)\n - \ priority: NOTICE\n tags: [network]\n\n- list: bash_config_filenames\n items: - [.bashrc, .bash_profile, .bash_history, .bash_login, .bash_logout, .inputrc, .profile]\n\n- - list: bash_config_files\n items: [/etc/profile, /etc/bashrc]\n\n# Covers both csh - and tcsh\n- list: csh_config_filenames\n items: [.cshrc, .login, .logout, .history, - .tcshrc, .cshdirs]\n\n- list: csh_config_files\n items: [/etc/csh.cshrc, /etc/csh.login]\n\n- - list: zsh_config_filenames\n items: [.zshenv, .zprofile, .zshrc, .zlogin, .zlogout]\n\n- - list: shell_config_filenames\n items: [bash_config_filenames, csh_config_filenames, - zsh_config_filenames]\n\n- list: shell_config_files\n items: [bash_config_files, - csh_config_files]\n\n- list: shell_config_directories\n items: [/etc/zsh]\n\n- - rule: Modify Shell Configuration File\n desc: Detect attempt to modify shell configuration - files\n condition: >\n open_write and\n (fd.filename in (shell_config_filenames) - or\n fd.name in (shell_config_files) or\n 
fd.directory in (shell_config_directories))\n - \ and not proc.name in (shell_binaries)\n and not exe_running_docker_save\n - \ output: >\n a shell configuration file has been modified (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline pcmdline=%proc.pcmdline file=%fd.name container_id=%container.id - image=%container.image.repository)\n priority:\n WARNING\n tags: [file, mitre_persistence]\n\n# - This rule is not enabled by default, as there are many legitimate\n# readers of - shell config files. If you want to enable it, modify the\n# following macro.\n\n- - macro: consider_shell_config_reads\n condition: (never_true)\n\n- rule: Read Shell - Configuration File\n desc: Detect attempts to read shell configuration files by - non-shell programs\n condition: >\n open_read and\n consider_shell_config_reads - and\n (fd.filename in (shell_config_filenames) or\n fd.name in (shell_config_files) - or\n fd.directory in (shell_config_directories)) and\n (not proc.name in - (shell_binaries))\n output: >\n a shell configuration file was read by a non-shell - program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name - container_id=%container.id image=%container.image.repository)\n priority:\n WARNING\n - \ tags: [file, mitre_discovery]\n\n- macro: consider_all_cron_jobs\n condition: - (never_true)\n\n- macro: user_known_cron_jobs\n condition: (never_true)\n\n- rule: - Schedule Cron Jobs\n desc: Detect cron jobs scheduled\n condition: >\n ((open_write - and fd.name startswith /etc/cron) or\n (spawned_process and proc.name = \"crontab\")) - and\n consider_all_cron_jobs and\n not user_known_cron_jobs\n output: >\n - \ Cron jobs were scheduled to run (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline\n file=%fd.name container_id=%container.id container_name=%container.name - image=%container.image.repository:%container.image.tag)\n priority:\n NOTICE\n - \ tags: [file, mitre_persistence]\n\n# Use this to test 
whether the event occurred - within a container.\n\n# When displaying container information in the output field, - use\n# %container.info, without any leading term (file=%fd.name\n# %container.info - user=%user.name user_loginuid=%user.loginuid, and not file=%fd.name\n# container=%container.info - user=%user.name user_loginuid=%user.loginuid). The output will change\n# based on - the context and whether or not -pk/-pm/-pc was specified on\n# the command line.\n- - macro: container\n condition: (container.id != host)\n\n- macro: container_started\n - \ condition: >\n ((evt.type = container or\n (spawned_process and proc.vpid=1)) - and\n container.image.repository != incomplete)\n\n- macro: interactive\n condition: - >\n ((proc.aname=sshd and proc.name != sshd) or\n proc.name=systemd-logind - or proc.name=login)\n\n- list: cron_binaries\n items: [anacron, cron, crond, crontab]\n\n# - https://github.com/liske/needrestart\n- list: needrestart_binaries\n items: [needrestart, - 10-dpkg, 20-rpm, 30-pacman]\n\n# Possible scripts run by sshkit\n- list: sshkit_script_binaries\n - \ items: [10_etc_sudoers., 10_passwd_group]\n\n- list: plesk_binaries\n items: - [sw-engine, sw-engine-fpm, sw-engine-kv, filemng, f2bmng]\n\n# System users that - should never log into a system. Consider adding your own\n# service users (e.g. - 'apache' or 'mysqld') here.\n- macro: system_users\n condition: user.name in (bin, - daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data)\n\n# These macros will - be removed soon. 
Only keeping them to maintain\n# compatiblity with some widely - used rules files.\n# Begin Deprecated\n- macro: parent_ansible_running_python\n - \ condition: (proc.pname in (python, pypy, python3) and proc.pcmdline contains ansible)\n\n- - macro: parent_bro_running_python\n condition: (proc.pname=python and proc.cmdline - contains /usr/share/broctl)\n\n- macro: parent_python_running_denyhosts\n condition: - >\n (proc.cmdline startswith \"denyhosts.py /usr/bin/denyhosts.py\" or\n (proc.pname=python - and\n (proc.pcmdline contains /usr/sbin/denyhosts or\n proc.pcmdline contains - /usr/local/bin/denyhosts.py)))\n\n- macro: parent_python_running_sdchecks\n condition: - >\n (proc.pname in (python, python2.7) and\n (proc.pcmdline contains /opt/draios/bin/sdchecks))\n\n- - macro: python_running_sdchecks\n condition: >\n (proc.name in (python, python2.7) - and\n (proc.cmdline contains /opt/draios/bin/sdchecks))\n\n- macro: parent_linux_image_upgrade_script\n - \ condition: proc.pname startswith linux-image-\n\n- macro: parent_java_running_echo\n - \ condition: (proc.pname=java and proc.cmdline startswith \"sh -c echo\")\n\n- macro: - parent_scripting_running_builds\n condition: >\n (proc.pname in (php,php5-fpm,php-fpm7.1,python,ruby,ruby2.3,ruby2.1,node,conda) - and (\n proc.cmdline startswith \"sh -c git\" or\n proc.cmdline startswith - \"sh -c date\" or\n proc.cmdline startswith \"sh -c /usr/bin/g++\" or\n proc.cmdline - startswith \"sh -c /usr/bin/gcc\" or\n proc.cmdline startswith \"sh -c gcc\" - or\n proc.cmdline startswith \"sh -c if type gcc\" or\n proc.cmdline - startswith \"sh -c cd '/var/www/edi/';LC_ALL=en_US.UTF-8 git\" or\n proc.cmdline - startswith \"sh -c /var/www/edi/bin/sftp.sh\" or\n proc.cmdline startswith - \"sh -c /usr/src/app/crxlsx/bin/linux/crxlsx\" or\n proc.cmdline startswith - \"sh -c make parent\" or\n proc.cmdline startswith \"node /jenkins/tools\" - or\n proc.cmdline startswith \"sh -c '/usr/bin/node'\" or\n proc.cmdline - startswith \"sh -c 
stty -a |\" or\n proc.pcmdline startswith \"node /opt/nodejs/bin/yarn\" - or\n proc.pcmdline startswith \"node /usr/local/bin/yarn\" or\n proc.pcmdline - startswith \"node /root/.config/yarn\" or\n proc.pcmdline startswith \"node - /opt/yarn/bin/yarn.js\"))\n\n\n- macro: httpd_writing_ssl_conf\n condition: >\n - \ (proc.pname=run-httpd and\n (proc.cmdline startswith \"sed -ri\" or proc.cmdline - startswith \"sed -i\") and\n (fd.name startswith /etc/httpd/conf.d/ or fd.name - startswith /etc/httpd/conf))\n\n- macro: userhelper_writing_etc_security\n condition: - (proc.name=userhelper and fd.name startswith /etc/security)\n\n- macro: parent_Xvfb_running_xkbcomp\n - \ condition: (proc.pname=Xvfb and proc.cmdline startswith 'sh -c \"/usr/bin/xkbcomp\"')\n\n- - macro: parent_nginx_running_serf\n condition: (proc.pname=nginx and proc.cmdline - startswith \"sh -c serf\")\n\n- macro: parent_node_running_npm\n condition: (proc.pcmdline - startswith \"node /usr/local/bin/npm\" or\n proc.pcmdline startswith - \"node /usr/local/nodejs/bin/npm\" or\n proc.pcmdline startswith \"node - /opt/rh/rh-nodejs6/root/usr/bin/npm\")\n\n- macro: parent_java_running_sbt\n condition: - (proc.pname=java and proc.pcmdline contains sbt-launch.jar)\n\n- list: known_container_shell_spawn_cmdlines\n - \ items: []\n\n- list: known_shell_spawn_binaries\n items: []\n\n## End Deprecated\n\n- - macro: ansible_running_python\n condition: (proc.name in (python, pypy, python3) - and proc.cmdline contains ansible)\n\n- macro: python_running_chef\n condition: - (proc.name=python and (proc.cmdline contains yum-dump.py or proc.cmdline=\"python - /usr/bin/chef-monitor.py\"))\n\n- macro: python_running_denyhosts\n condition: - >\n (proc.name=python and\n (proc.cmdline contains /usr/sbin/denyhosts or\n - \ proc.cmdline contains /usr/local/bin/denyhosts.py))\n\n# Qualys seems to run - a variety of shell subprocesses, at various\n# levels. 
This checks at a few levels - without the cost of a full\n# proc.aname, which traverses the full parent heirarchy.\n- - macro: run_by_qualys\n condition: >\n (proc.pname=qualys-cloud-ag or\n proc.aname[2]=qualys-cloud-ag - or\n proc.aname[3]=qualys-cloud-ag or\n proc.aname[4]=qualys-cloud-ag)\n\n- - macro: run_by_sumologic_securefiles\n condition: >\n ((proc.cmdline=\"usermod - -a -G sumologic_collector\" or\n proc.cmdline=\"groupadd sumologic_collector\") - and\n (proc.pname=secureFiles.sh and proc.aname[2]=java))\n\n- macro: run_by_yum\n - \ condition: ((proc.pname=sh and proc.aname[2]=yum) or\n (proc.aname[2]=sh - and proc.aname[3]=yum))\n\n- macro: run_by_ms_oms\n condition: >\n (proc.aname[3] - startswith omsagent- or\n proc.aname[3] startswith scx-)\n\n- macro: run_by_google_accounts_daemon\n - \ condition: >\n (proc.aname[1] startswith google_accounts or\n proc.aname[2] - startswith google_accounts or\n proc.aname[3] startswith google_accounts)\n\n# - Chef is similar.\n- macro: run_by_chef\n condition: (proc.aname[2]=chef_command_wr - or proc.aname[3]=chef_command_wr or\n proc.aname[2]=chef-client or - proc.aname[3]=chef-client or\n proc.name=chef-client)\n\n- macro: run_by_adclient\n - \ condition: (proc.aname[2]=adclient or proc.aname[3]=adclient or proc.aname[4]=adclient)\n\n- - macro: run_by_centrify\n condition: (proc.aname[2]=centrify or proc.aname[3]=centrify - or proc.aname[4]=centrify)\n\n- macro: run_by_puppet\n condition: (proc.aname[2]=puppet - or proc.aname[3]=puppet)\n\n# Also handles running semi-indirectly via scl\n- macro: - run_by_foreman\n condition: >\n (user.name=foreman and\n ((proc.pname in - (rake, ruby, scl) and proc.aname[5] in (tfm-rake,tfm-ruby)) or\n (proc.pname=scl - and proc.aname[2] in (tfm-rake,tfm-ruby))))\n\n- macro: java_running_sdjagent\n - \ condition: proc.name=java and proc.cmdline contains sdjagent.jar\n\n- macro: kubelet_running_loopback\n - \ condition: (proc.pname=kubelet and proc.name=loopback)\n\n- macro: 
python_mesos_marathon_scripting\n - \ condition: (proc.pcmdline startswith \"python3 /marathon-lb/marathon_lb.py\")\n\n- - macro: splunk_running_forwarder\n condition: (proc.pname=splunkd and proc.cmdline - startswith \"sh -c /opt/splunkforwarder\")\n\n- macro: parent_supervise_running_multilog\n - \ condition: (proc.name=multilog and proc.pname=supervise)\n\n- macro: supervise_writing_status\n - \ condition: (proc.name in (supervise,svc) and fd.name startswith \"/etc/sb/\")\n\n- - macro: pki_realm_writing_realms\n condition: (proc.cmdline startswith \"bash /usr/local/lib/pki/pki-realm\" - and fd.name startswith /etc/pki/realms)\n\n- macro: htpasswd_writing_passwd\n condition: - (proc.name=htpasswd and fd.name=/etc/nginx/.htpasswd)\n\n- macro: lvprogs_writing_conf\n - \ condition: >\n (proc.name in (dmeventd,lvcreate,pvscan,lvs) and\n (fd.name - startswith /etc/lvm/archive or\n fd.name startswith /etc/lvm/backup or\n fd.name - startswith /etc/lvm/cache))\n\n- macro: ovsdb_writing_openvswitch\n condition: - (proc.name=ovsdb-server and fd.directory=/etc/openvswitch)\n\n- macro: perl_running_plesk\n - \ condition: (proc.cmdline startswith \"perl /opt/psa/admin/bin/plesk_agent_manager\" - or\n proc.pcmdline startswith \"perl /opt/psa/admin/bin/plesk_agent_manager\")\n\n- - macro: perl_running_updmap\n condition: (proc.cmdline startswith \"perl /usr/bin/updmap\")\n\n- - macro: perl_running_centrifydc\n condition: (proc.cmdline startswith \"perl /usr/share/centrifydc\")\n\n- - macro: runuser_reading_pam\n condition: (proc.name=runuser and fd.directory=/etc/pam.d)\n\n# - CIS Linux Benchmark program\n- macro: linux_bench_reading_etc_shadow\n condition: - ((proc.aname[2]=linux-bench and\n proc.name in (awk,cut,grep)) and\n - \ (fd.name=/etc/shadow or\n fd.directory=/etc/pam.d))\n\n- - macro: parent_ucf_writing_conf\n condition: (proc.pname=ucf and proc.aname[2]=frontend)\n\n- - macro: consul_template_writing_conf\n condition: >\n ((proc.name=consul-template - and fd.name 
startswith /etc/haproxy) or\n (proc.name=reload.sh and proc.aname[2]=consul-template - and fd.name startswith /etc/ssl))\n\n- macro: countly_writing_nginx_conf\n condition: - (proc.cmdline startswith \"nodejs /opt/countly/bin\" and fd.name startswith /etc/nginx)\n\n- - list: ms_oms_binaries\n items: [omi.postinst, omsconfig.posti, scx.postinst, omsadmin.sh, - omiagent]\n\n- macro: ms_oms_writing_conf\n condition: >\n ((proc.name in (omiagent,omsagent,in_heartbeat_r*,omsadmin.sh,PerformInventor,dsc_host)\n - \ or proc.pname in (ms_oms_binaries)\n or proc.aname[2] in (ms_oms_binaries))\n - \ and (fd.name startswith /etc/opt/omi or fd.name startswith /etc/opt/microsoft/omsagent))\n\n- - macro: ms_scx_writing_conf\n condition: (proc.name in (GetLinuxOS.sh) and fd.name - startswith /etc/opt/microsoft/scx)\n\n- macro: azure_scripts_writing_conf\n condition: - (proc.pname startswith \"bash /var/lib/waagent/\" and fd.name startswith /etc/azure)\n\n- - macro: azure_networkwatcher_writing_conf\n condition: (proc.name in (NetworkWatcherA) - and fd.name=/etc/init.d/AzureNetworkWatcherAgent)\n\n- macro: couchdb_writing_conf\n - \ condition: (proc.name=beam.smp and proc.cmdline contains couchdb and fd.name startswith - /etc/couchdb)\n\n- macro: update_texmf_writing_conf\n condition: (proc.name=update-texmf - and fd.name startswith /etc/texmf)\n\n- macro: slapadd_writing_conf\n condition: - (proc.name=slapadd and fd.name startswith /etc/ldap)\n\n- macro: openldap_writing_conf\n - \ condition: (proc.pname=run-openldap.sh and fd.name startswith /etc/openldap)\n\n- - macro: ucpagent_writing_conf\n condition: (proc.name=apiserver and container.image.repository=docker/ucp-agent - and fd.name=/etc/authorization_config.cfg)\n\n- macro: iscsi_writing_conf\n condition: - (proc.name=iscsiadm and fd.name startswith /etc/iscsi)\n\n- macro: istio_writing_conf\n - \ condition: (proc.name=pilot-agent and fd.name startswith /etc/istio)\n\n- macro: - symantec_writing_conf\n condition: >\n 
((proc.name=symcfgd and fd.name startswith - /etc/symantec) or\n (proc.name=navdefutil and fd.name=/etc/symc-defutils.conf))\n\n- - macro: liveupdate_writing_conf\n condition: (proc.cmdline startswith \"java LiveUpdate\" - and fd.name in (/etc/liveupdate.conf, /etc/Product.Catalog.JavaLiveUpdate))\n\n- - macro: rancher_agent\n condition: (proc.name=agent and container.image.repository - contains \"rancher/agent\")\n\n- macro: rancher_network_manager\n condition: (proc.name=rancher-bridge - and container.image.repository contains \"rancher/network-manager\")\n\n- macro: - sosreport_writing_files\n condition: >\n (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport - and\n (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb))\n\n- - macro: pkgmgmt_progs_writing_pki\n condition: >\n (proc.name=urlgrabber-ext- - and proc.pname in (yum, yum-cron, repoquery) and\n (fd.name startswith /etc/pkt/nssdb - or fd.name startswith /etc/pki/nssdb))\n\n- macro: update_ca_trust_writing_pki\n - \ condition: (proc.pname=update-ca-trust and proc.name=trust and fd.name startswith - /etc/pki)\n\n- macro: brandbot_writing_os_release\n condition: proc.name=brandbot - and fd.name=/etc/os-release\n\n- macro: selinux_writing_conf\n condition: (proc.name - in (semodule,genhomedircon,sefcontext_comp) and fd.name startswith /etc/selinux)\n\n- - list: veritas_binaries\n items: [vxconfigd, sfcache, vxclustadm, vxdctl, vxprint, - vxdmpadm, vxdisk, vxdg, vxassist, vxtune]\n\n- macro: veritas_driver_script\n condition: - (proc.cmdline startswith \"perl /opt/VRTSsfmh/bin/mh_driver.pl\")\n\n- macro: veritas_progs\n - \ condition: (proc.name in (veritas_binaries) or veritas_driver_script)\n\n- macro: - veritas_writing_config\n condition: (veritas_progs and (fd.name startswith /etc/vx - or fd.name startswith /etc/opt/VRTS or fd.name startswith /etc/vom))\n\n- macro: - nginx_writing_conf\n condition: (proc.name in (nginx,nginx-ingress-c,nginx-ingress) - and (fd.name startswith 
/etc/nginx or fd.name startswith /etc/ingress-controller))\n\n- - macro: nginx_writing_certs\n condition: >\n (((proc.name=openssl and proc.pname=nginx-launch.sh) - or proc.name=nginx-launch.sh) and fd.name startswith /etc/nginx/certs)\n\n- macro: - chef_client_writing_conf\n condition: (proc.pcmdline startswith \"chef-client /opt/gitlab\" - and fd.name startswith /etc/gitlab)\n\n- macro: centrify_writing_krb\n condition: - (proc.name in (adjoin,addns) and fd.name startswith /etc/krb5)\n\n- macro: cockpit_writing_conf\n - \ condition: >\n ((proc.pname=cockpit-kube-la or proc.aname[2]=cockpit-kube-la)\n - \ and fd.name startswith /etc/cockpit)\n\n- macro: ipsec_writing_conf\n condition: - (proc.name=start-ipsec.sh and fd.directory=/etc/ipsec)\n\n- macro: exe_running_docker_save\n - \ condition: >\n proc.name = \"exe\"\n and (proc.cmdline contains \"/var/lib/docker\"\n - \ or proc.cmdline contains \"/var/run/docker\")\n and proc.pname in (dockerd, - docker, dockerd-current, docker-current)\n\n# Ideally we'd have a length check here - as well but sysdig\n# filterchecks don't have operators like len()\n- macro: sed_temporary_file\n - \ condition: (proc.name=sed and fd.name startswith \"/etc/sed\")\n\n- macro: python_running_get_pip\n - \ condition: (proc.cmdline startswith \"python get-pip.py\")\n\n- macro: python_running_ms_oms\n - \ condition: (proc.cmdline startswith \"python /var/lib/waagent/\")\n\n- macro: - gugent_writing_guestagent_log\n condition: (proc.name=gugent and fd.name=GuestAgent.log)\n\n- - macro: dse_writing_tmp\n condition: (proc.name=dse-entrypoint and fd.name=/root/tmp__)\n\n- - macro: zap_writing_state\n condition: (proc.name=java and proc.cmdline contains - \"jar /zap\" and fd.name startswith /root/.ZAP)\n\n- macro: airflow_writing_state\n - \ condition: (proc.name=airflow and fd.name startswith /root/airflow)\n\n- macro: - rpm_writing_root_rpmdb\n condition: (proc.name=rpm and fd.directory=/root/.rpmdb)\n\n- - macro: maven_writing_groovy\n 
condition: (proc.name=java and proc.cmdline contains - \"classpath /usr/local/apache-maven\" and fd.name startswith /root/.groovy)\n\n- - macro: chef_writing_conf\n condition: (proc.name=chef-client and fd.name startswith - /root/.chef)\n\n- macro: kubectl_writing_state\n condition: (proc.name in (kubectl,oc) - and fd.name startswith /root/.kube)\n\n- macro: java_running_cassandra\n condition: - (proc.name=java and proc.cmdline contains \"cassandra.jar\")\n\n- macro: cassandra_writing_state\n - \ condition: (java_running_cassandra and fd.directory=/root/.cassandra)\n\n# Istio\n- - macro: galley_writing_state\n condition: (proc.name=galley and fd.name in (known_istio_files))\n\n- - list: known_istio_files\n items: [/healthready, /healthliveness]\n\n- macro: calico_writing_state\n - \ condition: (proc.name=kube-controller and fd.name startswith /status.json and - k8s.pod.name startswith calico)\n\n- macro: calico_writing_envvars\n condition: - (proc.name=start_runit and fd.name startswith \"/etc/envvars\" and container.image.repository - endswith \"calico/node\")\n\n- list: repository_files\n items: [sources.list]\n\n- - list: repository_directories\n items: [/etc/apt/sources.list.d, /etc/yum.repos.d, - /etc/apt]\n\n- macro: access_repositories\n condition: (fd.directory in (repository_directories) - or\n (fd.name pmatch (repository_directories) and\n fd.filename - in (repository_files)))\n\n- macro: modify_repositories\n condition: (evt.arg.newpath - pmatch (repository_directories))\n\n- macro: user_known_update_package_registry\n - \ condition: (never_true)\n\n- rule: Update Package Repository\n desc: Detect package - repositories get updated\n condition: >\n ((open_write and access_repositories) - or (modify and modify_repositories))\n and not package_mgmt_procs\n and not - package_mgmt_ancestor_procs\n and not exe_running_docker_save\n and not user_known_update_package_registry\n - \ output: >\n Repository files get updated (user=%user.name 
user_loginuid=%user.loginuid - command=%proc.cmdline pcmdline=%proc.pcmdline file=%fd.name newpath=%evt.arg.newpath - container_id=%container.id image=%container.image.repository)\n priority:\n NOTICE\n - \ tags: [filesystem, mitre_persistence]\n\n# Users should overwrite this macro to - specify conditions under which a\n# write under the binary dir is ignored. For example, - it may be okay to\n# install a binary in the context of a ci/cd build.\n- macro: - user_known_write_below_binary_dir_activities\n condition: (never_true)\n\n- rule: - Write below binary dir\n desc: an attempt to write to any file below a set of binary - directories\n condition: >\n bin_dir and evt.dir = < and open_write\n and - not package_mgmt_procs\n and not exe_running_docker_save\n and not python_running_get_pip\n - \ and not python_running_ms_oms\n and not user_known_write_below_binary_dir_activities\n - \ output: >\n File below a known binary directory opened for writing (user=%user.name - user_loginuid=%user.loginuid\n command=%proc.cmdline file=%fd.name parent=%proc.pname - pcmdline=%proc.pcmdline gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository)\n - \ priority: ERROR\n tags: [filesystem, mitre_persistence]\n\n# If you'd like to - generally monitor a wider set of directories on top\n# of the ones covered by the - rule Write below binary dir, you can use\n# the following rule and lists.\n\n- list: - monitored_directories\n items: [/boot, /lib, /lib64, /usr/lib, /usr/local/lib, - /usr/local/sbin, /usr/local/bin, /root/.ssh, /etc/cardserver]\n\n# Until https://github.com/draios/sysdig/pull/1153, - which fixes\n# https://github.com/draios/sysdig/issues/1152, is widely available,\n# - we can't use glob operators to match pathnames. 
Until then, we do a\n# looser check - to match ssh directories.\n# When fixed, we will use \"fd.name glob '/home/*/.ssh/*'\"\n- - macro: user_ssh_directory\n condition: (fd.name startswith '/home' and fd.name - contains '.ssh')\n\n# google_accounts_(daemon)\n- macro: google_accounts_daemon_writing_ssh\n - \ condition: (proc.name=google_accounts and user_ssh_directory)\n\n- macro: cloud_init_writing_ssh\n - \ condition: (proc.name=cloud-init and user_ssh_directory)\n\n- macro: mkinitramfs_writing_boot\n - \ condition: (proc.pname in (mkinitramfs, update-initramf) and fd.directory=/boot)\n\n- - macro: monitored_dir\n condition: >\n (fd.directory in (monitored_directories)\n - \ or user_ssh_directory)\n and not mkinitramfs_writing_boot\n\n# Add conditions - to this macro (probably in a separate file,\n# overwriting this macro) to allow - for specific combinations of\n# programs writing below monitored directories.\n#\n# - Its default value is an expression that always is false, which\n# becomes true when - the \"not ...\" in the rule is applied.\n- macro: user_known_write_monitored_dir_conditions\n - \ condition: (never_true)\n\n- rule: Write below monitored dir\n desc: an attempt - to write to any file below a set of binary directories\n condition: >\n evt.dir - = < and open_write and monitored_dir\n and not package_mgmt_procs\n and not - coreos_write_ssh_dir\n and not exe_running_docker_save\n and not python_running_get_pip\n - \ and not python_running_ms_oms\n and not google_accounts_daemon_writing_ssh\n - \ and not cloud_init_writing_ssh\n and not user_known_write_monitored_dir_conditions\n - \ output: >\n File below a monitored directory opened for writing (user=%user.name - user_loginuid=%user.loginuid\n command=%proc.cmdline file=%fd.name parent=%proc.pname - pcmdline=%proc.pcmdline gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository)\n - \ priority: ERROR\n tags: [filesystem, mitre_persistence]\n\n# This rule is disabled - by 
default as many system management tools\n# like ansible, etc can read these files/paths. - Enable it using this macro.\n\n- macro: consider_ssh_reads\n condition: (never_true)\n\n- - macro: user_known_read_ssh_information_activities\n condition: (never_true)\n\n- - rule: Read ssh information\n desc: Any attempt to read files below ssh directories - by non-ssh programs\n condition: >\n ((open_read or open_directory) and\n consider_ssh_reads - and\n (user_ssh_directory or fd.name startswith /root/.ssh) and\n not user_known_read_ssh_information_activities - and\n not proc.name in (ssh_binaries))\n output: >\n ssh-related file/directory - read by non-ssh program (user=%user.name user_loginuid=%user.loginuid\n command=%proc.cmdline - file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id - image=%container.image.repository)\n priority: ERROR\n tags: [filesystem, mitre_discovery]\n\n- - list: safe_etc_dirs\n items: [/etc/cassandra, /etc/ssl/certs/java, /etc/logstash, - /etc/nginx/conf.d, /etc/container_environment, /etc/hrmconfig, /etc/fluent/configs.d]\n\n- - macro: fluentd_writing_conf_files\n condition: (proc.name=start-fluentd and fd.name - in (/etc/fluent/fluent.conf, /etc/td-agent/td-agent.conf))\n\n- macro: qualys_writing_conf_files\n - \ condition: (proc.name=qualys-cloud-ag and fd.name=/etc/qualys/cloud-agent/qagent-log.conf)\n\n- - macro: git_writing_nssdb\n condition: (proc.name=git-remote-http and fd.directory=/etc/pki/nssdb)\n\n- - macro: plesk_writing_keys\n condition: (proc.name in (plesk_binaries) and fd.name - startswith /etc/sw/keys)\n\n- macro: plesk_install_writing_apache_conf\n condition: - (proc.cmdline startswith \"bash -hB /usr/lib/plesk-9.0/services/webserver.apache - configure\"\n and fd.name=\"/etc/apache2/apache2.conf.tmp\")\n\n- macro: - plesk_running_mktemp\n condition: (proc.name=mktemp and proc.aname[3] in (plesk_binaries))\n\n- - macro: networkmanager_writing_resolv_conf\n condition: 
proc.aname[2]=nm-dispatcher - and fd.name=/etc/resolv.conf\n\n- macro: add_shell_writing_shells_tmp\n condition: - (proc.name=add-shell and fd.name=/etc/shells.tmp)\n\n- macro: duply_writing_exclude_files\n - \ condition: (proc.name=touch and proc.pcmdline startswith \"bash /usr/bin/duply\" - and fd.name startswith \"/etc/duply\")\n\n- macro: xmlcatalog_writing_files\n condition: - (proc.name=update-xmlcatal and fd.directory=/etc/xml)\n\n- macro: datadog_writing_conf\n - \ condition: ((proc.cmdline startswith \"python /opt/datadog-agent\" or\n proc.cmdline - startswith \"entrypoint.sh /entrypoint.sh datadog start\" or\n proc.cmdline - startswith \"agent.py /opt/datadog-agent\")\n and fd.name startswith - \"/etc/dd-agent\")\n\n- macro: rancher_writing_conf\n condition: ((proc.name in - (healthcheck, lb-controller, rancher-dns)) and\n (container.image.repository - contains \"rancher/healthcheck\" or\n container.image.repository contains - \"rancher/lb-service-haproxy\" or\n container.image.repository contains - \"rancher/dns\") and\n (fd.name startswith \"/etc/haproxy\" or fd.name - startswith \"/etc/rancher-dns\"))\n\n- macro: rancher_writing_root\n condition: - (proc.name=rancher-metadat and\n (container.image.repository contains - \"rancher/metadata\" or container.image.repository contains \"rancher/lb-service-haproxy\") - and\n fd.name startswith \"/answers.json\")\n\n- macro: checkpoint_writing_state\n - \ condition: (proc.name=checkpoint and\n container.image.repository - contains \"coreos/pod-checkpointer\" and\n fd.name startswith \"/etc/kubernetes\")\n\n- - macro: jboss_in_container_writing_passwd\n condition: >\n ((proc.cmdline=\"run-java.sh - /opt/jboss/container/java/run/run-java.sh\"\n or proc.cmdline=\"run-java.sh - /opt/run-java/run-java.sh\")\n and container\n and fd.name=/etc/passwd)\n\n- - macro: curl_writing_pki_db\n condition: (proc.name=curl and fd.directory=/etc/pki/nssdb)\n\n- - macro: haproxy_writing_conf\n condition: ((proc.name in 
(update-haproxy-,haproxy_reload.) - or proc.pname in (update-haproxy-,haproxy_reload,haproxy_reload.))\n and - (fd.name=/etc/openvpn/client.map or fd.name startswith /etc/haproxy))\n\n- macro: - java_writing_conf\n condition: (proc.name=java and fd.name=/etc/.java/.systemPrefs/.system.lock)\n\n- - macro: rabbitmq_writing_conf\n condition: (proc.name=rabbitmq-server and fd.directory=/etc/rabbitmq)\n\n- - macro: rook_writing_conf\n condition: (proc.name=toolbox.sh and container.image.repository=rook/toolbox\n - \ and fd.directory=/etc/ceph)\n\n- macro: httpd_writing_conf_logs\n - \ condition: (proc.name=httpd and fd.name startswith /etc/httpd/)\n\n- macro: mysql_writing_conf\n - \ condition: >\n ((proc.name in (start-mysql.sh, run-mysqld) or proc.pname=start-mysql.sh) - and\n (fd.name startswith /etc/mysql or fd.directory=/etc/my.cnf.d))\n\n- macro: - redis_writing_conf\n condition: >\n (proc.name in (run-redis, redis-launcher.) - and (fd.name=/etc/redis.conf or fd.name startswith /etc/redis))\n\n- macro: openvpn_writing_conf\n - \ condition: (proc.name in (openvpn,openvpn-entrypo) and fd.name startswith /etc/openvpn)\n\n- - macro: php_handlers_writing_conf\n condition: (proc.name=php_handlers_co and fd.name=/etc/psa/php_versions.json)\n\n- - macro: sed_writing_temp_file\n condition: >\n ((proc.aname[3]=cron_start.sh - and fd.name startswith /etc/security/sed) or\n (proc.name=sed and (fd.name startswith - /etc/apt/sources.list.d/sed or\n fd.name startswith /etc/apt/sed - or\n fd.name startswith /etc/apt/apt.conf.d/sed)))\n\n- - macro: cron_start_writing_pam_env\n condition: (proc.cmdline=\"bash /usr/sbin/start-cron\" - and fd.name=/etc/security/pam_env.conf)\n\n# In some cases dpkg-reconfigur runs - commands that modify /etc. 
Not\n# putting the full set of package management programs - yet.\n- macro: dpkg_scripting\n condition: (proc.aname[2] in (dpkg-reconfigur, - dpkg-preconfigu))\n\n- macro: ufw_writing_conf\n condition: (proc.name=ufw and - fd.directory=/etc/ufw)\n\n- macro: calico_writing_conf\n condition: >\n (((proc.name - = calico-node) or\n (container.image.repository=gcr.io/projectcalico-org/node - and proc.name in (start_runit, cp)) or\n (container.image.repository=gcr.io/projectcalico-org/cni - and proc.name=sed))\n and fd.name startswith /etc/calico)\n\n- macro: prometheus_conf_writing_conf\n - \ condition: (proc.name=prometheus-conf and fd.name startswith /etc/prometheus/config_out)\n\n- - macro: openshift_writing_conf\n condition: (proc.name=oc and fd.name startswith - /etc/origin/node)\n\n- macro: keepalived_writing_conf\n condition: (proc.name=keepalived - and fd.name=/etc/keepalived/keepalived.conf)\n\n- macro: etcd_manager_updating_dns\n - \ condition: (container and proc.name=etcd-manager and fd.name=/etc/hosts)\n\n- - macro: automount_using_mtab\n condition: (proc.pname = automount and fd.name startswith - /etc/mtab)\n\n- macro: mcafee_writing_cma_d\n condition: (proc.name=macompatsvc - and fd.directory=/etc/cma.d)\n\n- macro: avinetworks_supervisor_writing_ssh\n condition: - >\n (proc.cmdline=\"se_supervisor.p /opt/avi/scripts/se_supervisor.py -d\" and\n - \ (fd.name startswith /etc/ssh/known_host_ or\n fd.name startswith /etc/ssh/ssh_monitor_config_ - or\n fd.name startswith /etc/ssh/ssh_config_))\n\n- macro: multipath_writing_conf\n - \ condition: (proc.name = multipath and fd.name startswith /etc/multipath/)\n\n# - Add conditions to this macro (probably in a separate file,\n# overwriting this macro) - to allow for specific combinations of\n# programs writing below specific directories - below\n# /etc. 
fluentd_writing_conf_files is a good example to follow, as it\n# - specifies both the program doing the writing as well as the specific\n# files it - is allowed to modify.\n#\n# In this file, it just takes one of the programs in the - base macro\n# and repeats it.\n\n- macro: user_known_write_etc_conditions\n condition: - proc.name=confd\n\n# This is a placeholder for user to extend the whitelist for - write below etc rule\n- macro: user_known_write_below_etc_activities\n condition: - (never_true)\n\n- macro: write_etc_common\n condition: >\n etc_dir and evt.dir - = < and open_write\n and proc_name_exists\n and not proc.name in (passwd_binaries, - shadowutils_binaries, sysdigcloud_binaries,\n package_mgmt_binaries, - ssl_mgmt_binaries, dhcp_binaries,\n dev_creation_binaries, - shell_mgmt_binaries,\n mail_config_binaries,\n sshkit_script_binaries,\n - \ ldconfig.real, ldconfig, confd, gpg, insserv,\n apparmor_parser, - update-mime, tzdata.config, tzdata.postinst,\n systemd, - systemd-machine, systemd-sysuser,\n debconf-show, rollerd, - bind9.postinst, sv,\n gen_resolvconf., update-ca-certi, - certbot, runsv,\n qualys-cloud-ag, locales.postins, nomachine_binaries,\n - \ adclient, certutil, crlutil, pam-auth-update, parallels_insta,\n - \ openshift-launc, update-rc.d, puppet)\n and not (container - and proc.cmdline in (\"cp /run/secrets/kubernetes.io/serviceaccount/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt\"))\n - \ and not proc.pname in (sysdigcloud_binaries, mail_config_binaries, hddtemp.postins, - sshkit_script_binaries, locales.postins, deb_binaries, dhcp_binaries)\n and not - fd.name pmatch (safe_etc_dirs)\n and not fd.name in (/etc/container_environment.sh, - /etc/container_environment.json, /etc/motd, /etc/motd.svc)\n and not sed_temporary_file\n - \ and not exe_running_docker_save\n and not ansible_running_python\n and - not python_running_denyhosts\n and not fluentd_writing_conf_files\n and not - user_known_write_etc_conditions\n and not 
run_by_centrify\n and not run_by_adclient\n - \ and not qualys_writing_conf_files\n and not git_writing_nssdb\n and not - plesk_writing_keys\n and not plesk_install_writing_apache_conf\n and not plesk_running_mktemp\n - \ and not networkmanager_writing_resolv_conf\n and not run_by_chef\n and - not add_shell_writing_shells_tmp\n and not duply_writing_exclude_files\n and - not xmlcatalog_writing_files\n and not parent_supervise_running_multilog\n and - not supervise_writing_status\n and not pki_realm_writing_realms\n and not - htpasswd_writing_passwd\n and not lvprogs_writing_conf\n and not ovsdb_writing_openvswitch\n - \ and not datadog_writing_conf\n and not curl_writing_pki_db\n and not haproxy_writing_conf\n - \ and not java_writing_conf\n and not dpkg_scripting\n and not parent_ucf_writing_conf\n - \ and not rabbitmq_writing_conf\n and not rook_writing_conf\n and not php_handlers_writing_conf\n - \ and not sed_writing_temp_file\n and not cron_start_writing_pam_env\n and - not httpd_writing_conf_logs\n and not mysql_writing_conf\n and not openvpn_writing_conf\n - \ and not consul_template_writing_conf\n and not countly_writing_nginx_conf\n - \ and not ms_oms_writing_conf\n and not ms_scx_writing_conf\n and not azure_scripts_writing_conf\n - \ and not azure_networkwatcher_writing_conf\n and not couchdb_writing_conf\n - \ and not update_texmf_writing_conf\n and not slapadd_writing_conf\n and - not symantec_writing_conf\n and not liveupdate_writing_conf\n and not sosreport_writing_files\n - \ and not selinux_writing_conf\n and not veritas_writing_config\n and not - nginx_writing_conf\n and not nginx_writing_certs\n and not chef_client_writing_conf\n - \ and not centrify_writing_krb\n and not cockpit_writing_conf\n and not - ipsec_writing_conf\n and not httpd_writing_ssl_conf\n and not userhelper_writing_etc_security\n - \ and not pkgmgmt_progs_writing_pki\n and not update_ca_trust_writing_pki\n - \ and not brandbot_writing_os_release\n and not redis_writing_conf\n 
and - not openldap_writing_conf\n and not ucpagent_writing_conf\n and not iscsi_writing_conf\n - \ and not istio_writing_conf\n and not ufw_writing_conf\n and not calico_writing_conf\n - \ and not calico_writing_envvars\n and not prometheus_conf_writing_conf\n and - not openshift_writing_conf\n and not keepalived_writing_conf\n and not rancher_writing_conf\n - \ and not checkpoint_writing_state\n and not jboss_in_container_writing_passwd\n - \ and not etcd_manager_updating_dns\n and not user_known_write_below_etc_activities\n - \ and not automount_using_mtab\n and not mcafee_writing_cma_d\n and not - avinetworks_supervisor_writing_ssh\n and not multipath_writing_conf\n\n- rule: - Write below etc\n desc: an attempt to write to any file below /etc\n condition: - write_etc_common\n output: \"File below /etc opened for writing (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname pcmdline=%proc.pcmdline - file=%fd.name program=%proc.name gparent=%proc.aname[2] ggparent=%proc.aname[3] - gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository)\"\n - \ priority: ERROR\n tags: [filesystem, mitre_persistence]\n\n- list: known_root_files\n - \ items: [/root/.monit.state, /root/.auth_tokens, /root/.bash_history, /root/.ash_history, - /root/.aws/credentials,\n /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, - /root/.gitconfig.lock, /root/.babel.json, /root/.localstack,\n /root/.node_repl_history, - /root/.mongorc.js, /root/.dbshell, /root/.augeas/history, /root/.rnd, /root/.wget-hsts, - /health, /exec.fifo]\n\n- list: known_root_directories\n items: [/root/.oracle_jre_usage, - /root/.ssh, /root/.subversion, /root/.nami]\n\n- macro: known_root_conditions\n - \ condition: (fd.name startswith /root/orcexec.\n or fd.name startswith - /root/.m2\n or fd.name startswith /root/.npm\n or fd.name - startswith /root/.pki\n or fd.name startswith /root/.ivy2\n or - fd.name startswith /root/.config/Cypress\n or fd.name 
startswith /root/.config/pulse\n - \ or fd.name startswith /root/.config/configstore\n or - fd.name startswith /root/jenkins/workspace\n or fd.name startswith - /root/.jenkins\n or fd.name startswith /root/.cache\n or - fd.name startswith /root/.sbt\n or fd.name startswith /root/.java\n - \ or fd.name startswith /root/.glide\n or fd.name startswith - /root/.sonar\n or fd.name startswith /root/.v8flag\n or - fd.name startswith /root/infaagent\n or fd.name startswith /root/.local/lib/python\n - \ or fd.name startswith /root/.pm2\n or fd.name startswith - /root/.gnupg\n or fd.name startswith /root/.pgpass\n or - fd.name startswith /root/.theano\n or fd.name startswith /root/.gradle\n - \ or fd.name startswith /root/.android\n or fd.name startswith - /root/.ansible\n or fd.name startswith /root/.crashlytics\n or - fd.name startswith /root/.dbus\n or fd.name startswith /root/.composer\n - \ or fd.name startswith /root/.gconf\n or fd.name startswith - /root/.nv\n or fd.name startswith /root/.local/share/jupyter\n or - fd.name startswith /root/oradiag_root\n or fd.name startswith /root/workspace\n - \ or fd.name startswith /root/jvm\n or fd.name startswith - /root/.node-gyp)\n\n# Add conditions to this macro (probably in a separate file,\n# - overwriting this macro) to allow for specific combinations of\n# programs writing - below specific directories below\n# / or /root.\n#\n# In this file, it just takes - one of the condition in the base macro\n# and repeats it.\n- macro: user_known_write_root_conditions\n - \ condition: fd.name=/root/.bash_history\n\n# This is a placeholder for user to - extend the whitelist for write below root rule\n- macro: user_known_write_below_root_activities\n - \ condition: (never_true)\n\n- macro: runc_writing_exec_fifo\n condition: (proc.cmdline=\"runc:[1:CHILD] - init\" and fd.name=/exec.fifo)\n\n- macro: runc_writing_var_lib_docker\n condition: - (proc.cmdline=\"runc:[1:CHILD] init\" and evt.arg.filename startswith /var/lib/docker)\n\n- - 
macro: mysqlsh_writing_state\n condition: (proc.name=mysqlsh and fd.directory=/root/.mysqlsh)\n\n- - rule: Write below root\n desc: an attempt to write to any file directly below / - or /root\n condition: >\n root_dir and evt.dir = < and open_write\n and proc_name_exists\n - \ and not fd.name in (known_root_files)\n and not fd.directory pmatch (known_root_directories)\n - \ and not exe_running_docker_save\n and not gugent_writing_guestagent_log\n - \ and not dse_writing_tmp\n and not zap_writing_state\n and not airflow_writing_state\n - \ and not rpm_writing_root_rpmdb\n and not maven_writing_groovy\n and not - chef_writing_conf\n and not kubectl_writing_state\n and not cassandra_writing_state\n - \ and not galley_writing_state\n and not calico_writing_state\n and not - rancher_writing_root\n and not runc_writing_exec_fifo\n and not mysqlsh_writing_state\n - \ and not known_root_conditions\n and not user_known_write_root_conditions\n - \ and not user_known_write_below_root_activities\n output: \"File below / or - /root opened for writing (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline - parent=%proc.pname file=%fd.name program=%proc.name container_id=%container.id image=%container.image.repository)\"\n - \ priority: ERROR\n tags: [filesystem, mitre_persistence]\n\n- macro: cmp_cp_by_passwd\n - \ condition: proc.name in (cmp, cp) and proc.pname in (passwd, run-parts)\n\n- macro: - user_known_read_sensitive_files_activities\n condition: (never_true)\n\n- rule: - Read sensitive file trusted after startup\n desc: >\n an attempt to read any - sensitive file (e.g. files containing user/password/authentication\n information) - by a trusted program after startup. 
Trusted programs might read these files\n at - startup to load initial state, but not afterwards.\n condition: sensitive_files - and open_read and server_procs and not proc_is_new and proc.name!=\"sshd\" and not - user_known_read_sensitive_files_activities\n output: >\n Sensitive file opened - for reading by trusted program after startup (user=%user.name user_loginuid=%user.loginuid\n - \ command=%proc.cmdline parent=%proc.pname file=%fd.name parent=%proc.pname gparent=%proc.aname[2] - container_id=%container.id image=%container.image.repository)\n priority: WARNING\n - \ tags: [filesystem, mitre_credential_access]\n\n- list: read_sensitive_file_binaries\n - \ items: [\n iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, - sshd,\n vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update,\n - \ pam-auth-update, pam-config, /usr/sbin/spamd, polkit-agent-he, lsattr, file, - sosreport,\n scxcimservera, adclient, rtvscand, cockpit-session, userhelper, - ossec-syscheckd\n ]\n\n# Add conditions to this macro (probably in a separate - file,\n# overwriting this macro) to allow for specific combinations of\n# programs - accessing sensitive files.\n# fluentd_writing_conf_files is a good example to follow, - as it\n# specifies both the program doing the writing as well as the specific\n# - files it is allowed to modify.\n#\n# In this file, it just takes one of the macros - in the base rule\n# and repeats it.\n\n- macro: user_read_sensitive_file_conditions\n - \ condition: cmp_cp_by_passwd\n\n- list: read_sensitive_file_images\n items: []\n\n- - macro: user_read_sensitive_file_containers\n condition: (container and container.image.repository - in (read_sensitive_file_images))\n\n- rule: Read sensitive file untrusted\n desc: - >\n an attempt to read any sensitive file (e.g. files containing user/password/authentication\n - \ information). 
Exceptions are made for known trusted programs.\n condition: - >\n sensitive_files and open_read\n and proc_name_exists\n and not proc.name - in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries,\n cron_binaries, - read_sensitive_file_binaries, shell_binaries, hids_binaries,\n vpn_binaries, - mail_config_binaries, nomachine_binaries, sshkit_script_binaries,\n in.proftpd, - mandb, salt-minion, postgres_mgmt_binaries,\n google_oslogin_\n )\n and - not cmp_cp_by_passwd\n and not ansible_running_python\n and not proc.cmdline - contains /usr/bin/mandb\n and not run_by_qualys\n and not run_by_chef\n and - not run_by_google_accounts_daemon\n and not user_read_sensitive_file_conditions\n - \ and not perl_running_plesk\n and not perl_running_updmap\n and not veritas_driver_script\n - \ and not perl_running_centrifydc\n and not runuser_reading_pam\n and not - linux_bench_reading_etc_shadow\n and not user_known_read_sensitive_files_activities\n - \ and not user_read_sensitive_file_containers\n output: >\n Sensitive file - opened for reading by non-trusted program (user=%user.name user_loginuid=%user.loginuid - program=%proc.name\n command=%proc.cmdline file=%fd.name parent=%proc.pname gparent=%proc.aname[2] - ggparent=%proc.aname[3] gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository)\n - \ priority: WARNING\n tags: [filesystem, mitre_credential_access, mitre_discovery]\n\n- - macro: amazon_linux_running_python_yum\n condition: >\n (proc.name = python - and\n proc.pcmdline = \"python -m amazon_linux_extras system_motd\" and\n proc.cmdline - startswith \"python -c import yum;\")\n\n- macro: user_known_write_rpm_database_activities\n - \ condition: (never_true)\n\n# Only let rpm-related programs write to the rpm database\n- - rule: Write below rpm database\n desc: an attempt to write to the rpm database - by any non-rpm related program\n condition: >\n fd.name startswith /var/lib/rpm - and open_write\n and not rpm_procs\n and not 
ansible_running_python\n and - not python_running_chef\n and not exe_running_docker_save\n and not amazon_linux_running_python_yum\n - \ and not user_known_write_rpm_database_activities\n output: \"Rpm database opened - for writing by a non-rpm program (command=%proc.cmdline file=%fd.name parent=%proc.pname - pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository)\"\n - \ priority: ERROR\n tags: [filesystem, software_mgmt, mitre_persistence]\n\n- macro: - postgres_running_wal_e\n condition: (proc.pname=postgres and proc.cmdline startswith - \"sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e\")\n\n- macro: redis_running_prepost_scripts\n - \ condition: (proc.aname[2]=redis-server and (proc.cmdline contains \"redis-server.post-up.d\" - or proc.cmdline contains \"redis-server.pre-up.d\"))\n\n- macro: rabbitmq_running_scripts\n - \ condition: >\n (proc.pname=beam.smp and\n (proc.cmdline startswith \"sh - -c exec ps\" or\n proc.cmdline startswith \"sh -c exec inet_gethost\" or\n proc.cmdline= - \"sh -s unix:cmd\" or\n proc.cmdline= \"sh -c exec /bin/sh -s unix:cmd 2>&1\"))\n\n- - macro: rabbitmqctl_running_scripts\n condition: (proc.aname[2]=rabbitmqctl and - proc.cmdline startswith \"sh -c \")\n\n- macro: run_by_appdynamics\n condition: - (proc.pname=java and proc.pcmdline startswith \"java -jar -Dappdynamics\")\n\n- - macro: user_known_db_spawned_processes\n condition: (never_true)\n\n- rule: DB - program spawned process\n desc: >\n a database-server related program spawned - a new process other than itself.\n This shouldn\\'t occur and is a follow on - from some SQL injection attacks.\n condition: >\n proc.pname in (db_server_binaries)\n - \ and spawned_process\n and not proc.name in (db_server_binaries)\n and - not postgres_running_wal_e\n and not user_known_db_spawned_processes\n output: - >\n Database-related program spawned process other than itself (user=%user.name - user_loginuid=%user.loginuid\n program=%proc.cmdline 
parent=%proc.pname container_id=%container.id - image=%container.image.repository)\n priority: NOTICE\n tags: [process, database, - mitre_execution]\n\n- macro: user_known_modify_bin_dir_activities\n condition: - (never_true)\n\n- rule: Modify binary dirs\n desc: an attempt to modify any file - below a set of binary directories.\n condition: bin_dir_rename and modify and not - package_mgmt_procs and not exe_running_docker_save and not user_known_modify_bin_dir_activities\n - \ output: >\n File below known binary directory renamed/removed (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline\n pcmdline=%proc.pcmdline - operation=%evt.type file=%fd.name %evt.args container_id=%container.id image=%container.image.repository)\n - \ priority: ERROR\n tags: [filesystem, mitre_persistence]\n\n- macro: user_known_mkdir_bin_dir_activities\n - \ condition: (never_true)\n\n- rule: Mkdir binary dirs\n desc: an attempt to create - a directory below a set of binary directories.\n condition: >\n mkdir\n and - bin_dir_mkdir\n and not package_mgmt_procs\n and not user_known_mkdir_bin_dir_activities\n - \ and not exe_running_docker_save\n output: >\n Directory below known binary - directory created (user=%user.name user_loginuid=%user.loginuid\n command=%proc.cmdline - directory=%evt.arg.path container_id=%container.id image=%container.image.repository)\n - \ priority: ERROR\n tags: [filesystem, mitre_persistence]\n\n# This list allows - for easy additions to the set of commands allowed\n# to change thread namespace - without having to copy and override the\n# entire change thread namespace rule.\n- - list: user_known_change_thread_namespace_binaries\n items: [crio, multus]\n\n- - macro: user_known_change_thread_namespace_activities\n condition: (never_true)\n\n- - list: network_plugin_binaries\n items: [aws-cni, azure-vnet]\n\n- macro: calico_node\n - \ condition: (container.image.repository endswith calico/node and proc.name=calico-node)\n\n- - macro: 
weaveworks_scope\n condition: (container.image.repository endswith weaveworks/scope - and proc.name=scope)\n\n- rule: Change thread namespace\n desc: >\n an attempt - to change a program/thread\\'s namespace (commonly done\n as a part of creating - a container) by calling setns.\n condition: >\n evt.type=setns and evt.dir=<\n - \ and proc_name_exists\n and not (container.id=host and proc.name in (docker_binaries, - k8s_binaries, lxd_binaries, nsenter))\n and not proc.name in (sysdigcloud_binaries, - sysdig, calico, oci-umount, cilium-cni, network_plugin_binaries)\n and not proc.name - in (user_known_change_thread_namespace_binaries)\n and not proc.name startswith - \"runc\"\n and not proc.cmdline startswith \"containerd\"\n and not proc.pname - in (sysdigcloud_binaries, hyperkube, kubelet, protokube, dockerd, tini, aws)\n and - not python_running_sdchecks\n and not java_running_sdjagent\n and not kubelet_running_loopback\n - \ and not rancher_agent\n and not rancher_network_manager\n and not calico_node\n - \ and not weaveworks_scope\n and not user_known_change_thread_namespace_activities\n - \ output: >\n Namespace change (setns) by unexpected program (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline\n parent=%proc.pname %container.info - container_id=%container.id image=%container.image.repository:%container.image.tag)\n - \ priority: NOTICE\n tags: [process, mitre_privilege_escalation, mitre_lateral_movement]\n\n# - The binaries in this list and their descendents are *not* allowed\n# spawn shells. - This includes the binaries spawning shells directly as\n# well as indirectly. 
For - example, apache -> php/perl for\n# mod_{php,perl} -> some shell is also not allowed, - because the shell\n# has apache as an ancestor.\n\n- list: protected_shell_spawning_binaries\n - \ items: [\n http_server_binaries, db_server_binaries, nosql_server_binaries, - mail_binaries,\n fluentd, flanneld, splunkd, consul, smbd, runsv, PM2\n ]\n\n- - macro: parent_java_running_zookeeper\n condition: (proc.pname=java and proc.pcmdline - contains org.apache.zookeeper.server)\n\n- macro: parent_java_running_kafka\n condition: - (proc.pname=java and proc.pcmdline contains kafka.Kafka)\n\n- macro: parent_java_running_elasticsearch\n - \ condition: (proc.pname=java and proc.pcmdline contains org.elasticsearch.bootstrap.Elasticsearch)\n\n- - macro: parent_java_running_activemq\n condition: (proc.pname=java and proc.pcmdline - contains activemq.jar)\n\n- macro: parent_java_running_cassandra\n condition: (proc.pname=java - and (proc.pcmdline contains \"-Dcassandra.config.loader\" or proc.pcmdline contains - org.apache.cassandra.service.CassandraDaemon))\n\n- macro: parent_java_running_jboss_wildfly\n - \ condition: (proc.pname=java and proc.pcmdline contains org.jboss)\n\n- macro: - parent_java_running_glassfish\n condition: (proc.pname=java and proc.pcmdline contains - com.sun.enterprise.glassfish)\n\n- macro: parent_java_running_hadoop\n condition: - (proc.pname=java and proc.pcmdline contains org.apache.hadoop)\n\n- macro: parent_java_running_datastax\n - \ condition: (proc.pname=java and proc.pcmdline contains com.datastax)\n\n- macro: - nginx_starting_nginx\n condition: (proc.pname=nginx and proc.cmdline contains \"/usr/sbin/nginx - -c /etc/nginx/nginx.conf\")\n\n- macro: nginx_running_aws_s3_cp\n condition: (proc.pname=nginx - and proc.cmdline startswith \"sh -c /usr/local/bin/aws s3 cp\")\n\n- macro: consul_running_net_scripts\n - \ condition: (proc.pname=consul and (proc.cmdline startswith \"sh -c curl\" or proc.cmdline - startswith \"sh -c nc\"))\n\n- macro: 
consul_running_alert_checks\n condition: - (proc.pname=consul and proc.cmdline startswith \"sh -c /bin/consul-alerts\")\n\n- - macro: serf_script\n condition: (proc.cmdline startswith \"sh -c serf\")\n\n- macro: - check_process_status\n condition: (proc.cmdline startswith \"sh -c kill -0 \")\n\n# - In some cases, you may want to consider node processes run directly\n# in containers - as protected shell spawners. Examples include using\n# pm2-docker or pm2 start some-app.js - --no-daemon-mode as the direct\n# entrypoint of the container, and when the node - app is a long-lived\n# server using something like express.\n#\n# However, there - are other uses of node related to build pipelines for\n# which node is not really - a server but instead a general scripting\n# tool. In these cases, shells are very - likely and in these cases you\n# don't want to consider node processes protected - shell spawners.\n#\n# We have to choose one of these cases, so we consider node - processes\n# as unprotected by default. If you want to consider any node process\n# - run in a container as a protected shell spawner, override the below\n# macro to - remove the \"never_true\" clause, which allows it to take effect.\n- macro: possibly_node_in_container\n - \ condition: (never_true and (proc.pname=node and proc.aname[3]=docker-containe))\n\n# - Similarly, you may want to consider any shell spawned by apache\n# tomcat as suspect. 
- The famous apache struts attack (CVE-2017-5638)\n# could be exploited to do things - like spawn shells.\n#\n# However, many applications *do* use tomcat to run arbitrary - shells,\n# as a part of build pipelines, etc.\n#\n# Like for node, we make this - case opt-in.\n- macro: possibly_parent_java_running_tomcat\n condition: (never_true - and proc.pname=java and proc.pcmdline contains org.apache.catalina.startup.Bootstrap)\n\n- - macro: protected_shell_spawner\n condition: >\n (proc.aname in (protected_shell_spawning_binaries)\n - \ or parent_java_running_zookeeper\n or parent_java_running_kafka\n or parent_java_running_elasticsearch\n - \ or parent_java_running_activemq\n or parent_java_running_cassandra\n or - parent_java_running_jboss_wildfly\n or parent_java_running_glassfish\n or - parent_java_running_hadoop\n or parent_java_running_datastax\n or possibly_parent_java_running_tomcat\n - \ or possibly_node_in_container)\n\n- list: mesos_shell_binaries\n items: [mesos-docker-ex, - mesos-slave, mesos-health-ch]\n\n# Note that runsv is both in protected_shell_spawner - and the\n# exclusions by pname. This means that runsv can itself spawn shells\n# - (the ./run and ./finish scripts), but the processes runsv can not\n# spawn shells.\n- - rule: Run shell untrusted\n desc: an attempt to spawn a shell below a non-shell - application. 
Specific applications are monitored.\n condition: >\n spawned_process\n - \ and shell_procs\n and proc.pname exists\n and protected_shell_spawner\n - \ and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries,\n - \ needrestart_binaries,\n mesos_shell_binaries,\n - \ erl_child_setup, exechealthz,\n PM2, - PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf,\n lb-controller, - nvidia-installe, runsv, statsite, erlexec, calico-node,\n \"puma - reactor\")\n and not proc.cmdline in (known_shell_spawn_cmdlines)\n and not - proc.aname in (unicorn_launche)\n and not consul_running_net_scripts\n and - not consul_running_alert_checks\n and not nginx_starting_nginx\n and not nginx_running_aws_s3_cp\n - \ and not run_by_package_mgmt_binaries\n and not serf_script\n and not check_process_status\n - \ and not run_by_foreman\n and not python_mesos_marathon_scripting\n and - not splunk_running_forwarder\n and not postgres_running_wal_e\n and not redis_running_prepost_scripts\n - \ and not rabbitmq_running_scripts\n and not rabbitmqctl_running_scripts\n - \ and not run_by_appdynamics\n and not user_shell_container_exclusions\n output: - >\n Shell spawned by untrusted binary (user=%user.name user_loginuid=%user.loginuid - shell=%proc.name parent=%proc.pname\n cmdline=%proc.cmdline pcmdline=%proc.pcmdline - gparent=%proc.aname[2] ggparent=%proc.aname[3]\n aname[4]=%proc.aname[4] aname[5]=%proc.aname[5] - aname[6]=%proc.aname[6] aname[7]=%proc.aname[7] container_id=%container.id image=%container.image.repository)\n - \ priority: DEBUG\n tags: [shell, mitre_execution]\n\n- macro: allowed_openshift_registry_root\n - \ condition: >\n (container.image.repository startswith openshift3/ or\n container.image.repository - startswith registry.redhat.io/openshift3/ or\n container.image.repository startswith - registry.access.redhat.com/openshift3/)\n\n# Source: 
https://docs.openshift.com/enterprise/3.2/install_config/install/disconnected_install.html\n- - macro: openshift_image\n condition: >\n (allowed_openshift_registry_root and\n - \ (container.image.repository endswith /logging-deployment or\n container.image.repository - endswith /logging-elasticsearch or\n container.image.repository endswith /logging-kibana - or\n container.image.repository endswith /logging-fluentd or\n container.image.repository - endswith /logging-auth-proxy or\n container.image.repository endswith /metrics-deployer - or\n container.image.repository endswith /metrics-hawkular-metrics or\n container.image.repository - endswith /metrics-cassandra or\n container.image.repository endswith /metrics-heapster - or\n container.image.repository endswith /ose-haproxy-router or\n container.image.repository - endswith /ose-deployer or\n container.image.repository endswith /ose-sti-builder - or\n container.image.repository endswith /ose-docker-builder or\n container.image.repository - endswith /ose-pod or\n container.image.repository endswith /ose-node or\n - \ container.image.repository endswith /ose-docker-registry or\n container.image.repository - endswith /prometheus-node-exporter or\n container.image.repository endswith - /image-inspector))\n\n# These images are allowed both to run with --privileged and - to mount\n# sensitive paths from the host filesystem.\n#\n# NOTE: This list is only - provided for backwards compatibility with\n# older local falco rules files that - may have been appending to\n# trusted_images. To make customizations, it's better - to add images to\n# either privileged_images or falco_sensitive_mount_images.\n- - list: trusted_images\n items: []\n\n# NOTE: This macro is only provided for backwards - compatibility with\n# older local falco rules files that may have been appending - to\n# trusted_images. 
To make customizations, it's better to add containers to\n# - user_trusted_containers, user_privileged_containers or user_sensitive_mount_containers.\n- - macro: trusted_containers\n condition: (container.image.repository in (trusted_images))\n\n# - Add conditions to this macro (probably in a separate file,\n# overwriting this macro) - to specify additional containers that are\n# trusted and therefore allowed to run - privileged *and* with sensitive\n# mounts.\n#\n# Like trusted_images, this is deprecated - in favor of\n# user_privileged_containers and user_sensitive_mount_containers and\n# - is only provided for backwards compatibility.\n#\n# In this file, it just takes - one of the images in trusted_containers\n# and repeats it.\n- macro: user_trusted_containers\n - \ condition: (never_true)\n\n- list: sematext_images\n items: [docker.io/sematext/sematext-agent-docker, - docker.io/sematext/agent, docker.io/sematext/logagent,\n registry.access.redhat.com/sematext/sematext-agent-docker,\n - \ registry.access.redhat.com/sematext/agent,\n registry.access.redhat.com/sematext/logagent]\n\n# - These container images are allowed to run with --privileged\n- list: falco_privileged_images\n - \ items: [\n docker.io/calico/node,\n calico/node,\n docker.io/cloudnativelabs/kube-router,\n - \ docker.io/docker/ucp-agent,\n docker.io/falcosecurity/falco,\n docker.io/mesosphere/mesos-slave,\n - \ docker.io/rook/toolbox,\n docker.io/sysdig/falco,\n docker.io/sysdig/sysdig,\n - \ falcosecurity/falco,\n gcr.io/google_containers/kube-proxy,\n gcr.io/google-containers/startup-script,\n - \ gcr.io/projectcalico-org/node,\n gke.gcr.io/kube-proxy,\n gke.gcr.io/gke-metadata-server,\n - \ gke.gcr.io/netd-amd64,\n gcr.io/google-containers/prometheus-to-sd,\n k8s.gcr.io/ip-masq-agent-amd64,\n - \ k8s.gcr.io/kube-proxy,\n k8s.gcr.io/prometheus-to-sd,\n quay.io/calico/node,\n - \ sysdig/falco,\n sysdig/sysdig,\n sematext_images\n ]\n\n- macro: falco_privileged_containers\n - \ condition: 
(openshift_image or\n user_trusted_containers or\n container.image.repository - in (trusted_images) or\n container.image.repository in (falco_privileged_images) - or\n container.image.repository startswith istio/proxy_ or\n container.image.repository - startswith quay.io/sysdig/)\n\n# Add conditions to this macro (probably in a separate - file,\n# overwriting this macro) to specify additional containers that are\n# allowed - to run privileged\n#\n# In this file, it just takes one of the images in falco_privileged_images\n# - and repeats it.\n- macro: user_privileged_containers\n condition: (never_true)\n\n- - list: rancher_images\n items: [\n rancher/network-manager, rancher/dns, rancher/agent,\n - \ rancher/lb-service-haproxy, rancher/metadata, rancher/healthcheck\n ]\n\n# - These container images are allowed to mount sensitive paths from the\n# host filesystem.\n- - list: falco_sensitive_mount_images\n items: [\n docker.io/sysdig/falco, docker.io/sysdig/sysdig, - sysdig/falco, sysdig/sysdig,\n docker.io/falcosecurity/falco, falcosecurity/falco,\n - \ gcr.io/google_containers/hyperkube,\n gcr.io/google_containers/kube-proxy, - docker.io/calico/node,\n docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, - docker.io/consul,\n docker.io/datadog/docker-dd-agent, docker.io/datadog/agent, - docker.io/docker/ucp-agent, docker.io/gliderlabs/logspout,\n docker.io/netdata/netdata, - docker.io/google/cadvisor, docker.io/prom/node-exporter,\n amazon/amazon-ecs-agent\n - \ ]\n\n- macro: falco_sensitive_mount_containers\n condition: (user_trusted_containers - or\n container.image.repository in (trusted_images) or\n container.image.repository - in (falco_sensitive_mount_images) or\n container.image.repository startswith - quay.io/sysdig/)\n\n# These container images are allowed to run with hostnetwork=true\n- - list: falco_hostnetwork_images\n items: [\n gcr.io/google-containers/prometheus-to-sd,\n - \ gcr.io/projectcalico-org/typha,\n gcr.io/projectcalico-org/node,\n 
gke.gcr.io/gke-metadata-server,\n - \ gke.gcr.io/kube-proxy,\n gke.gcr.io/netd-amd64,\n k8s.gcr.io/ip-masq-agent-amd64\n - \ k8s.gcr.io/prometheus-to-sd,\n ]\n\n# Add conditions to this macro (probably - in a separate file,\n# overwriting this macro) to specify additional containers - that are\n# allowed to perform sensitive mounts.\n#\n# In this file, it just takes - one of the images in falco_sensitive_mount_images\n# and repeats it.\n- macro: user_sensitive_mount_containers\n - \ condition: (never_true)\n\n- rule: Launch Privileged Container\n desc: Detect - the initial process started in a privileged container. Exceptions are made for known - trusted images.\n condition: >\n container_started and container\n and container.privileged=true\n - \ and not falco_privileged_containers\n and not user_privileged_containers\n - \ output: Privileged container started (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag)\n - \ priority: INFO\n tags: [container, cis, mitre_privilege_escalation, mitre_lateral_movement]\n\n# - For now, only considering a full mount of /etc as\n# sensitive. 
Ideally, this would - also consider all subdirectories\n# below /etc as well, but the globbing mechanism - used by sysdig\n# doesn't allow exclusions of a full pattern, only single characters.\n- - macro: sensitive_mount\n condition: (container.mount.dest[/proc*] != \"N/A\" or\n - \ container.mount.dest[/var/run/docker.sock] != \"N/A\" or\n container.mount.dest[/var/run/crio/crio.sock] - != \"N/A\" or\n container.mount.dest[/var/lib/kubelet] != \"N/A\" or\n - \ container.mount.dest[/var/lib/kubelet/pki] != \"N/A\" or\n container.mount.dest[/] - != \"N/A\" or\n container.mount.dest[/home/admin] != \"N/A\" or\n container.mount.dest[/etc] - != \"N/A\" or\n container.mount.dest[/etc/kubernetes] != \"N/A\" or\n - \ container.mount.dest[/etc/kubernetes/manifests] != \"N/A\" or\n container.mount.dest[/root*] - != \"N/A\")\n\n# The steps libcontainer performs to set up the root program for - a container are:\n# - clone + exec self to a program runc:[0:PARENT]\n# - clone - a program runc:[1:CHILD] which sets up all the namespaces\n# - clone a second program - runc:[2:INIT] + exec to the root program.\n# The parent of runc:[2:INIT] is runc:0:PARENT]\n# - As soon as 1:CHILD is created, 0:PARENT exits, so there's a race\n# where at the - time 2:INIT execs the root program, 0:PARENT might have\n# already exited, or - might still be around. So we handle both.\n# We also let runc:[1:CHILD] count as - the parent process, which can occur\n# when we lose events and lose track of state.\n\n- - macro: container_entrypoint\n condition: (not proc.pname exists or proc.pname in - (runc:[0:PARENT], runc:[1:CHILD], runc, docker-runc, exe, docker-runc-cur))\n\n- - rule: Launch Sensitive Mount Container\n desc: >\n Detect the initial process - started by a container that has a mount from a sensitive host directory\n (i.e. - /proc). 
Exceptions are made for known trusted images.\n condition: >\n container_started - and container\n and sensitive_mount\n and not falco_sensitive_mount_containers\n - \ and not user_sensitive_mount_containers\n output: Container with sensitive - mount started (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline - %container.info image=%container.image.repository:%container.image.tag mounts=%container.mounts)\n - \ priority: INFO\n tags: [container, cis, mitre_lateral_movement]\n\n# In a local/user - rules file, you could override this macro to\n# explicitly enumerate the container - images that you want to run in\n# your environment. In this main falco rules file, - there isn't any way\n# to know all the containers that can run, so any container - is\n# allowed, by using a filter that is guaranteed to evaluate to true.\n# In the - overridden macro, the condition would look something like\n# (container.image.repository - = vendor/container-1 or\n# container.image.repository = vendor/container-2 or ...)\n\n- - macro: allowed_containers\n condition: (container.id exists)\n\n- rule: Launch - Disallowed Container\n desc: >\n Detect the initial process started by a container - that is not in a list of allowed containers.\n condition: container_started and - container and not allowed_containers\n output: Container started and not in allowed - list (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info - image=%container.image.repository:%container.image.tag)\n priority: WARNING\n tags: - [container, mitre_lateral_movement]\n\n- macro: user_known_system_user_login\n condition: - (never_true)\n\n# Anything run interactively by root\n# - condition: evt.type != - switch and user.name = root and proc.name != sshd and interactive\n# output: \"Interactive - root (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)\"\n# priority: - WARNING\n\n- rule: System user interactive\n desc: an attempt to run interactive - commands by a 
system (i.e. non-login) user\n condition: spawned_process and system_users - and interactive and not user_known_system_user_login\n output: \"System user ran - an interactive command (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline - container_id=%container.id image=%container.image.repository)\"\n priority: INFO\n - \ tags: [users, mitre_remote_access_tools]\n\n# In some cases, a shell is expected - to be run in a container. For example, configuration\n# management software may - do this, which is expected.\n- macro: user_expected_terminal_shell_in_container_conditions\n - \ condition: (never_true)\n\n- rule: Terminal shell in container\n desc: A shell - was used as the entrypoint/exec point into a container with an attached terminal.\n - \ condition: >\n spawned_process and container\n and shell_procs and proc.tty - != 0\n and container_entrypoint\n and not user_expected_terminal_shell_in_container_conditions\n - \ output: >\n A shell was spawned in a container with an attached terminal (user=%user.name - user_loginuid=%user.loginuid %container.info\n shell=%proc.name parent=%proc.pname - cmdline=%proc.cmdline terminal=%proc.tty container_id=%container.id image=%container.image.repository)\n - \ priority: NOTICE\n tags: [container, shell, mitre_execution]\n\n# For some container - types (mesos), there isn't a container image to\n# work with, and the container - name is autogenerated, so there isn't\n# any stable aspect of the software to work - with. 
In this case, we\n# fall back to allowing certain command lines.\n\n- list: - known_shell_spawn_cmdlines\n items: [\n '\"sh -c uname -p 2> /dev/null\"',\n - \ '\"sh -c uname -s 2>&1\"',\n '\"sh -c uname -r 2>&1\"',\n '\"sh -c uname - -v 2>&1\"',\n '\"sh -c uname -a 2>&1\"',\n '\"sh -c ruby -v 2>&1\"',\n '\"sh - -c getconf CLK_TCK\"',\n '\"sh -c getconf PAGESIZE\"',\n '\"sh -c LC_ALL=C - LANG=C /sbin/ldconfig -p 2>/dev/null\"',\n '\"sh -c LANG=C /sbin/ldconfig -p - 2>/dev/null\"',\n '\"sh -c /sbin/ldconfig -p 2>/dev/null\"',\n '\"sh -c stty - -a 2>/dev/null\"',\n '\"sh -c stty -a < /dev/tty\"',\n '\"sh -c stty -g < - /dev/tty\"',\n '\"sh -c node index.js\"',\n '\"sh -c node index\"',\n '\"sh - -c node ./src/start.js\"',\n '\"sh -c node app.js\"',\n '\"sh -c node -e \\\"require(''nan'')\\\"\"',\n - \ '\"sh -c node -e \\\"require(''nan'')\\\")\"',\n '\"sh -c node $NODE_DEBUG_OPTION - index.js \"',\n '\"sh -c crontab -l 2\"',\n '\"sh -c lsb_release -a\"',\n - \ '\"sh -c lsb_release -is 2>/dev/null\"',\n '\"sh -c whoami\"',\n '\"sh - -c node_modules/.bin/bower-installer\"',\n '\"sh -c /bin/hostname -f 2> /dev/null\"',\n - \ '\"sh -c locale -a\"',\n '\"sh -c -t -i\"',\n '\"sh -c openssl version\"',\n - \ '\"bash -c id -Gn kafadmin\"',\n '\"sh -c /bin/sh -c ''date +%%s''\"'\n ]\n\n# - This list allows for easy additions to the set of commands allowed\n# to run shells - in containers without having to without having to copy\n# and override the entire - run shell in container macro. Once\n# https://github.com/draios/falco/issues/255 - is fixed this will be a\n# bit easier, as someone could append of any of the existing - lists.\n- list: user_known_shell_spawn_binaries\n items: []\n\n# This macro allows - for easy additions to the set of commands allowed\n# to run shells in containers - without having to override the entire\n# rule. 
Its default value is an expression - that always is false, which\n# becomes true when the \"not ...\" in the rule is - applied.\n- macro: user_shell_container_exclusions\n condition: (never_true)\n\n- - macro: login_doing_dns_lookup\n condition: (proc.name=login and fd.l4proto=udp - and fd.sport=53)\n\n# sockfamily ip is to exclude certain processes (like 'groups') - that communicate on unix-domain sockets\n# systemd can listen on ports to launch - things like sshd on demand\n- rule: System procs network activity\n desc: any network - activity performed by system binaries that are not expected to send or receive any - network traffic\n condition: >\n (fd.sockfamily = ip and (system_procs or proc.name - in (shell_binaries)))\n and (inbound_outbound)\n and not proc.name in (known_system_procs_network_activity_binaries)\n - \ and not login_doing_dns_lookup\n and not user_expected_system_procs_network_activity_conditions\n - \ output: >\n Known system binary sent/received network traffic\n (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id - image=%container.image.repository)\n priority: NOTICE\n tags: [network, mitre_exfiltration]\n\n# - This list allows easily whitelisting system proc names that are\n# expected to communicate - on the network.\n- list: known_system_procs_network_activity_binaries\n items: - [systemd, hostid, id]\n\n# This macro allows specifying conditions under which a - system binary\n# is allowed to communicate on the network. 
For instance, only specific\n# - proc.cmdline values could be allowed to be more granular in what is\n# allowed.\n- - macro: user_expected_system_procs_network_activity_conditions\n condition: (never_true)\n\n# - When filled in, this should look something like:\n# (proc.env contains \"HTTP_PROXY=http://my.http.proxy.com - \")\n# The trailing space is intentional so avoid matching on prefixes of\n# the - actual proxy.\n- macro: allowed_ssh_proxy_env\n condition: (always_true)\n\n- list: - http_proxy_binaries\n items: [curl, wget]\n\n- macro: http_proxy_procs\n condition: - (proc.name in (http_proxy_binaries))\n\n- rule: Program run with disallowed http - proxy env\n desc: An attempt to run a program with a disallowed HTTP_PROXY environment - variable\n condition: >\n spawned_process and\n http_proxy_procs and\n not - allowed_ssh_proxy_env and\n proc.env icontains HTTP_PROXY\n output: >\n Program - run with disallowed HTTP_PROXY environment variable\n (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline env=%proc.env parent=%proc.pname container_id=%container.id - image=%container.image.repository)\n priority: NOTICE\n tags: [host, users]\n\n# - In some environments, any attempt by a interpreted program (perl,\n# python, ruby, - etc) to listen for incoming connections or perform\n# outgoing connections might - be suspicious. 
These rules are not\n# enabled by default, but you can modify the - following macros to\n# enable them.\n\n- macro: consider_interpreted_inbound\n condition: - (never_true)\n\n- macro: consider_interpreted_outbound\n condition: (never_true)\n\n- - rule: Interpreted procs inbound network activity\n desc: Any inbound network activity - performed by any interpreted program (perl, python, ruby, etc.)\n condition: >\n - \ (inbound and consider_interpreted_inbound\n and interpreted_procs)\n output: - >\n Interpreted program received/listened for network traffic\n (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id - image=%container.image.repository)\n priority: NOTICE\n tags: [network, mitre_exfiltration]\n\n- - rule: Interpreted procs outbound network activity\n desc: Any outbound network - activity performed by any interpreted program (perl, python, ruby, etc.)\n condition: - >\n (outbound and consider_interpreted_outbound\n and interpreted_procs)\n - \ output: >\n Interpreted program performed outgoing network connection\n (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id - image=%container.image.repository)\n priority: NOTICE\n tags: [network, mitre_exfiltration]\n\n- - list: openvpn_udp_ports\n items: [1194, 1197, 1198, 8080, 9201]\n\n- list: l2tp_udp_ports\n - \ items: [500, 1701, 4500, 10000]\n\n- list: statsd_ports\n items: [8125]\n\n- - list: ntp_ports\n items: [123]\n\n# Some applications will connect a udp socket - to an address only to\n# test connectivity. Assuming the udp connect works, they - will follow\n# up with a tcp connect that actually sends/receives data.\n#\n# With - that in mind, we listed a few commonly seen ports here to avoid\n# some false positives. 
- In addition, we make the main rule opt-in, so\n# it's disabled by default.\n\n- - list: test_connect_ports\n items: [0, 9, 80, 3306]\n\n- macro: do_unexpected_udp_check\n - \ condition: (never_true)\n\n- list: expected_udp_ports\n items: [53, openvpn_udp_ports, - l2tp_udp_ports, statsd_ports, ntp_ports, test_connect_ports]\n\n- macro: expected_udp_traffic\n - \ condition: fd.port in (expected_udp_ports)\n\n- rule: Unexpected UDP Traffic\n - \ desc: UDP traffic not on port 53 (DNS) or other commonly used ports\n condition: - (inbound_outbound) and do_unexpected_udp_check and fd.l4proto=udp and not expected_udp_traffic\n - \ output: >\n Unexpected UDP Traffic Seen\n (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline connection=%fd.name proto=%fd.l4proto evt=%evt.type %evt.args - container_id=%container.id image=%container.image.repository)\n priority: NOTICE\n - \ tags: [network, mitre_exfiltration]\n\n# With the current restriction on system - calls handled by falco\n# (e.g. excluding read/write/sendto/recvfrom/etc, this rule - won't\n# trigger).\n# - rule: Ssh error in syslog\n# desc: any ssh errors (failed - logins, disconnects, ...) sent to syslog\n# condition: syslog and ssh_error_message - and evt.dir = <\n# output: \"sshd sent error message to syslog (error=%evt.buffer)\"\n# - \ priority: WARNING\n\n- macro: somebody_becoming_themself\n condition: ((user.name=nobody - and evt.arg.uid=nobody) or\n (user.name=www-data and evt.arg.uid=www-data) - or\n (user.name=_apt and evt.arg.uid=_apt) or\n (user.name=postfix - and evt.arg.uid=postfix) or\n (user.name=pki-agent and evt.arg.uid=pki-agent) - or\n (user.name=pki-acme and evt.arg.uid=pki-acme) or\n (user.name=nfsnobody - and evt.arg.uid=nfsnobody) or\n (user.name=postgres and evt.arg.uid=postgres))\n\n- - macro: nrpe_becoming_nagios\n condition: (proc.name=nrpe and evt.arg.uid=nagios)\n\n# - In containers, the user name might be for a uid that exists in the\n# container - but not on the host. 
(See\n# https://github.com/draios/sysdig/issues/954). So in - that case, allow\n# a setuid.\n- macro: known_user_in_container\n condition: (container - and user.name != \"N/A\")\n\n# Add conditions to this macro (probably in a separate - file,\n# overwriting this macro) to allow for specific combinations of\n# programs - changing users by calling setuid.\n#\n# In this file, it just takes one of the condition - in the base macro\n# and repeats it.\n- macro: user_known_non_sudo_setuid_conditions\n - \ condition: user.name=root\n\n# sshd, mail programs attempt to setuid to root even - when running as non-root. Excluding here to avoid meaningless FPs\n- rule: Non sudo - setuid\n desc: >\n an attempt to change users by calling setuid. sudo/su are - excluded. users \"root\" and \"nobody\"\n suing to itself are also excluded, - as setuid calls typically involve dropping privileges.\n condition: >\n evt.type=setuid - and evt.dir=>\n and (known_user_in_container or not container)\n and not user.name=root\n - \ and not somebody_becoming_themself\n and not proc.name in (known_setuid_binaries, - userexec_binaries, mail_binaries, docker_binaries,\n nomachine_binaries)\n - \ and not proc.name startswith \"runc:\"\n and not java_running_sdjagent\n - \ and not nrpe_becoming_nagios\n and not user_known_non_sudo_setuid_conditions\n - \ output: >\n Unexpected setuid call by non-sudo, non-root program (user=%user.name - user_loginuid=%user.loginuid cur_uid=%user.uid parent=%proc.pname\n command=%proc.cmdline - uid=%evt.arg.uid container_id=%container.id image=%container.image.repository)\n - \ priority: NOTICE\n tags: [users, mitre_privilege_escalation]\n\n- macro: user_known_user_management_activities\n - \ condition: (never_true)\n\n- macro: chage_list\n condition: (proc.name=chage - and (proc.cmdline contains \"-l\" or proc.cmdline contains \"--list\"))\n\n- rule: - User mgmt binaries\n desc: >\n activity by any programs that can manage users, - passwords, or permissions. 
sudo and su are excluded.\n Activity in containers - is also excluded--some containers create custom users on top\n of a base linux - distribution at startup.\n Some innocuous commandlines that don't actually change - anything are excluded.\n condition: >\n spawned_process and proc.name in (user_mgmt_binaries) - and\n not proc.name in (su, sudo, lastlog, nologin, unix_chkpwd) and not container - and\n not proc.pname in (cron_binaries, systemd, systemd.postins, udev.postinst, - run-parts) and\n not proc.cmdline startswith \"passwd -S\" and\n not proc.cmdline - startswith \"useradd -D\" and\n not proc.cmdline startswith \"systemd --version\" - and\n not run_by_qualys and\n not run_by_sumologic_securefiles and\n not - run_by_yum and\n not run_by_ms_oms and\n not run_by_google_accounts_daemon - and\n not chage_list and\n not user_known_user_management_activities\n output: - >\n User management binary command run outside of container\n (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname gparent=%proc.aname[2] - ggparent=%proc.aname[3] gggparent=%proc.aname[4])\n priority: NOTICE\n tags: [host, - users, mitre_persistence]\n\n- list: allowed_dev_files\n items: [\n /dev/null, - /dev/stdin, /dev/stdout, /dev/stderr,\n /dev/random, /dev/urandom, /dev/console, - /dev/kmsg\n ]\n\n- macro: user_known_create_files_below_dev_activities\n condition: - (never_true)\n\n# (we may need to add additional checks against false positives, - see:\n# https://bugs.launchpad.net/ubuntu/+source/rkhunter/+bug/86153)\n- rule: - Create files below dev\n desc: creating any files below /dev other than known programs - that manage devices. 
Some rootkits hide files in /dev.\n condition: >\n fd.directory - = /dev and\n (evt.type = creat or ((evt.type = open or evt.type = openat) and - evt.arg.flags contains O_CREAT))\n and not proc.name in (dev_creation_binaries)\n - \ and not fd.name in (allowed_dev_files)\n and not fd.name startswith /dev/tty\n - \ and not user_known_create_files_below_dev_activities\n output: \"File created - below /dev by untrusted program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline - file=%fd.name container_id=%container.id image=%container.image.repository)\"\n - \ priority: ERROR\n tags: [filesystem, mitre_persistence]\n\n\n# In a local/user - rules file, you could override this macro to\n# explicitly enumerate the container - images that you want to allow\n# access to EC2 metadata. In this main falco rules - file, there isn't\n# any way to know all the containers that should have access, - so any\n# container is alllowed, by repeating the \"container\" macro. In the\n# - overridden macro, the condition would look something like\n# (container.image.repository - = vendor/container-1 or\n# container.image.repository = vendor/container-2 or ...)\n- - macro: ec2_metadata_containers\n condition: container\n\n# On EC2 instances, 169.254.169.254 - is a special IP used to fetch\n# metadata about the instance. 
It may be desirable - to prevent access\n# to this IP from containers.\n- rule: Contact EC2 Instance Metadata - Service From Container\n desc: Detect attempts to contact the EC2 Instance Metadata - Service from a container\n condition: outbound and fd.sip=\"169.254.169.254\" and - container and not ec2_metadata_containers\n output: Outbound connection to EC2 - instance metadata service (command=%proc.cmdline connection=%fd.name %container.info - image=%container.image.repository:%container.image.tag)\n priority: NOTICE\n tags: - [network, aws, container, mitre_discovery]\n\n\n# This rule is not enabled by default, - since this rule is for cloud environment(GCP, AWS and Azure) only.\n# If you want - to enable this rule, overwrite the first macro,\n# And you can filter the container - that you want to allow access to metadata by overwriting the second macro.\n- macro: - consider_metadata_access\n condition: (never_true)\n\n- macro: user_known_metadata_access\n - \ condition: (k8s.ns.name = \"kube-system\")\n\n# On GCP, AWS and Azure, 169.254.169.254 - is a special IP used to fetch\n# metadata about the instance. The metadata could - be used to get credentials by attackers.\n- rule: Contact cloud metadata service - from container\n desc: Detect attempts to contact the Cloud Instance Metadata Service - from a container\n condition: outbound and fd.sip=\"169.254.169.254\" and container - and consider_metadata_access and not user_known_metadata_access\n output: Outbound - connection to cloud instance metadata service (command=%proc.cmdline connection=%fd.name - %container.info image=%container.image.repository:%container.image.tag)\n priority: - NOTICE\n tags: [network, container, mitre_discovery]\n\n\n# In a local/user rules - file, list the namespace or container images that are\n# allowed to contact the - K8s API Server from within a container. 
This\n# might cover cases where the K8s - infrastructure itself is running\n# within a container.\n- macro: k8s_containers\n - \ condition: >\n (container.image.repository in (gcr.io/google_containers/hyperkube-amd64,\n - \ gcr.io/google_containers/kube2sky, docker.io/sysdig/falco,\n docker.io/sysdig/sysdig, - docker.io/falcosecurity/falco,\n sysdig/falco, sysdig/sysdig, falcosecurity/falco) - or (k8s.ns.name = \"kube-system\"))\n\n- macro: k8s_api_server\n condition: (fd.sip.name=\"kubernetes.default.svc.cluster.local\")\n\n- - macro: user_known_contact_k8s_api_server_activities\n condition: (never_true)\n\n- - rule: Contact K8S API Server From Container\n desc: Detect attempts to contact - the K8S API Server from a container\n condition: >\n evt.type=connect and evt.dir=< - and \n (fd.typechar=4 or fd.typechar=6) and\n container and \n not k8s_containers - and\n k8s_api_server and\n not user_known_contact_k8s_api_server_activities\n - \ output: Unexpected connection to K8s API Server from container (command=%proc.cmdline - %container.info image=%container.image.repository:%container.image.tag connection=%fd.name)\n - \ priority: NOTICE\n tags: [network, k8s, container, mitre_discovery]\n\n# In a - local/user rules file, list the container images that are\n# allowed to contact - NodePort services from within a container. 
This\n# might cover cases where the K8s - infrastructure itself is running\n# within a container.\n#\n# By default, all containers - are allowed to contact NodePort services.\n- macro: nodeport_containers\n condition: - container\n\n- rule: Unexpected K8s NodePort Connection\n desc: Detect attempts - to use K8s NodePorts from a container\n condition: (inbound_outbound) and fd.sport - >= 30000 and fd.sport <= 32767 and container and not nodeport_containers\n output: - Unexpected K8s NodePort Connection (command=%proc.cmdline connection=%fd.name container_id=%container.id - image=%container.image.repository)\n priority: NOTICE\n tags: [network, k8s, container, - mitre_port_knocking]\n\n- list: network_tool_binaries\n items: [nc, ncat, nmap, - dig, tcpdump, tshark, ngrep, telnet, mitmproxy, socat, zmap]\n\n- macro: network_tool_procs\n - \ condition: (proc.name in (network_tool_binaries))\n\n# In a local/user rules file, - create a condition that matches legitimate uses\n# of a package management process - inside a container.\n#\n# For example:\n# - macro: user_known_package_manager_in_container\n# - \ condition: proc.cmdline=\"dpkg -l\"\n- macro: user_known_package_manager_in_container\n - \ condition: (never_true)\n\n# Container is supposed to be immutable. 
Package management - should be done in building the image.\n- rule: Launch Package Management Process - in Container\n desc: Package management process ran inside container\n condition: - >\n spawned_process\n and container\n and user.name != \"_apt\"\n and - package_mgmt_procs\n and not package_mgmt_ancestor_procs\n and not user_known_package_manager_in_container\n - \ output: >\n Package management process launched in container (user=%user.name - user_loginuid=%user.loginuid\n command=%proc.cmdline container_id=%container.id - container_name=%container.name image=%container.image.repository:%container.image.tag)\n - \ priority: ERROR\n tags: [process, mitre_persistence]\n\n- rule: Netcat Remote - Code Execution in Container\n desc: Netcat Program runs inside container that allows - remote code execution\n condition: >\n spawned_process and container and\n ((proc.name - = \"nc\" and (proc.args contains \"-e\" or proc.args contains \"-c\")) or\n (proc.name - = \"ncat\" and (proc.args contains \"--sh-exec\" or proc.args contains \"--exec\" - or proc.args contains \"-e \"\n or proc.args contains - \"-c \" or proc.args contains \"--lua-exec\"))\n )\n output: >\n Netcat runs - inside container that allows remote code execution (user=%user.name user_loginuid=%user.loginuid\n - \ command=%proc.cmdline container_id=%container.id container_name=%container.name - image=%container.image.repository:%container.image.tag)\n priority: WARNING\n tags: - [network, process, mitre_execution]\n\n- macro: user_known_network_tool_activities\n - \ condition: (never_true)\n\n- rule: Launch Suspicious Network Tool in Container\n - \ desc: Detect network tools launched inside container\n condition: >\n spawned_process - and container and network_tool_procs and not user_known_network_tool_activities\n - \ output: >\n Network tool launched in container (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline parent_process=%proc.pname\n container_id=%container.id - 
container_name=%container.name image=%container.image.repository:%container.image.tag)\n - \ priority: NOTICE\n tags: [network, process, mitre_discovery, mitre_exfiltration]\n\n# - This rule is not enabled by default, as there are legitimate use\n# cases for these - tools on hosts. If you want to enable it, modify the\n# following macro.\n- macro: - consider_network_tools_on_host\n condition: (never_true)\n\n- rule: Launch Suspicious - Network Tool on Host\n desc: Detect network tools launched on the host\n condition: - >\n spawned_process and\n not container and\n consider_network_tools_on_host - and\n network_tool_procs and\n not user_known_network_tool_activities\n output: - >\n Network tool launched on host (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline parent_process=%proc.pname)\n priority: NOTICE\n tags: [network, - process, mitre_discovery, mitre_exfiltration]\n\n- list: grep_binaries\n items: - [grep, egrep, fgrep]\n\n- macro: grep_commands\n condition: (proc.name in (grep_binaries))\n\n# - a less restrictive search for things that might be passwords/ssh/user etc.\n- macro: - grep_more\n condition: (never_true)\n\n- macro: private_key_or_password\n condition: - >\n (proc.args icontains \"BEGIN PRIVATE\" or\n proc.args icontains \"BEGIN - RSA PRIVATE\" or\n proc.args icontains \"BEGIN DSA PRIVATE\" or\n proc.args - icontains \"BEGIN EC PRIVATE\" or\n (grep_more and\n (proc.args icontains - \" pass \" or\n proc.args icontains \" ssh \" or\n proc.args icontains - \" user \"))\n )\n\n- rule: Search Private Keys or Passwords\n desc: >\n Detect - grep private keys or passwords activity.\n condition: >\n (spawned_process and\n - \ ((grep_commands and private_key_or_password) or\n (proc.name = \"find\" - and (proc.args contains \"id_rsa\" or proc.args contains \"id_dsa\")))\n )\n - \ output: >\n Grep private keys or passwords activities found\n (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id 
container_name=%container.name\n - \ image=%container.image.repository:%container.image.tag)\n priority:\n WARNING\n - \ tags: [process, mitre_credential_access]\n\n- list: log_directories\n items: - [/var/log, /dev/log]\n\n- list: log_files\n items: [syslog, auth.log, secure, kern.log, - cron, user.log, dpkg.log, last.log, yum.log, access_log, mysql.log, mysqld.log]\n\n- - macro: access_log_files\n condition: (fd.directory in (log_directories) or fd.filename - in (log_files))\n\n# a placeholder for whitelist log files that could be cleared. - Recommend the macro as (fd.name startswith \"/var/log/app1*\")\n- macro: allowed_clear_log_files\n - \ condition: (never_true)\n\n- macro: trusted_logging_images\n condition: (container.image.repository - endswith \"splunk/fluentd-hec\" or\n container.image.repository endswith - \"fluent/fluentd-kubernetes-daemonset\" or\n container.image.repository - endswith \"openshift3/ose-logging-fluentd\" or\n container.image.repository - endswith \"containernetworking/azure-npm\")\n\n- rule: Clear Log Activities\n desc: - Detect clearing of critical log files\n condition: >\n open_write and\n access_log_files - and\n evt.arg.flags contains \"O_TRUNC\" and\n not trusted_logging_images - and\n not allowed_clear_log_files\n output: >\n Log files were tampered (user=%user.name - user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id - image=%container.image.repository)\n priority:\n WARNING\n tags: [file, mitre_defense_evasion]\n\n- - list: data_remove_commands\n items: [shred, mkfs, mke2fs]\n\n- macro: clear_data_procs\n - \ condition: (proc.name in (data_remove_commands))\n\n- macro: user_known_remove_data_activities\n - \ condition: (never_true)\n\n- rule: Remove Bulk Data from Disk\n desc: Detect - process running to clear bulk data from disk\n condition: spawned_process and clear_data_procs - and not user_known_remove_data_activities\n output: >\n Bulk data has been removed - from disk 
(user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name - container_id=%container.id image=%container.image.repository)\n priority:\n WARNING\n - \ tags: [process, mitre_persistence]\n\n- macro: modify_shell_history\n condition: - >\n (modify and (\n evt.arg.name contains \"bash_history\" or\n evt.arg.name - contains \"zsh_history\" or\n evt.arg.name contains \"fish_read_history\" or\n - \ evt.arg.name endswith \"fish_history\" or\n evt.arg.oldpath contains - \"bash_history\" or\n evt.arg.oldpath contains \"zsh_history\" or\n evt.arg.oldpath - contains \"fish_read_history\" or\n evt.arg.oldpath endswith \"fish_history\" - or\n evt.arg.path contains \"bash_history\" or\n evt.arg.path contains - \"zsh_history\" or\n evt.arg.path contains \"fish_read_history\" or\n evt.arg.path - endswith \"fish_history\"))\n\n- macro: truncate_shell_history\n condition: >\n - \ (open_write and (\n fd.name contains \"bash_history\" or\n fd.name - contains \"zsh_history\" or\n fd.name contains \"fish_read_history\" or\n fd.name - endswith \"fish_history\") and evt.arg.flags contains \"O_TRUNC\")\n\n- macro: var_lib_docker_filepath\n - \ condition: (evt.arg.name startswith /var/lib/docker or fd.name startswith /var/lib/docker)\n\n- - rule: Delete or rename shell history\n desc: Detect shell history deletion\n condition: - >\n (modify_shell_history or truncate_shell_history) and\n not var_lib_docker_filepath - and\n not proc.name in (docker_binaries)\n output: >\n Shell history had - been deleted or renamed (user=%user.name user_loginuid=%user.loginuid type=%evt.type - command=%proc.cmdline fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath - %container.info)\n priority:\n WARNING\n tags: [process, mitre_defense_evasion]\n\n# - This rule is deprecated and will/should never be triggered. 
Keep it here for backport - compatibility.\n# Rule Delete or rename shell history is the preferred rule to use - now.\n- rule: Delete Bash History\n desc: Detect bash history deletion\n condition: - >\n ((spawned_process and proc.name in (shred, rm, mv) and proc.args contains - \"bash_history\") or\n (open_write and fd.name contains \"bash_history\" and - evt.arg.flags contains \"O_TRUNC\"))\n output: >\n Shell history had been deleted - or renamed (user=%user.name user_loginuid=%user.loginuid type=%evt.type command=%proc.cmdline - fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath - %container.info)\n priority:\n WARNING\n tags: [process, mitre_defense_evasion]\n\n- - macro: consider_all_chmods\n condition: (always_true)\n\n- list: user_known_chmod_applications\n - \ items: [hyperkube, kubelet]\n\n# This macro should be overridden in user rules - as needed. This is useful if a given application\n# should not be ignored alltogether - with the user_known_chmod_applications list, but only in\n# specific conditions.\n- - macro: user_known_set_setuid_or_setgid_bit_conditions\n condition: (never_true)\n\n- - rule: Set Setuid or Setgid bit\n desc: >\n When the setuid or setgid bits are - set for an application,\n this means that the application will run with the privileges - of the owning user or group respectively.\n Detect setuid or setgid bits set - via chmod\n condition: >\n consider_all_chmods and chmod and (evt.arg.mode contains - \"S_ISUID\" or evt.arg.mode contains \"S_ISGID\")\n and not proc.name in (user_known_chmod_applications)\n - \ and not exe_running_docker_save\n and not user_known_set_setuid_or_setgid_bit_conditions\n - \ output: >\n Setuid or setgid bit is set via chmod (fd=%evt.arg.fd filename=%evt.arg.filename - mode=%evt.arg.mode user=%user.name user_loginuid=%user.loginuid process=%proc.name\n - \ command=%proc.cmdline container_id=%container.id container_name=%container.name - 
image=%container.image.repository:%container.image.tag)\n priority:\n NOTICE\n - \ tags: [process, mitre_persistence]\n\n- list: exclude_hidden_directories\n items: - [/root/.cassandra]\n\n# To use this rule, you should modify consider_hidden_file_creation.\n- - macro: consider_hidden_file_creation\n condition: (never_true)\n\n- macro: user_known_create_hidden_file_activities\n - \ condition: (never_true)\n\n- rule: Create Hidden Files or Directories\n desc: - Detect hidden files or directories created\n condition: >\n ((modify and evt.arg.newpath - contains \"/.\") or\n (mkdir and evt.arg.path contains \"/.\") or\n (open_write - and evt.arg.flags contains \"O_CREAT\" and fd.name contains \"/.\" and not fd.name - pmatch (exclude_hidden_directories))) and\n consider_hidden_file_creation and\n - \ not user_known_create_hidden_file_activities\n and not exe_running_docker_save\n - \ output: >\n Hidden file or directory created (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline\n file=%fd.name newpath=%evt.arg.newpath container_id=%container.id - container_name=%container.name image=%container.image.repository:%container.image.tag)\n - \ priority:\n NOTICE\n tags: [file, mitre_persistence]\n\n- list: remote_file_copy_binaries\n - \ items: [rsync, scp, sftp, dcp]\n\n- macro: remote_file_copy_procs\n condition: - (proc.name in (remote_file_copy_binaries))\n\n# Users should overwrite this macro - to specify conditions under which a\n# Custom condition for use of remote file copy - tool in container\n- macro: user_known_remote_file_copy_activities\n condition: - (never_true)\n\n- rule: Launch Remote File Copy Tools in Container\n desc: Detect - remote file copy tools launched in container\n condition: >\n spawned_process\n - \ and container\n and remote_file_copy_procs\n and not user_known_remote_file_copy_activities\n - \ output: >\n Remote file copy tool launched in container (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline 
parent_process=%proc.pname\n container_id=%container.id - container_name=%container.name image=%container.image.repository:%container.image.tag)\n - \ priority: NOTICE\n tags: [network, process, mitre_lateral_movement, mitre_exfiltration]\n\n- - rule: Create Symlink Over Sensitive Files\n desc: Detect symlink created over sensitive - files\n condition: >\n create_symlink and\n (evt.arg.target in (sensitive_file_names) - or evt.arg.target in (sensitive_directory_names))\n output: >\n Symlinks created - over senstivie files (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline - target=%evt.arg.target linkpath=%evt.arg.linkpath parent_process=%proc.pname)\n - \ priority: NOTICE\n tags: [file, mitre_exfiltration]\n\n- list: miner_ports\n - \ items: [\n 25, 3333, 3334, 3335, 3336, 3357, 4444,\n 5555, 5556, - 5588, 5730, 6099, 6666, 7777,\n 7778, 8000, 8001, 8008, 8080, 8118, 8333,\n - \ 8888, 8899, 9332, 9999, 14433, 14444,\n 45560, 45700\n ]\n\n- - list: miner_domains\n items: [\n \"asia1.ethpool.org\",\"ca.minexmr.com\",\n - \ \"cn.stratum.slushpool.com\",\"de.minexmr.com\",\n \"eth-ar.dwarfpool.com\",\"eth-asia.dwarfpool.com\",\n - \ \"eth-asia1.nanopool.org\",\"eth-au.dwarfpool.com\",\n \"eth-au1.nanopool.org\",\"eth-br.dwarfpool.com\",\n - \ \"eth-cn.dwarfpool.com\",\"eth-cn2.dwarfpool.com\",\n \"eth-eu.dwarfpool.com\",\"eth-eu1.nanopool.org\",\n - \ \"eth-eu2.nanopool.org\",\"eth-hk.dwarfpool.com\",\n \"eth-jp1.nanopool.org\",\"eth-ru.dwarfpool.com\",\n - \ \"eth-ru2.dwarfpool.com\",\"eth-sg.dwarfpool.com\",\n \"eth-us-east1.nanopool.org\",\"eth-us-west1.nanopool.org\",\n - \ \"eth-us.dwarfpool.com\",\"eth-us2.dwarfpool.com\",\n \"eu.stratum.slushpool.com\",\"eu1.ethermine.org\",\n - \ \"eu1.ethpool.org\",\"fr.minexmr.com\",\n \"mine.moneropool.com\",\"mine.xmrpool.net\",\n - \ \"pool.minexmr.com\",\"pool.monero.hashvault.pro\",\n \"pool.supportxmr.com\",\"sg.minexmr.com\",\n - \ \"sg.stratum.slushpool.com\",\"stratum-eth.antpool.com\",\n 
\"stratum-ltc.antpool.com\",\"stratum-zec.antpool.com\",\n - \ \"stratum.antpool.com\",\"us-east.stratum.slushpool.com\",\n \"us1.ethermine.org\",\"us1.ethpool.org\",\n - \ \"us2.ethermine.org\",\"us2.ethpool.org\",\n \"xmr-asia1.nanopool.org\",\"xmr-au1.nanopool.org\",\n - \ \"xmr-eu1.nanopool.org\",\"xmr-eu2.nanopool.org\",\n \"xmr-jp1.nanopool.org\",\"xmr-us-east1.nanopool.org\",\n - \ \"xmr-us-west1.nanopool.org\",\"xmr.crypto-pool.fr\",\n \"xmr.pool.minergate.com\"\n - \ ]\n\n- list: https_miner_domains\n items: [\n \"ca.minexmr.com\",\n \"cn.stratum.slushpool.com\",\n - \ \"de.minexmr.com\",\n \"fr.minexmr.com\",\n \"mine.moneropool.com\",\n - \ \"mine.xmrpool.net\",\n \"pool.minexmr.com\",\n \"sg.minexmr.com\",\n - \ \"stratum-eth.antpool.com\",\n \"stratum-ltc.antpool.com\",\n \"stratum-zec.antpool.com\",\n - \ \"stratum.antpool.com\",\n \"xmr.crypto-pool.fr\"\n ]\n\n- list: http_miner_domains\n - \ items: [\n \"ca.minexmr.com\",\n \"de.minexmr.com\",\n \"fr.minexmr.com\",\n - \ \"mine.moneropool.com\",\n \"mine.xmrpool.net\",\n \"pool.minexmr.com\",\n - \ \"sg.minexmr.com\",\n \"xmr.crypto-pool.fr\"\n ]\n\n# Add rule based on - crypto mining IOCs\n- macro: minerpool_https\n condition: (fd.sport=\"443\" and - fd.sip.name in (https_miner_domains))\n\n- macro: minerpool_http\n condition: (fd.sport=\"80\" - and fd.sip.name in (http_miner_domains))\n\n- macro: minerpool_other\n condition: - (fd.sport in (miner_ports) and fd.sip.name in (miner_domains))\n\n- macro: net_miner_pool\n - \ condition: (evt.type in (sendto, sendmsg) and evt.dir=< and (fd.net != \"127.0.0.0/8\" - and not fd.snet in (rfc_1918_addresses)) and ((minerpool_http) or (minerpool_https) - or (minerpool_other)))\n\n- macro: trusted_images_query_miner_domain_dns\n condition: - (container.image.repository in (docker.io/falcosecurity/falco, falcosecurity/falco))\n - \ append: false\n\n# The rule is disabled by default.\n# Note: falco will send DNS - request to resolve miner pool domain which may 
trigger alerts in your environment.\n- - rule: Detect outbound connections to common miner pool ports\n desc: Miners typically - connect to miner pools on common ports.\n condition: net_miner_pool and not trusted_images_query_miner_domain_dns\n - \ enabled: false\n output: Outbound connection to IP/Port flagged by cryptoioc.ch - (command=%proc.cmdline port=%fd.rport ip=%fd.rip container=%container.info image=%container.image.repository)\n - \ priority: CRITICAL\n tags: [network, mitre_execution]\n\n- rule: Detect crypto - miners using the Stratum protocol\n desc: Miners typically specify the mining pool - to connect to with a URI that begins with 'stratum+tcp'\n condition: spawned_process - and proc.cmdline contains \"stratum+tcp\"\n output: Possible miner running (command=%proc.cmdline - container=%container.info image=%container.image.repository)\n priority: CRITICAL\n - \ tags: [process, mitre_execution]\n\n- list: k8s_client_binaries\n items: [docker, - kubectl, crictl]\n\n- list: user_known_k8s_ns_kube_system_images\n items: [\n k8s.gcr.io/fluentd-gcp-scaler,\n - \ k8s.gcr.io/node-problem-detector/node-problem-detector\n ]\n\n- list: user_known_k8s_images\n - \ items: [\n mcr.microsoft.com/aks/hcp/hcp-tunnel-front\n ]\n\n# Whitelist - for known docker client binaries run inside container\n# - k8s.gcr.io/fluentd-gcp-scaler - in GCP/GKE\n- macro: user_known_k8s_client_container\n condition: >\n (k8s.ns.name=\"kube-system\" - and container.image.repository in (user_known_k8s_ns_kube_system_images)) or container.image.repository - in (user_known_k8s_images)\n\n- macro: user_known_k8s_client_container_parens\n - \ condition: (user_known_k8s_client_container)\n\n- rule: The docker client is executed - in a container\n desc: Detect a k8s client tool executed inside a container\n condition: - spawned_process and container and not user_known_k8s_client_container_parens and - proc.name in (k8s_client_binaries)\n output: \"Docker or kubernetes client executed - in 
container (user=%user.name user_loginuid=%user.loginuid %container.info parent=%proc.pname - cmdline=%proc.cmdline image=%container.image.repository:%container.image.tag)\"\n - \ priority: WARNING\n tags: [container, mitre_execution]\n\n\n# This rule is enabled - by default. \n# If you want to disable it, modify the following macro.\n- macro: - consider_packet_socket_communication\n condition: (always_true)\n\n- list: user_known_packet_socket_binaries\n - \ items: []\n\n- rule: Packet socket created in container\n desc: Detect new packet - socket at the device driver (OSI Layer 2) level in a container. Packet socket could - be used for ARP Spoofing and privilege escalation(CVE-2020-14386) by attacker.\n - \ condition: evt.type=socket and evt.arg[0]=AF_PACKET and consider_packet_socket_communication - and container and not proc.name in (user_known_packet_socket_binaries)\n output: - Packet socket was created in a container (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline socket_info=%evt.args container_id=%container.id container_name=%container.name - image=%container.image.repository:%container.image.tag)\n priority: NOTICE\n tags: - [network, mitre_discovery]\n\n# Change to (always_true) to enable rule 'Network - connection outside local subnet'\n- macro: enabled_rule_network_only_subnet\n condition: - (never_true)\n\n# Images that are allowed to have outbound traffic\n- list: images_allow_network_outside_subnet\n - \ items: []\n\n# Namespaces where the rule is enforce\n- list: namespace_scope_network_only_subnet\n - \ items: []\n\n- macro: network_local_subnet\n condition: >\n fd.rnet in (rfc_1918_addresses) - or\n fd.ip = \"0.0.0.0\" or\n fd.net = \"127.0.0.0/8\"\n\n# # How to test:\n# - # Change macro enabled_rule_network_only_subnet to condition: always_true\n# # Add - 'default' to namespace_scope_network_only_subnet\n# # Run:\n# kubectl run --generator=run-pod/v1 - -n default -i --tty busybox --image=busybox --rm -- wget google.com -O 
/var/google.html\n# - # Check logs running\n\n- rule: Network Connection outside Local Subnet\n desc: - Detect traffic to image outside local subnet.\n condition: >\n enabled_rule_network_only_subnet - and\n inbound_outbound and\n container and\n not network_local_subnet and\n - \ k8s.ns.name in (namespace_scope_network_only_subnet)\n output: >\n Network - connection outside local subnet\n (command=%proc.cmdline connection=%fd.name - user=%user.name user_loginuid=%user.loginuid container_id=%container.id\n image=%container.image.repository - namespace=%k8s.ns.name\n fd.rip.name=%fd.rip.name fd.lip.name=%fd.lip.name fd.cip.name=%fd.cip.name - fd.sip.name=%fd.sip.name)\n priority: WARNING\n tags: [network]\n\n- macro: allowed_port\n - \ condition: (never_true)\n\n- list: allowed_image\n items: [] # add image to monitor, - i.e.: bitnami/nginx\n\n- list: authorized_server_binaries\n items: [] # add binary - to allow, i.e.: nginx\n\n- list: authorized_server_port\n items: [] # add port - to allow, i.e.: 80\n\n# # How to test:\n# kubectl run --image=nginx nginx-app --port=80 - --env=\"DOMAIN=cluster\"\n# kubectl expose deployment nginx-app --port=80 --name=nginx-http - --type=LoadBalancer\n# # On minikube:\n# minikube service nginx-http\n# # On general - K8s:\n# kubectl get services\n# kubectl cluster-info\n# # Visit the Nginx service - and port, should not fire.\n# # Change rule to different port, then different process - name, and test again that it fires.\n\n- rule: Outbound or Inbound Traffic not to - Authorized Server Process and Port\n desc: Detect traffic that is not to authorized - server process and port.\n condition: >\n allowed_port and\n inbound_outbound - and\n container and\n container.image.repository in (allowed_image) and\n - \ not proc.name in (authorized_server_binary) and\n not fd.sport in (authorized_server_port)\n - \ output: >\n Network connection outside authorized port and binary\n (command=%proc.cmdline - connection=%fd.name user=%user.name 
user_loginuid=%user.loginuid container_id=%container.id\n - \ image=%container.image.repository)\n priority: WARNING\n tags: [network]\n\n- - macro: user_known_stand_streams_redirect_activities\n condition: (never_true)\n\n- - rule: Redirect STDOUT/STDIN to Network Connection in Container\n desc: Detect redirecting - stdout/stdin to network connection in container (potential reverse shell).\n condition: - evt.type=dup and evt.dir=> and container and fd.num in (0, 1, 2) and fd.type in - (\"ipv4\", \"ipv6\") and not user_known_stand_streams_redirect_activities\n output: - >\n Redirect stdout/stdin to network connection (user=%user.name user_loginuid=%user.loginuid - %container.info process=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty - container_id=%container.id image=%container.image.repository fd.name=%fd.name fd.num=%fd.num - fd.type=%fd.type fd.sip=%fd.sip)\n priority: WARNING\n\n# The two Container Drift - rules below will fire when a new executable is created in a container.\n# There - are two ways to create executables - file is created with execution permissions - or permissions change of existing file.\n# We will use a new sysdig filter, is_open_exec, - to find all files creations with execution permission, and will trace all chmods - in a container.\n# The use case we are targeting here is an attempt to execute code - that was not shipped as part of a container (drift) -\n# an activity that might - be malicious or non-compliant.\n# Two things to pay attention to:\n# 1) In most - cases, 'docker cp' will not be identified, but the assumption is that if an attacker - gained access to the container runtime daemon, they are already privileged\n# 2) - Drift rules will be noisy in environments in which containers are built (e.g. docker - build)\n# These two rules are not enabled by default. 
Use `never_true` in macro - condition to enable them.\n\n- macro: user_known_container_drift_activities\n condition: - (always_true)\n\n- rule: Container Drift Detected (chmod)\n desc: New executable - created in a container due to chmod\n condition: >\n chmod and\n consider_all_chmods - and\n container and\n not runc_writing_exec_fifo and\n not runc_writing_var_lib_docker - and\n not user_known_container_drift_activities and\n evt.rawres>=0 and\n - \ ((evt.arg.mode contains \"S_IXUSR\") or\n (evt.arg.mode contains \"S_IXGRP\") - or\n (evt.arg.mode contains \"S_IXOTH\"))\n output: Drift detected (chmod), - new executable created in a container (user=%user.name user_loginuid=%user.loginuid - command=%proc.cmdline filename=%evt.arg.filename name=%evt.arg.name mode=%evt.arg.mode - event=%evt.type)\n priority: ERROR\n\n# ****************************************************************************\n# - * \"Container Drift Detected (open+create)\" requires FALCO_ENGINE_VERSION 6 *\n# - ****************************************************************************\n- - rule: Container Drift Detected (open+create)\n desc: New executable created in - a container due to open+create\n condition: >\n evt.type in (open,openat,creat) - and\n evt.is_open_exec=true and\n container and\n not runc_writing_exec_fifo - and\n not runc_writing_var_lib_docker and\n not user_known_container_drift_activities - and\n evt.rawres>=0\n output: Drift detected (open+create), new executable created - in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline - filename=%evt.arg.filename name=%evt.arg.name mode=%evt.arg.mode event=%evt.type)\n - \ priority: ERROR\n\n- list: c2_server_ip_list\n items: []\n\n- rule: Outbound - Connection to C2 Servers\n desc: Detect outbound connection to command & control - servers\n condition: outbound and fd.sip in (c2_server_ip_list)\n output: Outbound - connection to C2 server (command=%proc.cmdline connection=%fd.name user=%user.name - 
user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository)\n - \ priority: WARNING\n tags: [network]\n\n- list: white_listed_modules\n items: - []\n \n- rule: Linux Kernel Module Injection Detected\n desc: Detect kernel - module was injected (from container).\n condition: spawned_process and container - and proc.name=insmod and not proc.args in (white_listed_modules)\n output: Linux - Kernel Module injection using insmod detected (user=%user.name user_loginuid=%user.loginuid - parent_process=%proc.pname module=%proc.args)\n priority: WARNING\n tags: [process]\n\n- - list: run_as_root_image_list\n items: []\n\n- macro: user_known_run_as_root_container\n - \ condition: (container.image.repository in (run_as_root_image_list))\n\n# The rule - is disabled by default and should be enabled when non-root container policy has - been applied.\n# Note the rule will not work as expected when usernamespace is applied, - e.g. userns-remap is enabled.\n- rule: Container Run as Root User\n desc: Detected - container running as root user\n condition: spawned_process and container and proc.vpid=1 - and user.uid=0 and not user_known_run_as_root_container\n enabled: false\n output: - Container launched with root user privilege (uid=%user.uid container_id=%container.id - container_name=%container.name image=%container.image.repository:%container.image.tag)\n - \ priority: INFO\n tags: [container, process]\n\n# Application rules have moved - to application_rules.yaml. 
Please look\n# there if you want to enable them by adding - to\n# falco_rules.local.yaml.\n" - k8s_audit_rules.yaml: "#\n# Copyright (C) 2019 The Falco Authors.\n#\n#\n# Licensed - under the Apache License, Version 2.0 (the \"License\");\n# you may not use this - file except in compliance with the License.\n# You may obtain a copy of the License - at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by - applicable law or agreed to in writing, software\n# distributed under the License - is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY - KIND, either express or implied.\n# See the License for the specific language governing - permissions and\n# limitations under the License.\n#\n- required_engine_version: - 2\n\n# Like always_true/always_false, but works with k8s audit events\n- macro: - k8s_audit_always_true\n condition: (jevt.rawtime exists)\n\n- macro: k8s_audit_never_true\n - \ condition: (jevt.rawtime=0)\n\n# Generally only consider audit events once the - response has completed\n- list: k8s_audit_stages\n items: [\"ResponseComplete\"]\n\n# - Generally exclude users starting with \"system:\"\n- macro: non_system_user\n condition: - (not ka.user.name startswith \"system:\")\n\n# This macro selects the set of Audit - Events used by the below rules.\n- macro: kevt\n condition: (jevt.value[/stage] - in (k8s_audit_stages))\n\n- macro: kevt_started\n condition: (jevt.value[/stage]=ResponseStarted)\n\n# - If you wish to restrict activity to a specific set of users, override/append to - this list.\n# users created by kops are included\n- list: vertical_pod_autoscaler_users\n - \ items: [\"vpa-recommender\", \"vpa-updater\"]\n\n- list: allowed_k8s_users\n items: - [\n \"minikube\", \"minikube-user\", \"kubelet\", \"kops\", \"admin\", \"kube\", - \"kube-proxy\", \"kube-apiserver-healthcheck\",\n \"kubernetes-admin\",\n vertical_pod_autoscaler_users,\n - \ cluster-autoscaler,\n \"system:addon-manager\",\n 
\"cloud-controller-manager\"\n - \ ]\n\n- rule: Disallowed K8s User\n desc: Detect any k8s operation by users - outside of an allowed set of users.\n condition: kevt and non_system_user and not - ka.user.name in (allowed_k8s_users)\n output: K8s Operation performed by user not - in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource - verb=%ka.verb uri=%ka.uri resp=%ka.response.code)\n priority: WARNING\n source: - k8s_audit\n tags: [k8s]\n\n# In a local/user rules file, you could override this - macro to\n# explicitly enumerate the container images that you want to run in\n# - your environment. In this main falco rules file, there isn't any way\n# to know - all the containers that can run, so any container is\n# allowed, by using the always_true - macro. In the overridden macro, the condition\n# would look something like (ka.req.pod.containers.image.repository - in (my-repo/my-image))\n- macro: allowed_k8s_containers\n condition: (k8s_audit_always_true)\n\n- - macro: response_successful\n condition: (ka.response.code startswith 2)\n\n- macro: - kcreate\n condition: ka.verb=create\n\n- macro: kmodify\n condition: (ka.verb - in (create,update,patch))\n\n- macro: kdelete\n condition: ka.verb=delete\n\n- - macro: pod\n condition: ka.target.resource=pods and not ka.target.subresource exists\n\n- - macro: pod_subresource\n condition: ka.target.resource=pods and ka.target.subresource - exists\n\n- macro: deployment\n condition: ka.target.resource=deployments\n\n- - macro: service\n condition: ka.target.resource=services\n\n- macro: configmap\n - \ condition: ka.target.resource=configmaps\n\n- macro: namespace\n condition: ka.target.resource=namespaces\n\n- - macro: serviceaccount\n condition: ka.target.resource=serviceaccounts\n\n- macro: - clusterrole\n condition: ka.target.resource=clusterroles\n\n- macro: clusterrolebinding\n - \ condition: ka.target.resource=clusterrolebindings\n\n- macro: role\n condition: - 
ka.target.resource=roles\n\n- macro: secret\n condition: ka.target.resource=secrets\n\n- - macro: health_endpoint\n condition: ka.uri=/healthz\n\n- rule: Create Disallowed - Pod\n desc: >\n Detect an attempt to start a pod with a container image outside - of a list of allowed images.\n condition: kevt and pod and kcreate and not allowed_k8s_containers\n - \ output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name - ns=%ka.target.namespace images=%ka.req.pod.containers.image)\n priority: WARNING\n - \ source: k8s_audit\n tags: [k8s]\n\n- rule: Create Privileged Pod\n desc: >\n - \ Detect an attempt to start a pod with a privileged container\n condition: kevt - and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not - ka.req.pod.containers.image.repository in (falco_privileged_images)\n output: Pod - started with privileged container (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace - images=%ka.req.pod.containers.image)\n priority: WARNING\n source: k8s_audit\n - \ tags: [k8s]\n\n- macro: sensitive_vol_mount\n condition: >\n (ka.req.pod.volumes.hostpath - intersects (/proc, /var/run/docker.sock, /, /etc, /root, /var/run/crio/crio.sock, - /home/admin, /var/lib/kubelet, /var/lib/kubelet/pki, /etc/kubernetes, /etc/kubernetes/manifests))\n\n- - rule: Create Sensitive Mount Pod\n desc: >\n Detect an attempt to start a pod - with a volume from a sensitive host directory (i.e. 
/proc).\n Exceptions are - made for known trusted images.\n condition: kevt and pod and kcreate and sensitive_vol_mount - and not ka.req.pod.containers.image.repository in (falco_sensitive_mount_images)\n - \ output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name - ns=%ka.target.namespace images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes])\n - \ priority: WARNING\n source: k8s_audit\n tags: [k8s]\n\n# Corresponds to K8s - CIS Benchmark 1.7.4\n- rule: Create HostNetwork Pod\n desc: Detect an attempt to - start a pod using the host network.\n condition: kevt and pod and kcreate and ka.req.pod.host_network - intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostnetwork_images)\n - \ output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace - images=%ka.req.pod.containers.image)\n priority: WARNING\n source: k8s_audit\n - \ tags: [k8s]\n\n- macro: user_known_node_port_service\n condition: (k8s_audit_never_true)\n\n- - rule: Create NodePort Service\n desc: >\n Detect an attempt to start a service - with a NodePort service type\n condition: kevt and service and kcreate and ka.req.service.type=NodePort - and not user_known_node_port_service\n output: NodePort Service Created (user=%ka.user.name - service=%ka.target.name ns=%ka.target.namespace ports=%ka.req.service.ports)\n priority: - WARNING\n source: k8s_audit\n tags: [k8s]\n\n- macro: contains_private_credentials\n - \ condition: >\n (ka.req.configmap.obj contains \"aws_access_key_id\" or\n ka.req.configmap.obj - contains \"aws-access-key-id\" or\n ka.req.configmap.obj contains \"aws_s3_access_key_id\" - or\n ka.req.configmap.obj contains \"aws-s3-access-key-id\" or\n ka.req.configmap.obj - contains \"password\" or\n ka.req.configmap.obj contains \"passphrase\")\n\n- - rule: Create/Modify Configmap With Private Credentials\n desc: >\n Detect creating/modifying - a configmap containing a private 
credential (aws key, password, etc.)\n condition: - kevt and configmap and kmodify and contains_private_credentials\n output: K8s configmap - with private credential (user=%ka.user.name verb=%ka.verb configmap=%ka.req.configmap.name - config=%ka.req.configmap.obj)\n priority: WARNING\n source: k8s_audit\n tags: - [k8s]\n\n# Corresponds to K8s CIS Benchmark, 1.1.1.\n- rule: Anonymous Request Allowed\n - \ desc: >\n Detect any request made by the anonymous user that was allowed\n - \ condition: kevt and ka.user.name=system:anonymous and ka.auth.decision=\"allow\" - and not health_endpoint\n output: Request by anonymous user allowed (user=%ka.user.name - verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason))\n priority: WARNING\n source: - k8s_audit\n tags: [k8s]\n\n# Roughly corresponds to K8s CIS Benchmark, 1.1.12. - In this case,\n# notifies an attempt to exec/attach to a privileged container.\n\n# - Ideally, we'd add a more stringent rule that detects attaches/execs\n# to a privileged - pod, but that requires the engine for k8s audit\n# events to be stateful, so it - could know if a container named in an\n# attach request was created privileged or - not. 
For now, we have a\n# less severe rule that detects attaches/execs to any pod.\n\n- - macro: user_known_exec_pod_activities\n condition: (k8s_audit_never_true)\n\n- - rule: Attach/Exec Pod\n desc: >\n Detect any attempt to attach/exec to a pod\n - \ condition: kevt_started and pod_subresource and kcreate and ka.target.subresource - in (exec,attach) and not user_known_exec_pod_activities\n output: Attach/Exec to - pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource - command=%ka.uri.param[command])\n priority: NOTICE\n source: k8s_audit\n tags: - [k8s]\n\n- macro: user_known_pod_debug_activities\n condition: (k8s_audit_never_true)\n\n# - Only works when feature gate EphemeralContainers is enabled\n- rule: EphemeralContainers - Created\n desc: >\n Detect any ephemeral container created\n condition: kevt - and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) - and not user_known_pod_debug_activities\n output: Ephemeral container is created - in pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] - ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image])\n - \ priority: NOTICE\n source: k8s_audit\n tags: [k8s]\n\n# In a local/user rules - fie, you can append to this list to add additional allowed namespaces\n- list: allowed_namespaces\n - \ items: [kube-system, kube-public, default]\n\n- rule: Create Disallowed Namespace\n - \ desc: Detect any attempt to create a namespace outside of a set of known namespaces\n - \ condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces)\n - \ output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name)\n - \ priority: WARNING\n source: k8s_audit\n tags: [k8s]\n\n# Only defined for backwards - compatibility. 
Use the more specific\n# user_allowed_kube_namespace_image_list instead.\n- - list: user_trusted_image_list\n items: []\n\n- list: user_allowed_kube_namespace_image_list\n - \ items: [user_trusted_image_list]\n\n# Only defined for backwards compatibility. - Use the more specific\n# allowed_kube_namespace_image_list instead.\n- list: k8s_image_list\n - \ items: []\n\n- list: allowed_kube_namespace_image_list\n items: [\n gcr.io/google-containers/prometheus-to-sd,\n - \ gcr.io/projectcalico-org/node,\n gke.gcr.io/addon-resizer,\n gke.gcr.io/heapster,\n - \ gke.gcr.io/gke-metadata-server,\n k8s.gcr.io/ip-masq-agent-amd64,\n k8s.gcr.io/kube-apiserver,\n - \ gke.gcr.io/kube-proxy,\n gke.gcr.io/netd-amd64,\n k8s.gcr.io/addon-resizer\n - \ k8s.gcr.io/prometheus-to-sd,\n k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64,\n - \ k8s.gcr.io/k8s-dns-kube-dns-amd64,\n k8s.gcr.io/k8s-dns-sidecar-amd64,\n - \ k8s.gcr.io/metrics-server-amd64,\n kope/kube-apiserver-healthcheck,\n k8s_image_list\n - \ ]\n\n- macro: allowed_kube_namespace_pods\n condition: (ka.req.pod.containers.image.repository - in (user_allowed_kube_namespace_image_list) or\n ka.req.pod.containers.image.repository - in (allowed_kube_namespace_image_list))\n\n# Detect any new pod created in the kube-system - namespace\n- rule: Pod Created in Kube Namespace\n desc: Detect any attempt to - create a pod in the kube-system or kube-public namespaces\n condition: kevt and - pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods\n - \ output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace - images=%ka.req.pod.containers.image)\n priority: WARNING\n source: k8s_audit\n - \ tags: [k8s]\n\n- list: user_known_sa_list\n items: []\n\n- macro: trusted_sa\n - \ condition: (ka.target.name in (user_known_sa_list))\n\n# Detect creating a service - account in the kube-system/kube-public namespace\n- rule: Service Account Created - in Kube Namespace\n 
desc: Detect any attempt to create a serviceaccount in the - kube-system or kube-public namespaces\n condition: kevt and serviceaccount and - kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful - and not trusted_sa\n output: Service account created in kube namespace (user=%ka.user.name - serviceaccount=%ka.target.name ns=%ka.target.namespace)\n priority: WARNING\n source: - k8s_audit\n tags: [k8s]\n\n# Detect any modify/delete to any ClusterRole starting - with\n# \"system:\". \"system:coredns\" is excluded as changes are expected in\n# - normal operation.\n- rule: System ClusterRole Modified/Deleted\n desc: Detect any - attempt to modify/delete a ClusterRole/Role starting with system\n condition: kevt - and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith - \"system:\") and\n not ka.target.name in (system:coredns, system:managed-certificate-controller)\n - \ output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name - ns=%ka.target.namespace action=%ka.verb)\n priority: WARNING\n source: k8s_audit\n - \ tags: [k8s]\n\n# Detect any attempt to create a ClusterRoleBinding to the cluster-admin - user\n# (exapand this to any built-in cluster role that does \"sensitive\" things)\n- - rule: Attach to cluster-admin Role\n desc: Detect any attempt to create a ClusterRoleBinding - to the cluster-admin user\n condition: kevt and clusterrolebinding and kcreate - and ka.req.binding.role=cluster-admin\n output: Cluster Role Binding to cluster-admin - role (user=%ka.user.name subject=%ka.req.binding.subjects)\n priority: WARNING\n - \ source: k8s_audit\n tags: [k8s]\n\n- rule: ClusterRole With Wildcard Created\n - \ desc: Detect any attempt to create a Role/ClusterRole with wildcard resources - or verbs\n condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources - intersects (\"*\") or ka.req.role.rules.verbs intersects (\"*\"))\n output: Created - 
Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules)\n - \ priority: WARNING\n source: k8s_audit\n tags: [k8s]\n\n- macro: writable_verbs\n - \ condition: >\n (ka.req.role.rules.verbs intersects (create, update, patch, - delete, deletecollection))\n\n- rule: ClusterRole With Write Privileges Created\n - \ desc: Detect any attempt to create a Role/ClusterRole that can perform write-related - actions\n condition: kevt and (role or clusterrole) and kcreate and writable_verbs\n - \ output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name - rules=%ka.req.role.rules)\n priority: NOTICE\n source: k8s_audit\n tags: [k8s]\n\n- - rule: ClusterRole With Pod Exec Created\n desc: Detect any attempt to create a - Role/ClusterRole that can exec to pods\n condition: kevt and (role or clusterrole) - and kcreate and ka.req.role.rules.resources intersects (\"pods/exec\")\n output: - Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name - rules=%ka.req.role.rules)\n priority: WARNING\n source: k8s_audit\n tags: [k8s]\n\n# - The rules below this point are less discriminatory and generally\n# represent a - stream of activity for a cluster. 
If you wish to disable\n# these events, modify - the following macro.\n- macro: consider_activity_events\n condition: (k8s_audit_always_true)\n\n- - macro: kactivity\n condition: (kevt and consider_activity_events)\n\n- rule: K8s - Deployment Created\n desc: Detect any attempt to create a deployment\n condition: - (kactivity and kcreate and deployment and response_successful)\n output: K8s Deployment - Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code - decision=%ka.auth.decision reason=%ka.auth.reason)\n priority: INFO\n source: - k8s_audit\n tags: [k8s]\n\n- rule: K8s Deployment Deleted\n desc: Detect any attempt - to delete a deployment\n condition: (kactivity and kdelete and deployment and response_successful)\n - \ output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name - ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)\n - \ priority: INFO\n source: k8s_audit\n tags: [k8s]\n\n- rule: K8s Service Created\n - \ desc: Detect any attempt to create a service\n condition: (kactivity and kcreate - and service and response_successful)\n output: K8s Service Created (user=%ka.user.name - service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision - reason=%ka.auth.reason)\n priority: INFO\n source: k8s_audit\n tags: [k8s]\n\n- - rule: K8s Service Deleted\n desc: Detect any attempt to delete a service\n condition: - (kactivity and kdelete and service and response_successful)\n output: K8s Service - Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code - decision=%ka.auth.decision reason=%ka.auth.reason)\n priority: INFO\n source: - k8s_audit\n tags: [k8s]\n\n- rule: K8s ConfigMap Created\n desc: Detect any attempt - to create a configmap\n condition: (kactivity and kcreate and configmap and response_successful)\n - \ output: K8s ConfigMap Created (user=%ka.user.name 
configmap=%ka.target.name ns=%ka.target.namespace - resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)\n priority: - INFO\n source: k8s_audit\n tags: [k8s]\n\n- rule: K8s ConfigMap Deleted\n desc: - Detect any attempt to delete a configmap\n condition: (kactivity and kdelete and - configmap and response_successful)\n output: K8s ConfigMap Deleted (user=%ka.user.name - configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision - reason=%ka.auth.reason)\n priority: INFO\n source: k8s_audit\n tags: [k8s]\n\n- - rule: K8s Namespace Created\n desc: Detect any attempt to create a namespace\n - \ condition: (kactivity and kcreate and namespace and response_successful)\n output: - K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code - decision=%ka.auth.decision reason=%ka.auth.reason)\n priority: INFO\n source: - k8s_audit\n tags: [k8s]\n\n- rule: K8s Namespace Deleted\n desc: Detect any attempt - to delete a namespace\n condition: (kactivity and non_system_user and kdelete and - namespace and response_successful)\n output: K8s Namespace Deleted (user=%ka.user.name - namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)\n - \ priority: INFO\n source: k8s_audit\n tags: [k8s]\n\n- rule: K8s Serviceaccount - Created\n desc: Detect any attempt to create a service account\n condition: (kactivity - and kcreate and serviceaccount and response_successful)\n output: K8s Serviceaccount - Created (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code - decision=%ka.auth.decision reason=%ka.auth.reason)\n priority: INFO\n source: - k8s_audit\n tags: [k8s]\n\n- rule: K8s Serviceaccount Deleted\n desc: Detect any - attempt to delete a service account\n condition: (kactivity and kdelete and serviceaccount - and response_successful)\n output: K8s Serviceaccount Deleted (user=%ka.user.name - user=%ka.target.name 
ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision - reason=%ka.auth.reason)\n priority: INFO\n source: k8s_audit\n tags: [k8s]\n\n- - rule: K8s Role/Clusterrole Created\n desc: Detect any attempt to create a cluster - role/role\n condition: (kactivity and kcreate and (clusterrole or role) and response_successful)\n - \ output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules - resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)\n priority: - INFO\n source: k8s_audit\n tags: [k8s]\n\n- rule: K8s Role/Clusterrole Deleted\n - \ desc: Detect any attempt to delete a cluster role/role\n condition: (kactivity - and kdelete and (clusterrole or role) and response_successful)\n output: K8s Cluster - Role Deleted (user=%ka.user.name role=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision - reason=%ka.auth.reason)\n priority: INFO\n source: k8s_audit\n tags: [k8s]\n\n- - rule: K8s Role/Clusterrolebinding Created\n desc: Detect any attempt to create - a clusterrolebinding\n condition: (kactivity and kcreate and clusterrolebinding - and response_successful)\n output: K8s Cluster Role Binding Created (user=%ka.user.name - binding=%ka.target.name subjects=%ka.req.binding.subjects role=%ka.req.binding.role - resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)\n priority: - INFO\n source: k8s_audit\n tags: [k8s]\n\n- rule: K8s Role/Clusterrolebinding - Deleted\n desc: Detect any attempt to delete a clusterrolebinding\n condition: - (kactivity and kdelete and clusterrolebinding and response_successful)\n output: - K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resp=%ka.response.code - decision=%ka.auth.decision reason=%ka.auth.reason)\n priority: INFO\n source: - k8s_audit\n tags: [k8s]\n\n- rule: K8s Secret Created\n desc: Detect any attempt - to create a secret. 
Service account tokens are excluded.\n condition: (kactivity - and kcreate and secret and ka.target.namespace!=kube-system and non_system_user - and response_successful)\n output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name - ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)\n - \ priority: INFO\n source: k8s_audit\n tags: [k8s]\n\n- rule: K8s Secret Deleted\n - \ desc: Detect any attempt to delete a secret Service account tokens are excluded.\n - \ condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system - and non_system_user and response_successful)\n output: K8s Secret Deleted (user=%ka.user.name - secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision - reason=%ka.auth.reason)\n priority: INFO\n source: k8s_audit\n tags: [k8s]\n\n# - This rule generally matches all events, and as a result is disabled\n# by default. - If you wish to enable these events, modify the\n# following macro.\n# condition: - (jevt.rawtime exists)\n- macro: consider_all_events\n condition: (k8s_audit_never_true)\n\n- - macro: kall\n condition: (kevt and consider_all_events)\n\n- rule: All K8s Audit - Events\n desc: Match all K8s Audit Events\n condition: kall\n output: K8s Audit - Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj)\n priority: - DEBUG\n source: k8s_audit\n tags: [k8s]\n\n\n# This macro disables following rule, - change to k8s_audit_never_true to enable it\n- macro: allowed_full_admin_users\n - \ condition: (k8s_audit_always_true)\n\n# This list includes some of the default - user names for an administrator in several K8s installations\n- list: full_admin_k8s_users\n - \ items: [\"admin\", \"kubernetes-admin\", \"kubernetes-admin@kubernetes\", \"kubernetes-admin@cluster.local\", - \"minikube-user\"]\n\n# This rules detect an operation triggered by an user name - that is \n# included in the list of those that are 
default administrators upon \n# - cluster creation. This may signify a permission setting too broader. \n# As we can't - check for role of the user on a general ka.* event, this \n# may or may not be an - administrator. Customize the full_admin_k8s_users \n# list to your needs, and activate - at your discrection.\n\n# # How to test:\n# # Execute any kubectl command connected - using default cluster user, as:\n# kubectl create namespace rule-test\n\n- rule: - Full K8s Administrative Access\n desc: Detect any k8s operation by a user name - that may be an administrator with full access.\n condition: >\n kevt \n and - non_system_user \n and ka.user.name in (full_admin_k8s_users)\n and not allowed_full_admin_users\n - \ output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource - verb=%ka.verb uri=%ka.uri resp=%ka.response.code)\n priority: WARNING\n source: - k8s_audit\n tags: [k8s]\n\n- macro: ingress\n condition: ka.target.resource=ingresses\n\n- - macro: ingress_tls\n condition: (jevt.value[/requestObject/spec/tls] exists)\n\n# - # How to test:\n# # Create an ingress.yaml file with content:\n# apiVersion: networking.k8s.io/v1beta1\n# - kind: Ingress\n# metadata:\n# name: test-ingress\n# annotations:\n# nginx.ingress.kubernetes.io/rewrite-target: - /\n# spec:\n# rules:\n# - http:\n# paths:\n# - path: /testpath\n# - \ backend:\n# serviceName: test\n# servicePort: 80\n# - # Execute: kubectl apply -f ingress.yaml\n\n- rule: Ingress Object without TLS Certificate - Created\n desc: Detect any attempt to create an ingress without TLS certification.\n - \ condition: >\n (kactivity and kcreate and ingress and response_successful and - not ingress_tls)\n output: >\n K8s Ingress Without TLS Cert Created (user=%ka.user.name - ingress=%ka.target.name\n namespace=%ka.target.namespace)\n source: k8s_audit - \ \n priority: WARNING\n tags: [k8s, network]\n\n- macro: node\n condition: - ka.target.resource=nodes\n\n- macro: 
allow_all_k8s_nodes\n condition: (k8s_audit_always_true)\n\n- - list: allowed_k8s_nodes\n items: []\n\n# # How to test:\n# # Create a Falco monitored - cluster with Kops\n# # Increase the number of minimum nodes with:\n# kops edit ig - nodes\n# kops apply --yes\n\n- rule: Untrusted Node Successfully Joined the Cluster\n - \ desc: >\n Detect a node successfully joined the cluster outside of the list - of allowed nodes.\n condition: >\n kevt and node \n and kcreate \n and - response_successful \n and not allow_all_k8s_nodes \n and not ka.target.name - in (allowed_k8s_nodes)\n output: Node not in allowed list successfully joined the - cluster (user=%ka.user.name node=%ka.target.name)\n priority: ERROR\n source: - k8s_audit\n tags: [k8s]\n\n- rule: Untrusted Node Unsuccessfully Tried to Join - the Cluster\n desc: >\n Detect an unsuccessful attempt to join the cluster for - a node not in the list of allowed nodes.\n condition: >\n kevt and node \n and - kcreate \n and not response_successful \n and not allow_all_k8s_nodes \n and - not ka.target.name in (allowed_k8s_nodes)\n output: Node not in allowed list tried - unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason)\n - \ priority: WARNING\n source: k8s_audit\n tags: [k8s]\n\n" + falco_rules.yaml: |+ + # + # Copyright (C) 2020 The Falco Authors. + # + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + + # The latest Falco Engine version is 9. 
+ # Starting with version 8, the Falco engine supports exceptions. + # However the Falco rules file does not use them by default. + - required_engine_version: 9 + + # Currently disabled as read/write are ignored syscalls. The nearly + # similar open_write/open_read check for files being opened for + # reading/writing. + # - macro: write + # condition: (syscall.type=write and fd.type in (file, directory)) + # - macro: read + # condition: (syscall.type=read and evt.dir=> and fd.type in (file, directory)) + + - macro: open_write + condition: (evt.type=open or evt.type=openat) and evt.is_open_write=true and fd.typechar='f' and fd.num>=0 + + - macro: open_read + condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='f' and fd.num>=0 + + - macro: open_directory + condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='d' and fd.num>=0 + + - macro: never_true + condition: (evt.num=0) + + - macro: always_true + condition: (evt.num>=0) + + # In some cases, such as dropped system call events, information about + # the process name may be missing. For some rules that really depend + # on the identity of the process performing an action such as opening + # a file, etc., we require that the process name be known. 
+ - macro: proc_name_exists + condition: (proc.name!="") + + - macro: rename + condition: evt.type in (rename, renameat, renameat2) + + - macro: mkdir + condition: evt.type in (mkdir, mkdirat) + + - macro: remove + condition: evt.type in (rmdir, unlink, unlinkat) + + - macro: modify + condition: rename or remove + + - macro: spawned_process + condition: evt.type = execve and evt.dir=< + + - macro: create_symlink + condition: evt.type in (symlink, symlinkat) and evt.dir=< + + - macro: chmod + condition: (evt.type in (chmod, fchmod, fchmodat) and evt.dir=<) + + # File categories + - macro: bin_dir + condition: fd.directory in (/bin, /sbin, /usr/bin, /usr/sbin) + + - macro: bin_dir_mkdir + condition: > + (evt.arg.path startswith /bin/ or + evt.arg.path startswith /sbin/ or + evt.arg.path startswith /usr/bin/ or + evt.arg.path startswith /usr/sbin/) + + - macro: bin_dir_rename + condition: > + (evt.arg.path startswith /bin/ or + evt.arg.path startswith /sbin/ or + evt.arg.path startswith /usr/bin/ or + evt.arg.path startswith /usr/sbin/ or + evt.arg.name startswith /bin/ or + evt.arg.name startswith /sbin/ or + evt.arg.name startswith /usr/bin/ or + evt.arg.name startswith /usr/sbin/ or + evt.arg.oldpath startswith /bin/ or + evt.arg.oldpath startswith /sbin/ or + evt.arg.oldpath startswith /usr/bin/ or + evt.arg.oldpath startswith /usr/sbin/ or + evt.arg.newpath startswith /bin/ or + evt.arg.newpath startswith /sbin/ or + evt.arg.newpath startswith /usr/bin/ or + evt.arg.newpath startswith /usr/sbin/) + + - macro: etc_dir + condition: fd.name startswith /etc/ + + # This detects writes immediately below / or any write anywhere below /root + - macro: root_dir + condition: (fd.directory=/ or fd.name startswith /root/) + + - list: shell_binaries + items: [ash, bash, csh, ksh, sh, tcsh, zsh, dash] + + - list: ssh_binaries + items: [ + sshd, sftp-server, ssh-agent, + ssh, scp, sftp, + ssh-keygen, ssh-keysign, ssh-keyscan, ssh-add + ] + + - list: shell_mgmt_binaries + items: 
[add-shell, remove-shell] + + - macro: shell_procs + condition: proc.name in (shell_binaries) + + - list: coreutils_binaries + items: [ + truncate, sha1sum, numfmt, fmt, fold, uniq, cut, who, + groups, csplit, sort, expand, printf, printenv, unlink, tee, chcon, stat, + basename, split, nice, "yes", whoami, sha224sum, hostid, users, stdbuf, + base64, unexpand, cksum, od, paste, nproc, pathchk, sha256sum, wc, test, + comm, arch, du, factor, sha512sum, md5sum, tr, runcon, env, dirname, + tsort, join, shuf, install, logname, pinky, nohup, expr, pr, tty, timeout, + tail, "[", seq, sha384sum, nl, head, id, mkfifo, sum, dircolors, ptx, shred, + tac, link, chroot, vdir, chown, touch, ls, dd, uname, "true", pwd, date, + chgrp, chmod, mktemp, cat, mknod, sync, ln, "false", rm, mv, cp, echo, + readlink, sleep, stty, mkdir, df, dir, rmdir, touch + ] + + # dpkg -L login | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," + - list: login_binaries + items: [ + login, systemd, '"(systemd)"', systemd-logind, su, + nologin, faillog, lastlog, newgrp, sg + ] + + # dpkg -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," + - list: passwd_binaries + items: [ + shadowconfig, grpck, pwunconv, grpconv, pwck, + groupmod, vipw, pwconv, useradd, newusers, cppw, chpasswd, usermod, + groupadd, groupdel, grpunconv, chgpasswd, userdel, chage, chsh, + gpasswd, chfn, expiry, passwd, vigr, cpgr, adduser, addgroup, deluser, delgroup + ] + + # repoquery -l shadow-utils | grep bin | xargs ls -ld | grep -v '^d' | + # awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," + - list: shadowutils_binaries + items: [ + chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd, + groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv, + newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd + ] + + - list: sysdigcloud_binaries + items: 
[setup-backend, dragent, sdchecks] + + - list: docker_binaries + items: [docker, dockerd, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current, dockerd-current] + + - list: k8s_binaries + items: [hyperkube, skydns, kube2sky, exechealthz, weave-net, loopback, bridge, openshift-sdn, openshift] + + - list: lxd_binaries + items: [lxd, lxcfs] + + - list: http_server_binaries + items: [nginx, httpd, httpd-foregroun, lighttpd, apache, apache2] + + - list: db_server_binaries + items: [mysqld, postgres, sqlplus] + + - list: postgres_mgmt_binaries + items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster] + + - list: nosql_server_binaries + items: [couchdb, memcached, redis-server, rabbitmq-server, mongod] + + - list: gitlab_binaries + items: [gitlab-shell, gitlab-mon, gitlab-runner-b, git] + + - list: interpreted_binaries + items: [lua, node, perl, perl5, perl6, php, python, python2, python3, ruby, tcl] + + - macro: interpreted_procs + condition: > + (proc.name in (interpreted_binaries)) + + - macro: server_procs + condition: proc.name in (http_server_binaries, db_server_binaries, docker_binaries, sshd) + + # The explicit quotes are needed to avoid the - characters being + # interpreted by the filter expression. 
+ - list: rpm_binaries + items: [dnf, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, rhsmcertd, subscription-ma, + repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, + abrt-action-sav, rpmdb_stat, microdnf, rhn_check, yumdb] + + - list: openscap_rpm_binaries + items: [probe_rpminfo, probe_rpmverify, probe_rpmverifyfile, probe_rpmverifypackage] + + - macro: rpm_procs + condition: (proc.name in (rpm_binaries, openscap_rpm_binaries) or proc.name in (salt-minion)) + + - list: deb_binaries + items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, apt, apt-get, aptitude, + frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, + apt-listchanges, unattended-upgr, apt-add-reposit, apt-config, apt-cache, apt.systemd.dai + ] + + # The truncated dpkg-preconfigu is intentional, process names are + # truncated at the sysdig level. + - list: package_mgmt_binaries + items: [rpm_binaries, deb_binaries, update-alternat, gem, pip, pip3, sane-utils.post, alternatives, chef-client, apk, snapd] + + - macro: package_mgmt_procs + condition: proc.name in (package_mgmt_binaries) + + - macro: package_mgmt_ancestor_procs + condition: proc.pname in (package_mgmt_binaries) or + proc.aname[2] in (package_mgmt_binaries) or + proc.aname[3] in (package_mgmt_binaries) or + proc.aname[4] in (package_mgmt_binaries) + + - macro: coreos_write_ssh_dir + condition: (proc.name=update-ssh-keys and fd.name startswith /home/core/.ssh) + + - macro: run_by_package_mgmt_binaries + condition: proc.aname in (package_mgmt_binaries, needrestart) + + - list: ssl_mgmt_binaries + items: [ca-certificates] + + - list: dhcp_binaries + items: [dhclient, dhclient-script, 11-dhclient] + + # A canonical set of processes that run other programs with different + # privileges or as a different user. 
+ - list: userexec_binaries + items: [sudo, su, suexec, critical-stack, dzdo] + + - list: known_setuid_binaries + items: [ + sshd, dbus-daemon-lau, ping, ping6, critical-stack-, pmmcli, + filemng, PassengerAgent, bwrap, osdetect, nginxmng, sw-engine-fpm, + start-stop-daem + ] + + - list: user_mgmt_binaries + items: [login_binaries, passwd_binaries, shadowutils_binaries] + + - list: dev_creation_binaries + items: [blkid, rename_device, update_engine, sgdisk] + + - list: hids_binaries + items: [aide, aide.wrapper, update-aide.con, logcheck, syslog-summary, osqueryd, ossec-syscheckd] + + - list: vpn_binaries + items: [openvpn] + + - list: nomachine_binaries + items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin] + + - macro: system_procs + condition: proc.name in (coreutils_binaries, user_mgmt_binaries) + + - list: mail_binaries + items: [ + sendmail, sendmail-msp, postfix, procmail, exim4, + pickup, showq, mailq, dovecot, imap-login, imap, + mailmng-core, pop3-login, dovecot-lda, pop3 + ] + + - list: mail_config_binaries + items: [ + update_conf, parse_mc, makemap_hash, newaliases, update_mk, update_tlsm4, + update_db, update_mc, ssmtp.postinst, mailq, postalias, postfix.config., + postfix.config, postfix-script, postconf + ] + + - list: sensitive_file_names + items: [/etc/shadow, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf] + + - list: sensitive_directory_names + items: [/, /etc, /etc/, /root, /root/] + + - macro: sensitive_files + condition: > + fd.name startswith /etc and + (fd.name in (sensitive_file_names) + or fd.directory in (/etc/sudoers.d, /etc/pam.d)) + + # Indicates that the process is new. Currently detected using time + # since process was started, using a threshold of 5 seconds. 
+ - macro: proc_is_new + condition: proc.duration <= 5000000000 + + # Network + - macro: inbound + condition: > + (((evt.type in (accept,listen) and evt.dir=<) or + (evt.type in (recvfrom,recvmsg) and evt.dir=< and + fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + + # RFC1918 addresses were assigned for private network usage + - list: rfc_1918_addresses + items: ['"10.0.0.0/8"', '"172.16.0.0/12"', '"192.168.0.0/16"'] + + - macro: outbound + condition: > + (((evt.type = connect and evt.dir=<) or + (evt.type in (sendto,sendmsg) and evt.dir=< and + fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and + (fd.typechar = 4 or fd.typechar = 6) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8" and not fd.snet in (rfc_1918_addresses)) and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + + # Very similar to inbound/outbound, but combines the tests together + # for efficiency. + - macro: inbound_outbound + condition: > + ((((evt.type in (accept,listen,connect) and evt.dir=<)) or + (fd.typechar = 4 or fd.typechar = 6)) and + (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and + (evt.rawres >= 0 or evt.res = EINPROGRESS)) + + - macro: ssh_port + condition: fd.sport=22 + + # In a local/user rules file, you could override this macro to + # enumerate the servers for which ssh connections are allowed. For + # example, you might have a ssh gateway host for which ssh connections + # are allowed. + # + # In the main falco rules file, there isn't any way to know the + # specific hosts for which ssh access is allowed, so this macro just + # repeats ssh_port, which effectively allows ssh from all hosts. In + # the overridden macro, the condition would look something like + # "fd.sip="a.b.c.d" or fd.sip="e.f.g.h" or ..." 
+ - macro: allowed_ssh_hosts + condition: ssh_port + + - rule: Disallowed SSH Connection + desc: Detect any new ssh connection to a host other than those in an allowed group of hosts + condition: (inbound_outbound) and ssh_port and not allowed_ssh_hosts + output: Disallowed SSH Connection (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_remote_service] + + # These rules and supporting macros are more of an example for how to + # use the fd.*ip and fd.*ip.name fields to match connection + # information against ips, netmasks, and complete domain names. + # + # To use this rule, you should modify consider_all_outbound_conns and + # populate allowed_{source,destination}_{ipaddrs,networks,domains} with the + # values that make sense for your environment. + - macro: consider_all_outbound_conns + condition: (never_true) + + # Note that this can be either individual IPs or netmasks + - list: allowed_outbound_destination_ipaddrs + items: ['"127.0.0.1"', '"8.8.8.8"'] + + - list: allowed_outbound_destination_networks + items: ['"127.0.0.1/8"'] + + - list: allowed_outbound_destination_domains + items: [google.com, www.yahoo.com] + + - rule: Unexpected outbound connection destination + desc: Detect any outbound connection to a destination outside of an allowed set of ips, networks, or domain names + condition: > + consider_all_outbound_conns and outbound and not + ((fd.sip in (allowed_outbound_destination_ipaddrs)) or + (fd.snet in (allowed_outbound_destination_networks)) or + (fd.sip.name in (allowed_outbound_destination_domains))) + output: Disallowed outbound connection destination (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network] + + - macro: consider_all_inbound_conns + condition: (never_true) + + 
- list: allowed_inbound_source_ipaddrs + items: ['"127.0.0.1"'] + + - list: allowed_inbound_source_networks + items: ['"127.0.0.1/8"', '"10.0.0.0/8"'] + + - list: allowed_inbound_source_domains + items: [google.com] + + - rule: Unexpected inbound connection source + desc: Detect any inbound connection from a source outside of an allowed set of ips, networks, or domain names + condition: > + consider_all_inbound_conns and inbound and not + ((fd.cip in (allowed_inbound_source_ipaddrs)) or + (fd.cnet in (allowed_inbound_source_networks)) or + (fd.cip.name in (allowed_inbound_source_domains))) + output: Disallowed inbound connection source (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network] + + - list: bash_config_filenames + items: [.bashrc, .bash_profile, .bash_history, .bash_login, .bash_logout, .inputrc, .profile] + + - list: bash_config_files + items: [/etc/profile, /etc/bashrc] + + # Covers both csh and tcsh + - list: csh_config_filenames + items: [.cshrc, .login, .logout, .history, .tcshrc, .cshdirs] + + - list: csh_config_files + items: [/etc/csh.cshrc, /etc/csh.login] + + - list: zsh_config_filenames + items: [.zshenv, .zprofile, .zshrc, .zlogin, .zlogout] + + - list: shell_config_filenames + items: [bash_config_filenames, csh_config_filenames, zsh_config_filenames] + + - list: shell_config_files + items: [bash_config_files, csh_config_files] + + - list: shell_config_directories + items: [/etc/zsh] + + - rule: Modify Shell Configuration File + desc: Detect attempt to modify shell configuration files + condition: > + open_write and + (fd.filename in (shell_config_filenames) or + fd.name in (shell_config_files) or + fd.directory in (shell_config_directories)) + and not proc.name in (shell_binaries) + and not exe_running_docker_save + output: > + a shell configuration file has been modified (user=%user.name 
user_loginuid=%user.loginuid command=%proc.cmdline pcmdline=%proc.pcmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tags: [file, mitre_persistence] + + # This rule is not enabled by default, as there are many legitimate + # readers of shell config files. If you want to enable it, modify the + # following macro. + + - macro: consider_shell_config_reads + condition: (never_true) + + - rule: Read Shell Configuration File + desc: Detect attempts to read shell configuration files by non-shell programs + condition: > + open_read and + consider_shell_config_reads and + (fd.filename in (shell_config_filenames) or + fd.name in (shell_config_files) or + fd.directory in (shell_config_directories)) and + (not proc.name in (shell_binaries)) + output: > + a shell configuration file was read by a non-shell program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tags: [file, mitre_discovery] + + - macro: consider_all_cron_jobs + condition: (never_true) + + - macro: user_known_cron_jobs + condition: (never_true) + + - rule: Schedule Cron Jobs + desc: Detect cron jobs scheduled + condition: > + ((open_write and fd.name startswith /etc/cron) or + (spawned_process and proc.name = "crontab")) and + consider_all_cron_jobs and + not user_known_cron_jobs + output: > + Cron jobs were scheduled to run (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline + file=%fd.name container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: + NOTICE + tags: [file, mitre_persistence] + + # Use this to test whether the event occurred within a container. 
+ + # When displaying container information in the output field, use + # %container.info, without any leading term (file=%fd.name + # %container.info user=%user.name user_loginuid=%user.loginuid, and not file=%fd.name + # container=%container.info user=%user.name user_loginuid=%user.loginuid). The output will change + # based on the context and whether or not -pk/-pm/-pc was specified on + # the command line. + - macro: container + condition: (container.id != host) + + - macro: container_started + condition: > + ((evt.type = container or + (spawned_process and proc.vpid=1)) and + container.image.repository != incomplete) + + - macro: interactive + condition: > + ((proc.aname=sshd and proc.name != sshd) or + proc.name=systemd-logind or proc.name=login) + + - list: cron_binaries + items: [anacron, cron, crond, crontab] + + # https://github.com/liske/needrestart + - list: needrestart_binaries + items: [needrestart, 10-dpkg, 20-rpm, 30-pacman] + + # Possible scripts run by sshkit + - list: sshkit_script_binaries + items: [10_etc_sudoers., 10_passwd_group] + + - list: plesk_binaries + items: [sw-engine, sw-engine-fpm, sw-engine-kv, filemng, f2bmng] + + # System users that should never log into a system. Consider adding your own + # service users (e.g. 'apache' or 'mysqld') here. 
+ - macro: system_users
+ condition: user.name in (bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data)
+
+ - macro: httpd_writing_ssl_conf
+ condition: >
+ (proc.pname=run-httpd and
+ (proc.cmdline startswith "sed -ri" or proc.cmdline startswith "sed -i") and
+ (fd.name startswith /etc/httpd/conf.d/ or fd.name startswith /etc/httpd/conf))
+
+ - macro: userhelper_writing_etc_security
+ condition: (proc.name=userhelper and fd.name startswith /etc/security)
+
+ - macro: ansible_running_python
+ condition: (proc.name in (python, pypy, python3) and proc.cmdline contains ansible)
+
+ - macro: python_running_chef
+ condition: (proc.name=python and (proc.cmdline contains yum-dump.py or proc.cmdline="python /usr/bin/chef-monitor.py"))
+
+ - macro: python_running_denyhosts
+ condition: >
+ (proc.name=python and
+ (proc.cmdline contains /usr/sbin/denyhosts or
+ proc.cmdline contains /usr/local/bin/denyhosts.py))
+
+ # Qualys seems to run a variety of shell subprocesses, at various
+ # levels. This checks at a few levels without the cost of a full
+ # proc.aname, which traverses the full parent hierarchy.
+ - macro: run_by_qualys
+ condition: >
+ (proc.pname=qualys-cloud-ag or
+ proc.aname[2]=qualys-cloud-ag or
+ proc.aname[3]=qualys-cloud-ag or
+ proc.aname[4]=qualys-cloud-ag)
+
+ - macro: run_by_sumologic_securefiles
+ condition: >
+ ((proc.cmdline="usermod -a -G sumologic_collector" or
+ proc.cmdline="groupadd sumologic_collector") and
+ (proc.pname=secureFiles.sh and proc.aname[2]=java))
+
+ - macro: run_by_yum
+ condition: ((proc.pname=sh and proc.aname[2]=yum) or
+ (proc.aname[2]=sh and proc.aname[3]=yum))
+
+ - macro: run_by_ms_oms
+ condition: >
+ (proc.aname[3] startswith omsagent- or
+ proc.aname[3] startswith scx-)
+
+ - macro: run_by_google_accounts_daemon
+ condition: >
+ (proc.aname[1] startswith google_accounts or
+ proc.aname[2] startswith google_accounts or
+ proc.aname[3] startswith google_accounts)
+
+ # Chef is similar. 
+ - macro: run_by_chef + condition: (proc.aname[2]=chef_command_wr or proc.aname[3]=chef_command_wr or + proc.aname[2]=chef-client or proc.aname[3]=chef-client or + proc.name=chef-client) + + - macro: run_by_adclient + condition: (proc.aname[2]=adclient or proc.aname[3]=adclient or proc.aname[4]=adclient) + + - macro: run_by_centrify + condition: (proc.aname[2]=centrify or proc.aname[3]=centrify or proc.aname[4]=centrify) + + # Also handles running semi-indirectly via scl + - macro: run_by_foreman + condition: > + (user.name=foreman and + ((proc.pname in (rake, ruby, scl) and proc.aname[5] in (tfm-rake,tfm-ruby)) or + (proc.pname=scl and proc.aname[2] in (tfm-rake,tfm-ruby)))) + + - macro: java_running_sdjagent + condition: proc.name=java and proc.cmdline contains sdjagent.jar + + - macro: kubelet_running_loopback + condition: (proc.pname=kubelet and proc.name=loopback) + + - macro: python_mesos_marathon_scripting + condition: (proc.pcmdline startswith "python3 /marathon-lb/marathon_lb.py") + + - macro: splunk_running_forwarder + condition: (proc.pname=splunkd and proc.cmdline startswith "sh -c /opt/splunkforwarder") + + - macro: parent_supervise_running_multilog + condition: (proc.name=multilog and proc.pname=supervise) + + - macro: supervise_writing_status + condition: (proc.name in (supervise,svc) and fd.name startswith "/etc/sb/") + + - macro: pki_realm_writing_realms + condition: (proc.cmdline startswith "bash /usr/local/lib/pki/pki-realm" and fd.name startswith /etc/pki/realms) + + - macro: htpasswd_writing_passwd + condition: (proc.name=htpasswd and fd.name=/etc/nginx/.htpasswd) + + - macro: lvprogs_writing_conf + condition: > + (proc.name in (dmeventd,lvcreate,pvscan,lvs) and + (fd.name startswith /etc/lvm/archive or + fd.name startswith /etc/lvm/backup or + fd.name startswith /etc/lvm/cache)) + + - macro: ovsdb_writing_openvswitch + condition: (proc.name=ovsdb-server and fd.directory=/etc/openvswitch) + + - macro: perl_running_plesk + condition: 
(proc.cmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager" or + proc.pcmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager") + + - macro: perl_running_updmap + condition: (proc.cmdline startswith "perl /usr/bin/updmap") + + - macro: perl_running_centrifydc + condition: (proc.cmdline startswith "perl /usr/share/centrifydc") + + - macro: runuser_reading_pam + condition: (proc.name=runuser and fd.directory=/etc/pam.d) + + # CIS Linux Benchmark program + - macro: linux_bench_reading_etc_shadow + condition: ((proc.aname[2]=linux-bench and + proc.name in (awk,cut,grep)) and + (fd.name=/etc/shadow or + fd.directory=/etc/pam.d)) + + - macro: parent_ucf_writing_conf + condition: (proc.pname=ucf and proc.aname[2]=frontend) + + - macro: consul_template_writing_conf + condition: > + ((proc.name=consul-template and fd.name startswith /etc/haproxy) or + (proc.name=reload.sh and proc.aname[2]=consul-template and fd.name startswith /etc/ssl)) + + - macro: countly_writing_nginx_conf + condition: (proc.cmdline startswith "nodejs /opt/countly/bin" and fd.name startswith /etc/nginx) + + - list: ms_oms_binaries + items: [omi.postinst, omsconfig.posti, scx.postinst, omsadmin.sh, omiagent] + + - macro: ms_oms_writing_conf + condition: > + ((proc.name in (omiagent,omsagent,in_heartbeat_r*,omsadmin.sh,PerformInventor,dsc_host) + or proc.pname in (ms_oms_binaries) + or proc.aname[2] in (ms_oms_binaries)) + and (fd.name startswith /etc/opt/omi or fd.name startswith /etc/opt/microsoft/omsagent)) + + - macro: ms_scx_writing_conf + condition: (proc.name in (GetLinuxOS.sh) and fd.name startswith /etc/opt/microsoft/scx) + + - macro: azure_scripts_writing_conf + condition: (proc.pname startswith "bash /var/lib/waagent/" and fd.name startswith /etc/azure) + + - macro: azure_networkwatcher_writing_conf + condition: (proc.name in (NetworkWatcherA) and fd.name=/etc/init.d/AzureNetworkWatcherAgent) + + - macro: couchdb_writing_conf + condition: (proc.name=beam.smp and 
proc.cmdline contains couchdb and fd.name startswith /etc/couchdb) + + - macro: update_texmf_writing_conf + condition: (proc.name=update-texmf and fd.name startswith /etc/texmf) + + - macro: slapadd_writing_conf + condition: (proc.name=slapadd and fd.name startswith /etc/ldap) + + - macro: openldap_writing_conf + condition: (proc.pname=run-openldap.sh and fd.name startswith /etc/openldap) + + - macro: ucpagent_writing_conf + condition: (proc.name=apiserver and container.image.repository=docker/ucp-agent and fd.name=/etc/authorization_config.cfg) + + - macro: iscsi_writing_conf + condition: (proc.name=iscsiadm and fd.name startswith /etc/iscsi) + + - macro: istio_writing_conf + condition: (proc.name=pilot-agent and fd.name startswith /etc/istio) + + - macro: symantec_writing_conf + condition: > + ((proc.name=symcfgd and fd.name startswith /etc/symantec) or + (proc.name=navdefutil and fd.name=/etc/symc-defutils.conf)) + + - macro: liveupdate_writing_conf + condition: (proc.cmdline startswith "java LiveUpdate" and fd.name in (/etc/liveupdate.conf, /etc/Product.Catalog.JavaLiveUpdate)) + + - macro: rancher_agent + condition: (proc.name=agent and container.image.repository contains "rancher/agent") + + - macro: rancher_network_manager + condition: (proc.name=rancher-bridge and container.image.repository contains "rancher/network-manager") + + - macro: sosreport_writing_files + condition: > + (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport and + (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) + + - macro: pkgmgmt_progs_writing_pki + condition: > + (proc.name=urlgrabber-ext- and proc.pname in (yum, yum-cron, repoquery) and + (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) + + - macro: update_ca_trust_writing_pki + condition: (proc.pname=update-ca-trust and proc.name=trust and fd.name startswith /etc/pki) + + - macro: brandbot_writing_os_release + condition: proc.name=brandbot and fd.name=/etc/os-release + + - 
macro: selinux_writing_conf + condition: (proc.name in (semodule,genhomedircon,sefcontext_comp) and fd.name startswith /etc/selinux) + + - list: veritas_binaries + items: [vxconfigd, sfcache, vxclustadm, vxdctl, vxprint, vxdmpadm, vxdisk, vxdg, vxassist, vxtune] + + - macro: veritas_driver_script + condition: (proc.cmdline startswith "perl /opt/VRTSsfmh/bin/mh_driver.pl") + + - macro: veritas_progs + condition: (proc.name in (veritas_binaries) or veritas_driver_script) + + - macro: veritas_writing_config + condition: (veritas_progs and (fd.name startswith /etc/vx or fd.name startswith /etc/opt/VRTS or fd.name startswith /etc/vom)) + + - macro: nginx_writing_conf + condition: (proc.name in (nginx,nginx-ingress-c,nginx-ingress) and (fd.name startswith /etc/nginx or fd.name startswith /etc/ingress-controller)) + + - macro: nginx_writing_certs + condition: > + (((proc.name=openssl and proc.pname=nginx-launch.sh) or proc.name=nginx-launch.sh) and fd.name startswith /etc/nginx/certs) + + - macro: chef_client_writing_conf + condition: (proc.pcmdline startswith "chef-client /opt/gitlab" and fd.name startswith /etc/gitlab) + + - macro: centrify_writing_krb + condition: (proc.name in (adjoin,addns) and fd.name startswith /etc/krb5) + + - macro: cockpit_writing_conf + condition: > + ((proc.pname=cockpit-kube-la or proc.aname[2]=cockpit-kube-la) + and fd.name startswith /etc/cockpit) + + - macro: ipsec_writing_conf + condition: (proc.name=start-ipsec.sh and fd.directory=/etc/ipsec) + + - macro: exe_running_docker_save + condition: > + proc.name = "exe" + and (proc.cmdline contains "/var/lib/docker" + or proc.cmdline contains "/var/run/docker") + and proc.pname in (dockerd, docker, dockerd-current, docker-current) + + # Ideally we'd have a length check here as well but + # filterchecks don't have operators like len() + - macro: sed_temporary_file + condition: (proc.name=sed and fd.name startswith "/etc/sed") + + - macro: python_running_get_pip + condition: (proc.cmdline 
startswith "python get-pip.py") + + - macro: python_running_ms_oms + condition: (proc.cmdline startswith "python /var/lib/waagent/") + + - macro: gugent_writing_guestagent_log + condition: (proc.name=gugent and fd.name=GuestAgent.log) + + - macro: dse_writing_tmp + condition: (proc.name=dse-entrypoint and fd.name=/root/tmp__) + + - macro: zap_writing_state + condition: (proc.name=java and proc.cmdline contains "jar /zap" and fd.name startswith /root/.ZAP) + + - macro: airflow_writing_state + condition: (proc.name=airflow and fd.name startswith /root/airflow) + + - macro: rpm_writing_root_rpmdb + condition: (proc.name=rpm and fd.directory=/root/.rpmdb) + + - macro: maven_writing_groovy + condition: (proc.name=java and proc.cmdline contains "classpath /usr/local/apache-maven" and fd.name startswith /root/.groovy) + + - macro: chef_writing_conf + condition: (proc.name=chef-client and fd.name startswith /root/.chef) + + - macro: kubectl_writing_state + condition: (proc.name in (kubectl,oc) and fd.name startswith /root/.kube) + + - macro: java_running_cassandra + condition: (proc.name=java and proc.cmdline contains "cassandra.jar") + + - macro: cassandra_writing_state + condition: (java_running_cassandra and fd.directory=/root/.cassandra) + + # Istio + - macro: galley_writing_state + condition: (proc.name=galley and fd.name in (known_istio_files)) + + - list: known_istio_files + items: [/healthready, /healthliveness] + + - macro: calico_writing_state + condition: (proc.name=kube-controller and fd.name startswith /status.json and k8s.pod.name startswith calico) + + - macro: calico_writing_envvars + condition: (proc.name=start_runit and fd.name startswith "/etc/envvars" and container.image.repository endswith "calico/node") + + - list: repository_files + items: [sources.list] + + - list: repository_directories + items: [/etc/apt/sources.list.d, /etc/yum.repos.d, /etc/apt] + + - macro: access_repositories + condition: (fd.directory in (repository_directories) or + (fd.name 
pmatch (repository_directories) and + fd.filename in (repository_files))) + + - macro: modify_repositories + condition: (evt.arg.newpath pmatch (repository_directories)) + + - macro: user_known_update_package_registry + condition: (never_true) + + - rule: Update Package Repository + desc: Detect package repositories get updated + condition: > + ((open_write and access_repositories) or (modify and modify_repositories)) + and not package_mgmt_procs + and not package_mgmt_ancestor_procs + and not exe_running_docker_save + and not user_known_update_package_registry + output: > + Repository files get updated (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline pcmdline=%proc.pcmdline file=%fd.name newpath=%evt.arg.newpath container_id=%container.id image=%container.image.repository) + priority: + NOTICE + tags: [filesystem, mitre_persistence] + + # Users should overwrite this macro to specify conditions under which a + # write under the binary dir is ignored. For example, it may be okay to + # install a binary in the context of a ci/cd build. 
+ - macro: user_known_write_below_binary_dir_activities + condition: (never_true) + + - rule: Write below binary dir + desc: an attempt to write to any file below a set of binary directories + condition: > + bin_dir and evt.dir = < and open_write + and not package_mgmt_procs + and not exe_running_docker_save + and not python_running_get_pip + and not python_running_ms_oms + and not user_known_write_below_binary_dir_activities + output: > + File below a known binary directory opened for writing (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + + # If you'd like to generally monitor a wider set of directories on top + # of the ones covered by the rule Write below binary dir, you can use + # the following rule and lists. + + - list: monitored_directories + items: [/boot, /lib, /lib64, /usr/lib, /usr/local/lib, /usr/local/sbin, /usr/local/bin, /root/.ssh] + + - macro: user_ssh_directory + condition: (fd.name glob '/home/*/.ssh/*') + + # google_accounts_(daemon) + - macro: google_accounts_daemon_writing_ssh + condition: (proc.name=google_accounts and user_ssh_directory) + + - macro: cloud_init_writing_ssh + condition: (proc.name=cloud-init and user_ssh_directory) + + - macro: mkinitramfs_writing_boot + condition: (proc.pname in (mkinitramfs, update-initramf) and fd.directory=/boot) + + - macro: monitored_dir + condition: > + (fd.directory in (monitored_directories) + or user_ssh_directory) + and not mkinitramfs_writing_boot + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs writing below monitored directories. + # + # Its default value is an expression that always is false, which + # becomes true when the "not ..." in the rule is applied. 
+ - macro: user_known_write_monitored_dir_conditions + condition: (never_true) + + - rule: Write below monitored dir + desc: an attempt to write to any file below a set of monitored directories + condition: > + evt.dir = < and open_write and monitored_dir + and not package_mgmt_procs + and not coreos_write_ssh_dir + and not exe_running_docker_save + and not python_running_get_pip + and not python_running_ms_oms + and not google_accounts_daemon_writing_ssh + and not cloud_init_writing_ssh + and not user_known_write_monitored_dir_conditions + output: > + File below a monitored directory opened for writing (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + + # This rule is disabled by default as many system management tools + # like ansible, etc can read these files/paths. Enable it using this macro. 
+ + - macro: consider_ssh_reads + condition: (never_true) + + - macro: user_known_read_ssh_information_activities + condition: (never_true) + + - rule: Read ssh information + desc: Any attempt to read files below ssh directories by non-ssh programs + condition: > + ((open_read or open_directory) and + consider_ssh_reads and + (user_ssh_directory or fd.name startswith /root/.ssh) and + not user_known_read_ssh_information_activities and + not proc.name in (ssh_binaries)) + output: > + ssh-related file/directory read by non-ssh program (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_discovery] + + - list: safe_etc_dirs + items: [/etc/cassandra, /etc/ssl/certs/java, /etc/logstash, /etc/nginx/conf.d, /etc/container_environment, /etc/hrmconfig, /etc/fluent/configs.d] + + - macro: fluentd_writing_conf_files + condition: (proc.name=start-fluentd and fd.name in (/etc/fluent/fluent.conf, /etc/td-agent/td-agent.conf)) + + - macro: qualys_writing_conf_files + condition: (proc.name=qualys-cloud-ag and fd.name=/etc/qualys/cloud-agent/qagent-log.conf) + + - macro: git_writing_nssdb + condition: (proc.name=git-remote-http and fd.directory=/etc/pki/nssdb) + + - macro: plesk_writing_keys + condition: (proc.name in (plesk_binaries) and fd.name startswith /etc/sw/keys) + + - macro: plesk_install_writing_apache_conf + condition: (proc.cmdline startswith "bash -hB /usr/lib/plesk-9.0/services/webserver.apache configure" + and fd.name="/etc/apache2/apache2.conf.tmp") + + - macro: plesk_running_mktemp + condition: (proc.name=mktemp and proc.aname[3] in (plesk_binaries)) + + - macro: networkmanager_writing_resolv_conf + condition: proc.aname[2]=nm-dispatcher and fd.name=/etc/resolv.conf + + - macro: add_shell_writing_shells_tmp + condition: (proc.name=add-shell and fd.name=/etc/shells.tmp) + + - macro: 
duply_writing_exclude_files + condition: (proc.name=touch and proc.pcmdline startswith "bash /usr/bin/duply" and fd.name startswith "/etc/duply") + + - macro: xmlcatalog_writing_files + condition: (proc.name=update-xmlcatal and fd.directory=/etc/xml) + + - macro: datadog_writing_conf + condition: ((proc.cmdline startswith "python /opt/datadog-agent" or + proc.cmdline startswith "entrypoint.sh /entrypoint.sh datadog start" or + proc.cmdline startswith "agent.py /opt/datadog-agent") + and fd.name startswith "/etc/dd-agent") + + - macro: rancher_writing_conf + condition: ((proc.name in (healthcheck, lb-controller, rancher-dns)) and + (container.image.repository contains "rancher/healthcheck" or + container.image.repository contains "rancher/lb-service-haproxy" or + container.image.repository contains "rancher/dns") and + (fd.name startswith "/etc/haproxy" or fd.name startswith "/etc/rancher-dns")) + + - macro: rancher_writing_root + condition: (proc.name=rancher-metadat and + (container.image.repository contains "rancher/metadata" or container.image.repository contains "rancher/lb-service-haproxy") and + fd.name startswith "/answers.json") + + - macro: checkpoint_writing_state + condition: (proc.name=checkpoint and + container.image.repository contains "coreos/pod-checkpointer" and + fd.name startswith "/etc/kubernetes") + + - macro: jboss_in_container_writing_passwd + condition: > + ((proc.cmdline="run-java.sh /opt/jboss/container/java/run/run-java.sh" + or proc.cmdline="run-java.sh /opt/run-java/run-java.sh") + and container + and fd.name=/etc/passwd) + + - macro: curl_writing_pki_db + condition: (proc.name=curl and fd.directory=/etc/pki/nssdb) + + - macro: haproxy_writing_conf + condition: ((proc.name in (update-haproxy-,haproxy_reload.) 
or proc.pname in (update-haproxy-,haproxy_reload,haproxy_reload.)) + and (fd.name=/etc/openvpn/client.map or fd.name startswith /etc/haproxy)) + + - macro: java_writing_conf + condition: (proc.name=java and fd.name=/etc/.java/.systemPrefs/.system.lock) + + - macro: rabbitmq_writing_conf + condition: (proc.name=rabbitmq-server and fd.directory=/etc/rabbitmq) + + - macro: rook_writing_conf + condition: (proc.name=toolbox.sh and container.image.repository=rook/toolbox + and fd.directory=/etc/ceph) + + - macro: httpd_writing_conf_logs + condition: (proc.name=httpd and fd.name startswith /etc/httpd/) + + - macro: mysql_writing_conf + condition: > + ((proc.name in (start-mysql.sh, run-mysqld) or proc.pname=start-mysql.sh) and + (fd.name startswith /etc/mysql or fd.directory=/etc/my.cnf.d)) + + - macro: redis_writing_conf + condition: > + (proc.name in (run-redis, redis-launcher.) and (fd.name=/etc/redis.conf or fd.name startswith /etc/redis)) + + - macro: openvpn_writing_conf + condition: (proc.name in (openvpn,openvpn-entrypo) and fd.name startswith /etc/openvpn) + + - macro: php_handlers_writing_conf + condition: (proc.name=php_handlers_co and fd.name=/etc/psa/php_versions.json) + + - macro: sed_writing_temp_file + condition: > + ((proc.aname[3]=cron_start.sh and fd.name startswith /etc/security/sed) or + (proc.name=sed and (fd.name startswith /etc/apt/sources.list.d/sed or + fd.name startswith /etc/apt/sed or + fd.name startswith /etc/apt/apt.conf.d/sed))) + + - macro: cron_start_writing_pam_env + condition: (proc.cmdline="bash /usr/sbin/start-cron" and fd.name=/etc/security/pam_env.conf) + + # In some cases dpkg-reconfigur runs commands that modify /etc. Not + # putting the full set of package management programs yet. 
+ - macro: dpkg_scripting + condition: (proc.aname[2] in (dpkg-reconfigur, dpkg-preconfigu)) + + - macro: ufw_writing_conf + condition: (proc.name=ufw and fd.directory=/etc/ufw) + + - macro: calico_writing_conf + condition: > + (((proc.name = calico-node) or + (container.image.repository=gcr.io/projectcalico-org/node and proc.name in (start_runit, cp)) or + (container.image.repository=gcr.io/projectcalico-org/cni and proc.name=sed)) + and fd.name startswith /etc/calico) + + - macro: prometheus_conf_writing_conf + condition: (proc.name=prometheus-conf and fd.name startswith /etc/prometheus/config_out) + + - macro: openshift_writing_conf + condition: (proc.name=oc and fd.name startswith /etc/origin/node) + + - macro: keepalived_writing_conf + condition: (proc.name=keepalived and fd.name=/etc/keepalived/keepalived.conf) + + - macro: etcd_manager_updating_dns + condition: (container and proc.name=etcd-manager and fd.name=/etc/hosts) + + - macro: automount_using_mtab + condition: (proc.pname = automount and fd.name startswith /etc/mtab) + + - macro: mcafee_writing_cma_d + condition: (proc.name=macompatsvc and fd.directory=/etc/cma.d) + + - macro: avinetworks_supervisor_writing_ssh + condition: > + (proc.cmdline="se_supervisor.p /opt/avi/scripts/se_supervisor.py -d" and + (fd.name startswith /etc/ssh/known_host_ or + fd.name startswith /etc/ssh/ssh_monitor_config_ or + fd.name startswith /etc/ssh/ssh_config_)) + + - macro: multipath_writing_conf + condition: (proc.name = multipath and fd.name startswith /etc/multipath/) + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs writing below specific directories below + # /etc. fluentd_writing_conf_files is a good example to follow, as it + # specifies both the program doing the writing as well as the specific + # files it is allowed to modify. + # + # In this file, it just takes one of the programs in the base macro + # and repeats it. 
+ + - macro: user_known_write_etc_conditions + condition: proc.name=confd + + # This is a placeholder for user to extend the whitelist for write below etc rule + - macro: user_known_write_below_etc_activities + condition: (never_true) + + - macro: write_etc_common + condition: > + etc_dir and evt.dir = < and open_write + and proc_name_exists + and not proc.name in (passwd_binaries, shadowutils_binaries, sysdigcloud_binaries, + package_mgmt_binaries, ssl_mgmt_binaries, dhcp_binaries, + dev_creation_binaries, shell_mgmt_binaries, + mail_config_binaries, + sshkit_script_binaries, + ldconfig.real, ldconfig, confd, gpg, insserv, + apparmor_parser, update-mime, tzdata.config, tzdata.postinst, + systemd, systemd-machine, systemd-sysuser, + debconf-show, rollerd, bind9.postinst, sv, + gen_resolvconf., update-ca-certi, certbot, runsv, + qualys-cloud-ag, locales.postins, nomachine_binaries, + adclient, certutil, crlutil, pam-auth-update, parallels_insta, + openshift-launc, update-rc.d, puppet) + and not (container and proc.cmdline in ("cp /run/secrets/kubernetes.io/serviceaccount/ca.crt /etc/pki/ca-trust/source/anchors/openshift-ca.crt")) + and not proc.pname in (sysdigcloud_binaries, mail_config_binaries, hddtemp.postins, sshkit_script_binaries, locales.postins, deb_binaries, dhcp_binaries) + and not fd.name pmatch (safe_etc_dirs) + and not fd.name in (/etc/container_environment.sh, /etc/container_environment.json, /etc/motd, /etc/motd.svc) + and not sed_temporary_file + and not exe_running_docker_save + and not ansible_running_python + and not python_running_denyhosts + and not fluentd_writing_conf_files + and not user_known_write_etc_conditions + and not run_by_centrify + and not run_by_adclient + and not qualys_writing_conf_files + and not git_writing_nssdb + and not plesk_writing_keys + and not plesk_install_writing_apache_conf + and not plesk_running_mktemp + and not networkmanager_writing_resolv_conf + and not run_by_chef + and not add_shell_writing_shells_tmp + and 
not duply_writing_exclude_files + and not xmlcatalog_writing_files + and not parent_supervise_running_multilog + and not supervise_writing_status + and not pki_realm_writing_realms + and not htpasswd_writing_passwd + and not lvprogs_writing_conf + and not ovsdb_writing_openvswitch + and not datadog_writing_conf + and not curl_writing_pki_db + and not haproxy_writing_conf + and not java_writing_conf + and not dpkg_scripting + and not parent_ucf_writing_conf + and not rabbitmq_writing_conf + and not rook_writing_conf + and not php_handlers_writing_conf + and not sed_writing_temp_file + and not cron_start_writing_pam_env + and not httpd_writing_conf_logs + and not mysql_writing_conf + and not openvpn_writing_conf + and not consul_template_writing_conf + and not countly_writing_nginx_conf + and not ms_oms_writing_conf + and not ms_scx_writing_conf + and not azure_scripts_writing_conf + and not azure_networkwatcher_writing_conf + and not couchdb_writing_conf + and not update_texmf_writing_conf + and not slapadd_writing_conf + and not symantec_writing_conf + and not liveupdate_writing_conf + and not sosreport_writing_files + and not selinux_writing_conf + and not veritas_writing_config + and not nginx_writing_conf + and not nginx_writing_certs + and not chef_client_writing_conf + and not centrify_writing_krb + and not cockpit_writing_conf + and not ipsec_writing_conf + and not httpd_writing_ssl_conf + and not userhelper_writing_etc_security + and not pkgmgmt_progs_writing_pki + and not update_ca_trust_writing_pki + and not brandbot_writing_os_release + and not redis_writing_conf + and not openldap_writing_conf + and not ucpagent_writing_conf + and not iscsi_writing_conf + and not istio_writing_conf + and not ufw_writing_conf + and not calico_writing_conf + and not calico_writing_envvars + and not prometheus_conf_writing_conf + and not openshift_writing_conf + and not keepalived_writing_conf + and not rancher_writing_conf + and not checkpoint_writing_state + and not 
jboss_in_container_writing_passwd + and not etcd_manager_updating_dns + and not user_known_write_below_etc_activities + and not automount_using_mtab + and not mcafee_writing_cma_d + and not avinetworks_supervisor_writing_ssh + and not multipath_writing_conf + + - rule: Write below etc + desc: an attempt to write to any file below /etc + condition: write_etc_common + output: "File below /etc opened for writing (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname pcmdline=%proc.pcmdline file=%fd.name program=%proc.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, mitre_persistence] + + - list: known_root_files + items: [/root/.monit.state, /root/.auth_tokens, /root/.bash_history, /root/.ash_history, /root/.aws/credentials, + /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, /root/.gitconfig.lock, /root/.babel.json, /root/.localstack, + /root/.node_repl_history, /root/.mongorc.js, /root/.dbshell, /root/.augeas/history, /root/.rnd, /root/.wget-hsts, /health, /exec.fifo] + + - list: known_root_directories + items: [/root/.oracle_jre_usage, /root/.ssh, /root/.subversion, /root/.nami] + + - macro: known_root_conditions + condition: (fd.name startswith /root/orcexec. 
+ or fd.name startswith /root/.m2 + or fd.name startswith /root/.npm + or fd.name startswith /root/.pki + or fd.name startswith /root/.ivy2 + or fd.name startswith /root/.config/Cypress + or fd.name startswith /root/.config/pulse + or fd.name startswith /root/.config/configstore + or fd.name startswith /root/jenkins/workspace + or fd.name startswith /root/.jenkins + or fd.name startswith /root/.cache + or fd.name startswith /root/.sbt + or fd.name startswith /root/.java + or fd.name startswith /root/.glide + or fd.name startswith /root/.sonar + or fd.name startswith /root/.v8flag + or fd.name startswith /root/infaagent + or fd.name startswith /root/.local/lib/python + or fd.name startswith /root/.pm2 + or fd.name startswith /root/.gnupg + or fd.name startswith /root/.pgpass + or fd.name startswith /root/.theano + or fd.name startswith /root/.gradle + or fd.name startswith /root/.android + or fd.name startswith /root/.ansible + or fd.name startswith /root/.crashlytics + or fd.name startswith /root/.dbus + or fd.name startswith /root/.composer + or fd.name startswith /root/.gconf + or fd.name startswith /root/.nv + or fd.name startswith /root/.local/share/jupyter + or fd.name startswith /root/oradiag_root + or fd.name startswith /root/workspace + or fd.name startswith /root/jvm + or fd.name startswith /root/.node-gyp) + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs writing below specific directories below + # / or /root. + # + # In this file, it just takes one of the condition in the base macro + # and repeats it. 
+ - macro: user_known_write_root_conditions + condition: fd.name=/root/.bash_history + + # This is a placeholder for user to extend the whitelist for write below root rule + - macro: user_known_write_below_root_activities + condition: (never_true) + + - macro: runc_writing_exec_fifo + condition: (proc.cmdline="runc:[1:CHILD] init" and fd.name=/exec.fifo) + + - macro: runc_writing_var_lib_docker + condition: (proc.cmdline="runc:[1:CHILD] init" and evt.arg.filename startswith /var/lib/docker) + + - macro: mysqlsh_writing_state + condition: (proc.name=mysqlsh and fd.directory=/root/.mysqlsh) + + - rule: Write below root + desc: an attempt to write to any file directly below / or /root + condition: > + root_dir and evt.dir = < and open_write + and proc_name_exists + and not fd.name in (known_root_files) + and not fd.directory pmatch (known_root_directories) + and not exe_running_docker_save + and not gugent_writing_guestagent_log + and not dse_writing_tmp + and not zap_writing_state + and not airflow_writing_state + and not rpm_writing_root_rpmdb + and not maven_writing_groovy + and not chef_writing_conf + and not kubectl_writing_state + and not cassandra_writing_state + and not galley_writing_state + and not calico_writing_state + and not rancher_writing_root + and not runc_writing_exec_fifo + and not mysqlsh_writing_state + and not known_root_conditions + and not user_known_write_root_conditions + and not user_known_write_below_root_activities + output: "File below / or /root opened for writing (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname file=%fd.name program=%proc.name container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, mitre_persistence] + + - macro: cmp_cp_by_passwd + condition: proc.name in (cmp, cp) and proc.pname in (passwd, run-parts) + + - macro: user_known_read_sensitive_files_activities + condition: (never_true) + + - rule: Read sensitive file trusted after startup 
+ desc: > + an attempt to read any sensitive file (e.g. files containing user/password/authentication + information) by a trusted program after startup. Trusted programs might read these files + at startup to load initial state, but not afterwards. + condition: sensitive_files and open_read and server_procs and not proc_is_new and proc.name!="sshd" and not user_known_read_sensitive_files_activities + output: > + Sensitive file opened for reading by trusted program after startup (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline parent=%proc.pname file=%fd.name parent=%proc.pname gparent=%proc.aname[2] container_id=%container.id image=%container.image.repository) + priority: WARNING + tags: [filesystem, mitre_credential_access] + + - list: read_sensitive_file_binaries + items: [ + iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd, + vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update, + pam-auth-update, pam-config, /usr/sbin/spamd, polkit-agent-he, lsattr, file, sosreport, + scxcimservera, adclient, rtvscand, cockpit-session, userhelper, ossec-syscheckd + ] + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs accessing sensitive files. + # fluentd_writing_conf_files is a good example to follow, as it + # specifies both the program doing the writing as well as the specific + # files it is allowed to modify. + # + # In this file, it just takes one of the macros in the base rule + # and repeats it. 
+ + - macro: user_read_sensitive_file_conditions + condition: cmp_cp_by_passwd + + - list: read_sensitive_file_images + items: [] + + - macro: user_read_sensitive_file_containers + condition: (container and container.image.repository in (read_sensitive_file_images)) + + # This macro detects man-db postinst, see https://salsa.debian.org/debian/man-db/-/blob/master/debian/postinst + # The rule "Read sensitive file untrusted" use this macro to avoid FPs. + - macro: mandb_postinst + condition: > + (proc.name=perl and proc.args startswith "-e" and + proc.args contains "@pwd = getpwnam(" and + proc.args contains "exec " and + proc.args contains "/usr/bin/mandb") + + - rule: Read sensitive file untrusted + desc: > + an attempt to read any sensitive file (e.g. files containing user/password/authentication + information). Exceptions are made for known trusted programs. + condition: > + sensitive_files and open_read + and proc_name_exists + and not proc.name in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries, + cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries, + vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries, + in.proftpd, mandb, salt-minion, postgres_mgmt_binaries, + google_oslogin_ + ) + and not cmp_cp_by_passwd + and not ansible_running_python + and not run_by_qualys + and not run_by_chef + and not run_by_google_accounts_daemon + and not user_read_sensitive_file_conditions + and not mandb_postinst + and not perl_running_plesk + and not perl_running_updmap + and not veritas_driver_script + and not perl_running_centrifydc + and not runuser_reading_pam + and not linux_bench_reading_etc_shadow + and not user_known_read_sensitive_files_activities + and not user_read_sensitive_file_containers + output: > + Sensitive file opened for reading by non-trusted program (user=%user.name user_loginuid=%user.loginuid program=%proc.name + command=%proc.cmdline file=%fd.name parent=%proc.pname gparent=%proc.aname[2] 
ggparent=%proc.aname[3] gggparent=%proc.aname[4] container_id=%container.id image=%container.image.repository) + priority: WARNING + tags: [filesystem, mitre_credential_access, mitre_discovery] + + - macro: amazon_linux_running_python_yum + condition: > + (proc.name = python and + proc.pcmdline = "python -m amazon_linux_extras system_motd" and + proc.cmdline startswith "python -c import yum;") + + - macro: user_known_write_rpm_database_activities + condition: (never_true) + + # Only let rpm-related programs write to the rpm database + - rule: Write below rpm database + desc: an attempt to write to the rpm database by any non-rpm related program + condition: > + fd.name startswith /var/lib/rpm and open_write + and not rpm_procs + and not ansible_running_python + and not python_running_chef + and not exe_running_docker_save + and not amazon_linux_running_python_yum + and not user_known_write_rpm_database_activities + output: "Rpm database opened for writing by a non-rpm program (command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, software_mgmt, mitre_persistence] + + - macro: postgres_running_wal_e + condition: (proc.pname=postgres and proc.cmdline startswith "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e") + + - macro: redis_running_prepost_scripts + condition: (proc.aname[2]=redis-server and (proc.cmdline contains "redis-server.post-up.d" or proc.cmdline contains "redis-server.pre-up.d")) + + - macro: rabbitmq_running_scripts + condition: > + (proc.pname=beam.smp and + (proc.cmdline startswith "sh -c exec ps" or + proc.cmdline startswith "sh -c exec inet_gethost" or + proc.cmdline= "sh -s unix:cmd" or + proc.cmdline= "sh -c exec /bin/sh -s unix:cmd 2>&1")) + + - macro: rabbitmqctl_running_scripts + condition: (proc.aname[2]=rabbitmqctl and proc.cmdline startswith "sh -c ") + + - macro: run_by_appdynamics + condition: (proc.pname=java 
and proc.pcmdline startswith "java -jar -Dappdynamics" + + - macro: user_known_db_spawned_processes + condition: (never_true) + + - rule: DB program spawned process + desc: > + a database-server related program spawned a new process other than itself. + This shouldn't occur and is a follow on from some SQL injection attacks. + condition: > + proc.pname in (db_server_binaries) + and spawned_process + and not proc.name in (db_server_binaries) + and not postgres_running_wal_e + and not user_known_db_spawned_processes + output: > + Database-related program spawned process other than itself (user=%user.name user_loginuid=%user.loginuid + program=%proc.cmdline parent=%proc.pname container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [process, database, mitre_execution] + + - macro: user_known_modify_bin_dir_activities + condition: (never_true) + + - rule: Modify binary dirs + desc: an attempt to modify any file below a set of binary directories. + condition: bin_dir_rename and modify and not package_mgmt_procs and not exe_running_docker_save and not user_known_modify_bin_dir_activities + output: > + File below known binary directory renamed/removed (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline + pcmdline=%proc.pcmdline operation=%evt.type file=%fd.name %evt.args container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + + - macro: user_known_mkdir_bin_dir_activities + condition: (never_true) + + - rule: Mkdir binary dirs + desc: an attempt to create a directory below a set of binary directories. 
+ condition: > + mkdir + and bin_dir_mkdir + and not package_mgmt_procs + and not user_known_mkdir_bin_dir_activities + and not exe_running_docker_save + output: > + Directory below known binary directory created (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline directory=%evt.arg.path container_id=%container.id image=%container.image.repository) + priority: ERROR + tags: [filesystem, mitre_persistence] + + # This list allows for easy additions to the set of commands allowed + # to change thread namespace without having to copy and override the + # entire change thread namespace rule. + - list: user_known_change_thread_namespace_binaries + items: [crio, multus] + + - macro: user_known_change_thread_namespace_activities + condition: (never_true) + + - list: network_plugin_binaries + items: [aws-cni, azure-vnet] + + - macro: calico_node + condition: (container.image.repository endswith calico/node and proc.name=calico-node) + + - macro: weaveworks_scope + condition: (container.image.repository endswith weaveworks/scope and proc.name=scope) + + - rule: Change thread namespace + desc: > + an attempt to change a program/thread's namespace (commonly done + as a part of creating a container) by calling setns. 
+ condition: > + evt.type=setns and evt.dir=< + and proc_name_exists + and not (container.id=host and proc.name in (docker_binaries, k8s_binaries, lxd_binaries, nsenter)) + and not proc.name in (sysdigcloud_binaries, sysdig, calico, oci-umount, cilium-cni, network_plugin_binaries) + and not proc.name in (user_known_change_thread_namespace_binaries) + and not proc.name startswith "runc" + and not proc.cmdline startswith "containerd" + and not proc.pname in (sysdigcloud_binaries, hyperkube, kubelet, protokube, dockerd, tini, aws) + and not java_running_sdjagent + and not kubelet_running_loopback + and not rancher_agent + and not rancher_network_manager + and not calico_node + and not weaveworks_scope + and not user_known_change_thread_namespace_activities + enabled: false + output: > + Namespace change (setns) by unexpected program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline + parent=%proc.pname %container.info container_id=%container.id image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [process, mitre_privilege_escalation, mitre_lateral_movement] + + # The binaries in this list and their descendents are *not* allowed + # spawn shells. This includes the binaries spawning shells directly as + # well as indirectly. For example, apache -> php/perl for + # mod_{php,perl} -> some shell is also not allowed, because the shell + # has apache as an ancestor. 
+ + - list: protected_shell_spawning_binaries + items: [ + http_server_binaries, db_server_binaries, nosql_server_binaries, mail_binaries, + fluentd, flanneld, splunkd, consul, smbd, runsv, PM2 + ] + + - macro: parent_java_running_zookeeper + condition: (proc.pname=java and proc.pcmdline contains org.apache.zookeeper.server) + + - macro: parent_java_running_kafka + condition: (proc.pname=java and proc.pcmdline contains kafka.Kafka) + + - macro: parent_java_running_elasticsearch + condition: (proc.pname=java and proc.pcmdline contains org.elasticsearch.bootstrap.Elasticsearch) + + - macro: parent_java_running_activemq + condition: (proc.pname=java and proc.pcmdline contains activemq.jar) + + - macro: parent_java_running_cassandra + condition: (proc.pname=java and (proc.pcmdline contains "-Dcassandra.config.loader" or proc.pcmdline contains org.apache.cassandra.service.CassandraDaemon)) + + - macro: parent_java_running_jboss_wildfly + condition: (proc.pname=java and proc.pcmdline contains org.jboss) + + - macro: parent_java_running_glassfish + condition: (proc.pname=java and proc.pcmdline contains com.sun.enterprise.glassfish) + + - macro: parent_java_running_hadoop + condition: (proc.pname=java and proc.pcmdline contains org.apache.hadoop) + + - macro: parent_java_running_datastax + condition: (proc.pname=java and proc.pcmdline contains com.datastax) + + - macro: nginx_starting_nginx + condition: (proc.pname=nginx and proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf") + + - macro: nginx_running_aws_s3_cp + condition: (proc.pname=nginx and proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp") + + - macro: consul_running_net_scripts + condition: (proc.pname=consul and (proc.cmdline startswith "sh -c curl" or proc.cmdline startswith "sh -c nc")) + + - macro: consul_running_alert_checks + condition: (proc.pname=consul and proc.cmdline startswith "sh -c /bin/consul-alerts") + + - macro: serf_script + condition: (proc.cmdline startswith "sh -c serf") + 
+ - macro: check_process_status + condition: (proc.cmdline startswith "sh -c kill -0 ") + + # In some cases, you may want to consider node processes run directly + # in containers as protected shell spawners. Examples include using + # pm2-docker or pm2 start some-app.js --no-daemon-mode as the direct + # entrypoint of the container, and when the node app is a long-lived + # server using something like express. + # + # However, there are other uses of node related to build pipelines for + # which node is not really a server but instead a general scripting + # tool. In these cases, shells are very likely and in these cases you + # don't want to consider node processes protected shell spawners. + # + # We have to choose one of these cases, so we consider node processes + # as unprotected by default. If you want to consider any node process + # run in a container as a protected shell spawner, override the below + # macro to remove the "never_true" clause, which allows it to take effect. + - macro: possibly_node_in_container + condition: (never_true and (proc.pname=node and proc.aname[3]=docker-containe)) + + # Similarly, you may want to consider any shell spawned by apache + # tomcat as suspect. The famous apache struts attack (CVE-2017-5638) + # could be exploited to do things like spawn shells. + # + # However, many applications *do* use tomcat to run arbitrary shells, + # as a part of build pipelines, etc. + # + # Like for node, we make this case opt-in. 
+ - macro: possibly_parent_java_running_tomcat + condition: (never_true and proc.pname=java and proc.pcmdline contains org.apache.catalina.startup.Bootstrap) + + - macro: protected_shell_spawner + condition: > + (proc.aname in (protected_shell_spawning_binaries) + or parent_java_running_zookeeper + or parent_java_running_kafka + or parent_java_running_elasticsearch + or parent_java_running_activemq + or parent_java_running_cassandra + or parent_java_running_jboss_wildfly + or parent_java_running_glassfish + or parent_java_running_hadoop + or parent_java_running_datastax + or possibly_parent_java_running_tomcat + or possibly_node_in_container) + + - list: mesos_shell_binaries + items: [mesos-docker-ex, mesos-slave, mesos-health-ch] + + # Note that runsv is both in protected_shell_spawner and the + # exclusions by pname. This means that runsv can itself spawn shells + # (the ./run and ./finish scripts), but the processes runsv can not + # spawn shells. + - rule: Run shell untrusted + desc: an attempt to spawn a shell below a non-shell application. Specific applications are monitored. 
+ condition: > + spawned_process + and shell_procs + and proc.pname exists + and protected_shell_spawner + and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, + needrestart_binaries, + mesos_shell_binaries, + erl_child_setup, exechealthz, + PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, + lb-controller, nvidia-installe, runsv, statsite, erlexec, calico-node, + "puma reactor") + and not proc.cmdline in (known_shell_spawn_cmdlines) + and not proc.aname in (unicorn_launche) + and not consul_running_net_scripts + and not consul_running_alert_checks + and not nginx_starting_nginx + and not nginx_running_aws_s3_cp + and not run_by_package_mgmt_binaries + and not serf_script + and not check_process_status + and not run_by_foreman + and not python_mesos_marathon_scripting + and not splunk_running_forwarder + and not postgres_running_wal_e + and not redis_running_prepost_scripts + and not rabbitmq_running_scripts + and not rabbitmqctl_running_scripts + and not run_by_appdynamics + and not user_shell_container_exclusions + output: > + Shell spawned by untrusted binary (user=%user.name user_loginuid=%user.loginuid shell=%proc.name parent=%proc.pname + cmdline=%proc.cmdline pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] + aname[4]=%proc.aname[4] aname[5]=%proc.aname[5] aname[6]=%proc.aname[6] aname[7]=%proc.aname[7] container_id=%container.id image=%container.image.repository) + priority: DEBUG + tags: [shell, mitre_execution] + + - macro: allowed_openshift_registry_root + condition: > + (container.image.repository startswith openshift3/ or + container.image.repository startswith registry.redhat.io/openshift3/ or + container.image.repository startswith registry.access.redhat.com/openshift3/) + + # Source: https://docs.openshift.com/enterprise/3.2/install_config/install/disconnected_install.html + - macro: openshift_image + condition: > + (allowed_openshift_registry_root and + 
(container.image.repository endswith /logging-deployment or + container.image.repository endswith /logging-elasticsearch or + container.image.repository endswith /logging-kibana or + container.image.repository endswith /logging-fluentd or + container.image.repository endswith /logging-auth-proxy or + container.image.repository endswith /metrics-deployer or + container.image.repository endswith /metrics-hawkular-metrics or + container.image.repository endswith /metrics-cassandra or + container.image.repository endswith /metrics-heapster or + container.image.repository endswith /ose-haproxy-router or + container.image.repository endswith /ose-deployer or + container.image.repository endswith /ose-sti-builder or + container.image.repository endswith /ose-docker-builder or + container.image.repository endswith /ose-pod or + container.image.repository endswith /ose-node or + container.image.repository endswith /ose-docker-registry or + container.image.repository endswith /prometheus-node-exporter or + container.image.repository endswith /image-inspector)) + + # https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html + # official AWS EKS registry list. 
AWS has different ECR repo per region + - macro: allowed_aws_ecr_registry_root_for_eks + condition: > + (container.image.repository startswith "602401143452.dkr.ecr" or + container.image.repository startswith "877085696533.dkr.ecr" or + container.image.repository startswith "800184023465.dkr.ecr" or + container.image.repository startswith "602401143452.dkr.ecr" or + container.image.repository startswith "918309763551.dkr.ecr" or + container.image.repository startswith "961992271922.dkr.ecr" or + container.image.repository startswith "590381155156.dkr.ecr" or + container.image.repository startswith "558608220178.dkr.ecr" or + container.image.repository startswith "151742754352.dkr.ecr" or + container.image.repository startswith "013241004608.dkr.ecr") + + + - macro: aws_eks_core_images + condition: > + (allowed_aws_ecr_registry_root_for_eks and + (container.image.repository endswith ".amazonaws.com/amazon-k8s-cni" or + container.image.repository endswith ".amazonaws.com/eks/kube-proxy")) + + + - macro: aws_eks_image_sensitive_mount + condition: > + (allowed_aws_ecr_registry_root_for_eks and container.image.repository endswith ".amazonaws.com/amazon-k8s-cni") + + # These images are allowed both to run with --privileged and to mount + # sensitive paths from the host filesystem. + # + # NOTE: This list is only provided for backwards compatibility with + # older local falco rules files that may have been appending to + # trusted_images. To make customizations, it's better to add images to + # either privileged_images or falco_sensitive_mount_images. + - list: trusted_images + items: [] + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to specify additional containers that are + # trusted and therefore allowed to run privileged *and* with sensitive + # mounts. 
+ # + # Like trusted_images, this is deprecated in favor of + # user_privileged_containers and user_sensitive_mount_containers and + # is only provided for backwards compatibility. + # + # In this file, it just takes one of the images in trusted_containers + # and repeats it. + - macro: user_trusted_containers + condition: (never_true) + + - list: sematext_images + items: [docker.io/sematext/sematext-agent-docker, docker.io/sematext/agent, docker.io/sematext/logagent, + registry.access.redhat.com/sematext/sematext-agent-docker, + registry.access.redhat.com/sematext/agent, + registry.access.redhat.com/sematext/logagent] + + # These container images are allowed to run with --privileged + - list: falco_privileged_images + items: [ + docker.io/calico/node, + calico/node, + docker.io/cloudnativelabs/kube-router, + docker.io/docker/ucp-agent, + docker.io/falcosecurity/falco, + docker.io/mesosphere/mesos-slave, + docker.io/rook/toolbox, + docker.io/sysdig/sysdig, + falcosecurity/falco, + gcr.io/google_containers/kube-proxy, + gcr.io/google-containers/startup-script, + gcr.io/projectcalico-org/node, + gke.gcr.io/kube-proxy, + gke.gcr.io/gke-metadata-server, + gke.gcr.io/netd-amd64, + gcr.io/google-containers/prometheus-to-sd, + k8s.gcr.io/ip-masq-agent-amd64, + k8s.gcr.io/kube-proxy, + k8s.gcr.io/prometheus-to-sd, + quay.io/calico/node, + sysdig/sysdig, + sematext_images + ] + + - macro: falco_privileged_containers + condition: (openshift_image or + user_trusted_containers or + aws_eks_core_images or + container.image.repository in (trusted_images) or + container.image.repository in (falco_privileged_images) or + container.image.repository startswith istio/proxy_ or + container.image.repository startswith quay.io/sysdig/) + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to specify additional containers that are + # allowed to run privileged + # + # In this file, it just takes one of the images in falco_privileged_images + # and 
repeats it. + - macro: user_privileged_containers + condition: (never_true) + + # These container images are allowed to mount sensitive paths from the + # host filesystem. + - list: falco_sensitive_mount_images + items: [ + docker.io/sysdig/sysdig, sysdig/sysdig, + docker.io/falcosecurity/falco, falcosecurity/falco, + gcr.io/google_containers/hyperkube, + gcr.io/google_containers/kube-proxy, docker.io/calico/node, + docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, docker.io/consul, + docker.io/datadog/docker-dd-agent, docker.io/datadog/agent, docker.io/docker/ucp-agent, docker.io/gliderlabs/logspout, + docker.io/netdata/netdata, docker.io/google/cadvisor, docker.io/prom/node-exporter, + amazon/amazon-ecs-agent, prom/node-exporter, amazon/cloudwatch-agent + ] + + - macro: falco_sensitive_mount_containers + condition: (user_trusted_containers or + aws_eks_image_sensitive_mount or + container.image.repository in (trusted_images) or + container.image.repository in (falco_sensitive_mount_images) or + container.image.repository startswith quay.io/sysdig/) + + # These container images are allowed to run with hostnetwork=true + - list: falco_hostnetwork_images + items: [ + gcr.io/google-containers/prometheus-to-sd, + gcr.io/projectcalico-org/typha, + gcr.io/projectcalico-org/node, + gke.gcr.io/gke-metadata-server, + gke.gcr.io/kube-proxy, + gke.gcr.io/netd-amd64, + k8s.gcr.io/ip-masq-agent-amd64, + k8s.gcr.io/prometheus-to-sd, + ] + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to specify additional containers that are + # allowed to perform sensitive mounts. + # + # In this file, it just takes one of the images in falco_sensitive_mount_images + # and repeats it. + - macro: user_sensitive_mount_containers + condition: (never_true) + + - rule: Launch Privileged Container + desc: Detect the initial process started in a privileged container. Exceptions are made for known trusted images. 
+ condition: > + container_started and container + and container.privileged=true + and not falco_privileged_containers + and not user_privileged_containers + output: Privileged container started (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag) + priority: INFO + tags: [container, cis, mitre_privilege_escalation, mitre_lateral_movement] + + # For now, only considering a full mount of /etc as + # sensitive. Ideally, this would also consider all subdirectories + # below /etc as well, but the globbing mechanism used by sysdig + # doesn't allow exclusions of a full pattern, only single characters. + - macro: sensitive_mount + condition: (container.mount.dest[/proc*] != "N/A" or + container.mount.dest[/var/run/docker.sock] != "N/A" or + container.mount.dest[/var/run/crio/crio.sock] != "N/A" or + container.mount.dest[/var/lib/kubelet] != "N/A" or + container.mount.dest[/var/lib/kubelet/pki] != "N/A" or + container.mount.dest[/] != "N/A" or + container.mount.dest[/home/admin] != "N/A" or + container.mount.dest[/etc] != "N/A" or + container.mount.dest[/etc/kubernetes] != "N/A" or + container.mount.dest[/etc/kubernetes/manifests] != "N/A" or + container.mount.dest[/root*] != "N/A") + + # The steps libcontainer performs to set up the root program for a container are: + # - clone + exec self to a program runc:[0:PARENT] + # - clone a program runc:[1:CHILD] which sets up all the namespaces + # - clone a second program runc:[2:INIT] + exec to the root program. + # The parent of runc:[2:INIT] is runc:0:PARENT] + # As soon as 1:CHILD is created, 0:PARENT exits, so there's a race + # where at the time 2:INIT execs the root program, 0:PARENT might have + # already exited, or might still be around. So we handle both. + # We also let runc:[1:CHILD] count as the parent process, which can occur + # when we lose events and lose track of state. 
+ + - macro: container_entrypoint + condition: (not proc.pname exists or proc.pname in (runc:[0:PARENT], runc:[1:CHILD], runc, docker-runc, exe, docker-runc-cur)) + + - rule: Launch Sensitive Mount Container + desc: > + Detect the initial process started by a container that has a mount from a sensitive host directory + (i.e. /proc). Exceptions are made for known trusted images. + condition: > + container_started and container + and sensitive_mount + and not falco_sensitive_mount_containers + and not user_sensitive_mount_containers + output: Container with sensitive mount started (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag mounts=%container.mounts) + priority: INFO + tags: [container, cis, mitre_lateral_movement] + + # In a local/user rules file, you could override this macro to + # explicitly enumerate the container images that you want to run in + # your environment. In this main falco rules file, there isn't any way + # to know all the containers that can run, so any container is + # allowed, by using a filter that is guaranteed to evaluate to true. + # In the overridden macro, the condition would look something like + # (container.image.repository = vendor/container-1 or + # container.image.repository = vendor/container-2 or ...) + + - macro: allowed_containers + condition: (container.id exists) + + - rule: Launch Disallowed Container + desc: > + Detect the initial process started by a container that is not in a list of allowed containers. 
+ condition: container_started and container and not allowed_containers + output: Container started and not in allowed list (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag) + priority: WARNING + tags: [container, mitre_lateral_movement] + + - macro: user_known_system_user_login + condition: (never_true) + + # Anything run interactively by root + # - condition: evt.type != switch and user.name = root and proc.name != sshd and interactive + # output: "Interactive root (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)" + # priority: WARNING + + - rule: System user interactive + desc: an attempt to run interactive commands by a system (i.e. non-login) user + condition: spawned_process and system_users and interactive and not user_known_system_user_login + output: "System user ran an interactive command (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id image=%container.image.repository)" + priority: INFO + tags: [users, mitre_remote_access_tools] + + # In some cases, a shell is expected to be run in a container. For example, configuration + # management software may do this, which is expected. + - macro: user_expected_terminal_shell_in_container_conditions + condition: (never_true) + + - rule: Terminal shell in container + desc: A shell was used as the entrypoint/exec point into a container with an attached terminal. 
+ condition: > + spawned_process and container + and shell_procs and proc.tty != 0 + and container_entrypoint + and not user_expected_terminal_shell_in_container_conditions + output: > + A shell was spawned in a container with an attached terminal (user=%user.name user_loginuid=%user.loginuid %container.info + shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [container, shell, mitre_execution] + + # For some container types (mesos), there isn't a container image to + # work with, and the container name is autogenerated, so there isn't + # any stable aspect of the software to work with. In this case, we + # fall back to allowing certain command lines. + + - list: known_shell_spawn_cmdlines + items: [ + '"sh -c uname -p 2> /dev/null"', + '"sh -c uname -s 2>&1"', + '"sh -c uname -r 2>&1"', + '"sh -c uname -v 2>&1"', + '"sh -c uname -a 2>&1"', + '"sh -c ruby -v 2>&1"', + '"sh -c getconf CLK_TCK"', + '"sh -c getconf PAGESIZE"', + '"sh -c LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null"', + '"sh -c LANG=C /sbin/ldconfig -p 2>/dev/null"', + '"sh -c /sbin/ldconfig -p 2>/dev/null"', + '"sh -c stty -a 2>/dev/null"', + '"sh -c stty -a < /dev/tty"', + '"sh -c stty -g < /dev/tty"', + '"sh -c node index.js"', + '"sh -c node index"', + '"sh -c node ./src/start.js"', + '"sh -c node app.js"', + '"sh -c node -e \"require(''nan'')\""', + '"sh -c node -e \"require(''nan'')\")"', + '"sh -c node $NODE_DEBUG_OPTION index.js "', + '"sh -c crontab -l 2"', + '"sh -c lsb_release -a"', + '"sh -c lsb_release -is 2>/dev/null"', + '"sh -c whoami"', + '"sh -c node_modules/.bin/bower-installer"', + '"sh -c /bin/hostname -f 2> /dev/null"', + '"sh -c locale -a"', + '"sh -c -t -i"', + '"sh -c openssl version"', + '"bash -c id -Gn kafadmin"', + '"sh -c /bin/sh -c ''date +%%s''"' + ] + + # This list allows for easy additions to the set of commands allowed + # to run shells in containers 
without having to copy + # and override the entire run shell in container macro. Once + # https://github.com/draios/falco/issues/255 is fixed this will be a + # bit easier, as someone could append to any of the existing lists. + - list: user_known_shell_spawn_binaries + items: [] + + # This macro allows for easy additions to the set of commands allowed + # to run shells in containers without having to override the entire + # rule. Its default value is an expression that always is false, which + # becomes true when the "not ..." in the rule is applied. + - macro: user_shell_container_exclusions + condition: (never_true) + + - macro: login_doing_dns_lookup + condition: (proc.name=login and fd.l4proto=udp and fd.sport=53) + + # sockfamily ip is to exclude certain processes (like 'groups') that communicate on unix-domain sockets + # systemd can listen on ports to launch things like sshd on demand + - rule: System procs network activity + desc: any network activity performed by system binaries that are not expected to send or receive any network traffic + condition: > + (fd.sockfamily = ip and (system_procs or proc.name in (shell_binaries))) + and (inbound_outbound) + and not proc.name in (known_system_procs_network_activity_binaries) + and not login_doing_dns_lookup + and not user_expected_system_procs_network_activity_conditions + output: > + Known system binary sent/received network traffic + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + + # This list allows easily whitelisting system proc names that are + # expected to communicate on the network. + - list: known_system_procs_network_activity_binaries + items: [systemd, hostid, id] + + # This macro allows specifying conditions under which a system binary + # is allowed to communicate on the network. 
For instance, only specific + # proc.cmdline values could be allowed to be more granular in what is + # allowed. + - macro: user_expected_system_procs_network_activity_conditions + condition: (never_true) + + # When filled in, this should look something like: + # (proc.env contains "HTTP_PROXY=http://my.http.proxy.com ") + # The trailing space is intentional so avoid matching on prefixes of + # the actual proxy. + - macro: allowed_ssh_proxy_env + condition: (always_true) + + - list: http_proxy_binaries + items: [curl, wget] + + - macro: http_proxy_procs + condition: (proc.name in (http_proxy_binaries)) + + - rule: Program run with disallowed http proxy env + desc: An attempt to run a program with a disallowed HTTP_PROXY environment variable + condition: > + spawned_process and + http_proxy_procs and + not allowed_ssh_proxy_env and + proc.env icontains HTTP_PROXY + output: > + Program run with disallowed HTTP_PROXY environment variable + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline env=%proc.env parent=%proc.pname container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [host, users] + + # In some environments, any attempt by a interpreted program (perl, + # python, ruby, etc) to listen for incoming connections or perform + # outgoing connections might be suspicious. These rules are not + # enabled by default, but you can modify the following macros to + # enable them. + + - macro: consider_interpreted_inbound + condition: (never_true) + + - macro: consider_interpreted_outbound + condition: (never_true) + + - rule: Interpreted procs inbound network activity + desc: Any inbound network activity performed by any interpreted program (perl, python, ruby, etc.) 
+ condition: > + (inbound and consider_interpreted_inbound + and interpreted_procs) + output: > + Interpreted program received/listened for network traffic + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + + - rule: Interpreted procs outbound network activity + desc: Any outbound network activity performed by any interpreted program (perl, python, ruby, etc.) + condition: > + (outbound and consider_interpreted_outbound + and interpreted_procs) + output: > + Interpreted program performed outgoing network connection + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + + - list: openvpn_udp_ports + items: [1194, 1197, 1198, 8080, 9201] + + - list: l2tp_udp_ports + items: [500, 1701, 4500, 10000] + + - list: statsd_ports + items: [8125] + + - list: ntp_ports + items: [123] + + # Some applications will connect a udp socket to an address only to + # test connectivity. Assuming the udp connect works, they will follow + # up with a tcp connect that actually sends/receives data. + # + # With that in mind, we listed a few commonly seen ports here to avoid + # some false positives. In addition, we make the main rule opt-in, so + # it's disabled by default. 
+ + - list: test_connect_ports + items: [0, 9, 80, 3306] + + - macro: do_unexpected_udp_check + condition: (never_true) + + - list: expected_udp_ports + items: [53, openvpn_udp_ports, l2tp_udp_ports, statsd_ports, ntp_ports, test_connect_ports] + + - macro: expected_udp_traffic + condition: fd.port in (expected_udp_ports) + + - rule: Unexpected UDP Traffic + desc: UDP traffic not on port 53 (DNS) or other commonly used ports + condition: (inbound_outbound) and do_unexpected_udp_check and fd.l4proto=udp and not expected_udp_traffic + output: > + Unexpected UDP Traffic Seen + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline connection=%fd.name proto=%fd.l4proto evt=%evt.type %evt.args container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, mitre_exfiltration] + + # With the current restriction on system calls handled by falco + # (e.g. excluding read/write/sendto/recvfrom/etc, this rule won't + # trigger). + # - rule: Ssh error in syslog + # desc: any ssh errors (failed logins, disconnects, ...) sent to syslog + # condition: syslog and ssh_error_message and evt.dir = < + # output: "sshd sent error message to syslog (error=%evt.buffer)" + # priority: WARNING + + - macro: somebody_becoming_themself + condition: ((user.name=nobody and evt.arg.uid=nobody) or + (user.name=www-data and evt.arg.uid=www-data) or + (user.name=_apt and evt.arg.uid=_apt) or + (user.name=postfix and evt.arg.uid=postfix) or + (user.name=pki-agent and evt.arg.uid=pki-agent) or + (user.name=pki-acme and evt.arg.uid=pki-acme) or + (user.name=nfsnobody and evt.arg.uid=nfsnobody) or + (user.name=postgres and evt.arg.uid=postgres)) + + - macro: nrpe_becoming_nagios + condition: (proc.name=nrpe and evt.arg.uid=nagios) + + # In containers, the user name might be for a uid that exists in the + # container but not on the host. (See + # https://github.com/draios/sysdig/issues/954). So in that case, allow + # a setuid. 
+ - macro: known_user_in_container + condition: (container and user.name != "N/A") + + # Add conditions to this macro (probably in a separate file, + # overwriting this macro) to allow for specific combinations of + # programs changing users by calling setuid. + # + # In this file, it just takes one of the condition in the base macro + # and repeats it. + - macro: user_known_non_sudo_setuid_conditions + condition: user.name=root + + # sshd, mail programs attempt to setuid to root even when running as non-root. Excluding here to avoid meaningless FPs + - rule: Non sudo setuid + desc: > + an attempt to change users by calling setuid. sudo/su are excluded. users "root" and "nobody" + suing to itself are also excluded, as setuid calls typically involve dropping privileges. + condition: > + evt.type=setuid and evt.dir=> + and (known_user_in_container or not container) + and not (user.name=root or user.uid=0) + and not somebody_becoming_themself + and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, + nomachine_binaries) + and not proc.name startswith "runc:" + and not java_running_sdjagent + and not nrpe_becoming_nagios + and not user_known_non_sudo_setuid_conditions + output: > + Unexpected setuid call by non-sudo, non-root program (user=%user.name user_loginuid=%user.loginuid cur_uid=%user.uid parent=%proc.pname + command=%proc.cmdline uid=%evt.arg.uid container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [users, mitre_privilege_escalation] + + - macro: user_known_user_management_activities + condition: (never_true) + + - macro: chage_list + condition: (proc.name=chage and (proc.cmdline contains "-l" or proc.cmdline contains "--list")) + + - rule: User mgmt binaries + desc: > + activity by any programs that can manage users, passwords, or permissions. sudo and su are excluded. 
+ Activity in containers is also excluded--some containers create custom users on top + of a base linux distribution at startup. + Some innocuous commandlines that don't actually change anything are excluded. + condition: > + spawned_process and proc.name in (user_mgmt_binaries) and + not proc.name in (su, sudo, lastlog, nologin, unix_chkpwd) and not container and + not proc.pname in (cron_binaries, systemd, systemd.postins, udev.postinst, run-parts) and + not proc.cmdline startswith "passwd -S" and + not proc.cmdline startswith "useradd -D" and + not proc.cmdline startswith "systemd --version" and + not run_by_qualys and + not run_by_sumologic_securefiles and + not run_by_yum and + not run_by_ms_oms and + not run_by_google_accounts_daemon and + not chage_list and + not user_known_user_management_activities + output: > + User management binary command run outside of container + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) + priority: NOTICE + tags: [host, users, mitre_persistence] + + - list: allowed_dev_files + items: [ + /dev/null, /dev/stdin, /dev/stdout, /dev/stderr, + /dev/random, /dev/urandom, /dev/console, /dev/kmsg + ] + + - macro: user_known_create_files_below_dev_activities + condition: (never_true) + + # (we may need to add additional checks against false positives, see: + # https://bugs.launchpad.net/ubuntu/+source/rkhunter/+bug/86153) + - rule: Create files below dev + desc: creating any files below /dev other than known programs that manage devices. Some rootkits hide files in /dev. 
+ condition: > + fd.directory = /dev and + (evt.type = creat or ((evt.type = open or evt.type = openat) and evt.arg.flags contains O_CREAT)) + and not proc.name in (dev_creation_binaries) + and not fd.name in (allowed_dev_files) + and not fd.name startswith /dev/tty + and not user_known_create_files_below_dev_activities + output: "File created below /dev by untrusted program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository)" + priority: ERROR + tags: [filesystem, mitre_persistence] + + + # In a local/user rules file, you could override this macro to + # explicitly enumerate the container images that you want to allow + # access to EC2 metadata. In this main falco rules file, there isn't + # any way to know all the containers that should have access, so any + # container is allowed, by repeating the "container" macro. In the + # overridden macro, the condition would look something like + # (container.image.repository = vendor/container-1 or + # container.image.repository = vendor/container-2 or ...) + - macro: ec2_metadata_containers + condition: container + + # On EC2 instances, 169.254.169.254 is a special IP used to fetch + # metadata about the instance. It may be desirable to prevent access + # to this IP from containers. + - rule: Contact EC2 Instance Metadata Service From Container + desc: Detect attempts to contact the EC2 Instance Metadata Service from a container + condition: outbound and fd.sip="169.254.169.254" and container and not ec2_metadata_containers + output: Outbound connection to EC2 instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, aws, container, mitre_discovery] + + + # This rule is not enabled by default, since this rule is for cloud environment (GCP, AWS and Azure) only. 
+ # If you want to enable this rule, overwrite the first macro, + # And you can filter the container that you want to allow access to metadata by overwriting the second macro. + - macro: consider_metadata_access + condition: (never_true) + + - macro: user_known_metadata_access + condition: (k8s.ns.name = "kube-system") + + # On GCP, AWS and Azure, 169.254.169.254 is a special IP used to fetch + # metadata about the instance. The metadata could be used to get credentials by attackers. + - rule: Contact cloud metadata service from container + desc: Detect attempts to contact the Cloud Instance Metadata Service from a container + condition: outbound and fd.sip="169.254.169.254" and container and consider_metadata_access and not user_known_metadata_access + output: Outbound connection to cloud instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, container, mitre_discovery] + + # Containers from IBM Cloud + - list: ibm_cloud_containers + items: + - icr.io/ext/sysdig/agent + - registry.ng.bluemix.net/armada-master/metrics-server-amd64 + - registry.ng.bluemix.net/armada-master/olm + + # In a local/user rules file, list the namespace or container images that are + # allowed to contact the K8s API Server from within a container. This + # might cover cases where the K8s infrastructure itself is running + # within a container. 
+ - macro: k8s_containers + condition: > + (container.image.repository in (gcr.io/google_containers/hyperkube-amd64, + gcr.io/google_containers/kube2sky, + docker.io/sysdig/sysdig, docker.io/falcosecurity/falco, + sysdig/sysdig, falcosecurity/falco, + fluent/fluentd-kubernetes-daemonset, prom/prometheus, + ibm_cloud_containers) + or (k8s.ns.name = "kube-system")) + + - macro: k8s_api_server + condition: (fd.sip.name="kubernetes.default.svc.cluster.local") + + - macro: user_known_contact_k8s_api_server_activities + condition: (never_true) + + - rule: Contact K8S API Server From Container + desc: Detect attempts to contact the K8S API Server from a container + condition: > + evt.type=connect and evt.dir=< and + (fd.typechar=4 or fd.typechar=6) and + container and + not k8s_containers and + k8s_api_server and + not user_known_contact_k8s_api_server_activities + output: Unexpected connection to K8s API Server from container (command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag connection=%fd.name) + priority: NOTICE + tags: [network, k8s, container, mitre_discovery] + + # In a local/user rules file, list the container images that are + # allowed to contact NodePort services from within a container. This + # might cover cases where the K8s infrastructure itself is running + # within a container. + # + # By default, all containers are allowed to contact NodePort services. 
+ - macro: nodeport_containers + condition: container + + - rule: Unexpected K8s NodePort Connection + desc: Detect attempts to use K8s NodePorts from a container + condition: (inbound_outbound) and fd.sport >= 30000 and fd.sport <= 32767 and container and not nodeport_containers + output: Unexpected K8s NodePort Connection (command=%proc.cmdline connection=%fd.name container_id=%container.id image=%container.image.repository) + priority: NOTICE + tags: [network, k8s, container, mitre_port_knocking] + + - list: network_tool_binaries + items: [nc, ncat, nmap, dig, tcpdump, tshark, ngrep, telnet, mitmproxy, socat, zmap] + + - macro: network_tool_procs + condition: (proc.name in (network_tool_binaries)) + + # In a local/user rules file, create a condition that matches legitimate uses + # of a package management process inside a container. + # + # For example: + # - macro: user_known_package_manager_in_container + # condition: proc.cmdline="dpkg -l" + - macro: user_known_package_manager_in_container + condition: (never_true) + + # Container is supposed to be immutable. Package management should be done in building the image. 
+ - rule: Launch Package Management Process in Container + desc: Package management process ran inside container + condition: > + spawned_process + and container + and user.name != "_apt" + and package_mgmt_procs + and not package_mgmt_ancestor_procs + and not user_known_package_manager_in_container + output: > + Package management process launched in container (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: ERROR + tags: [process, mitre_persistence] + + - rule: Netcat Remote Code Execution in Container + desc: Netcat Program runs inside container that allows remote code execution + condition: > + spawned_process and container and + ((proc.name = "nc" and (proc.args contains "-e" or proc.args contains "-c")) or + (proc.name = "ncat" and (proc.args contains "--sh-exec" or proc.args contains "--exec" or proc.args contains "-e " + or proc.args contains "-c " or proc.args contains "--lua-exec")) + ) + output: > + Netcat runs inside container that allows remote code execution (user=%user.name user_loginuid=%user.loginuid + command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: WARNING + tags: [network, process, mitre_execution] + + - macro: user_known_network_tool_activities + condition: (never_true) + + - rule: Launch Suspicious Network Tool in Container + desc: Detect network tools launched inside container + condition: > + spawned_process and container and network_tool_procs and not user_known_network_tool_activities + output: > + Network tool launched in container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname + container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, process, 
mitre_discovery, mitre_exfiltration] + + # This rule is not enabled by default, as there are legitimate use + # cases for these tools on hosts. If you want to enable it, modify the + # following macro. + - macro: consider_network_tools_on_host + condition: (never_true) + + - rule: Launch Suspicious Network Tool on Host + desc: Detect network tools launched on the host + condition: > + spawned_process and + not container and + consider_network_tools_on_host and + network_tool_procs and + not user_known_network_tool_activities + output: > + Network tool launched on host (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname) + priority: NOTICE + tags: [network, process, mitre_discovery, mitre_exfiltration] + + - list: grep_binaries + items: [grep, egrep, fgrep] + + - macro: grep_commands + condition: (proc.name in (grep_binaries)) + + # a less restrictive search for things that might be passwords/ssh/user etc. + - macro: grep_more + condition: (never_true) + + - macro: private_key_or_password + condition: > + (proc.args icontains "BEGIN PRIVATE" or + proc.args icontains "BEGIN RSA PRIVATE" or + proc.args icontains "BEGIN DSA PRIVATE" or + proc.args icontains "BEGIN EC PRIVATE" or + (grep_more and + (proc.args icontains " pass " or + proc.args icontains " ssh " or + proc.args icontains " user ")) + ) + + - rule: Search Private Keys or Passwords + desc: > + Detect grep private keys or passwords activity. 
+ condition: > + (spawned_process and + ((grep_commands and private_key_or_password) or + (proc.name = "find" and (proc.args contains "id_rsa" or proc.args contains "id_dsa"))) + ) + output: > + Grep private keys or passwords activities found + (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline container_id=%container.id container_name=%container.name + image=%container.image.repository:%container.image.tag) + priority: + WARNING + tags: [process, mitre_credential_access] + + - list: log_directories + items: [/var/log, /dev/log] + + - list: log_files + items: [syslog, auth.log, secure, kern.log, cron, user.log, dpkg.log, last.log, yum.log, access_log, mysql.log, mysqld.log] + + - macro: access_log_files + condition: (fd.directory in (log_directories) or fd.filename in (log_files)) + + # a placeholder for whitelist log files that could be cleared. Recommend the macro as (fd.name startswith "/var/log/app1*") + - macro: allowed_clear_log_files + condition: (never_true) + + - macro: trusted_logging_images + condition: (container.image.repository endswith "splunk/fluentd-hec" or + container.image.repository endswith "fluent/fluentd-kubernetes-daemonset" or + container.image.repository endswith "openshift3/ose-logging-fluentd" or + container.image.repository endswith "containernetworking/azure-npm") + + - rule: Clear Log Activities + desc: Detect clearing of critical log files + condition: > + open_write and + access_log_files and + evt.arg.flags contains "O_TRUNC" and + not trusted_logging_images and + not allowed_clear_log_files + output: > + Log files were tampered (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tags: [file, mitre_defense_evasion] + + - list: data_remove_commands + items: [shred, mkfs, mke2fs] + + - macro: clear_data_procs + condition: (proc.name in (data_remove_commands)) + + - macro: 
user_known_remove_data_activities + condition: (never_true) + + - rule: Remove Bulk Data from Disk + desc: Detect process running to clear bulk data from disk + condition: spawned_process and clear_data_procs and not user_known_remove_data_activities + output: > + Bulk data has been removed from disk (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository) + priority: + WARNING + tags: [process, mitre_persistence] + + - macro: modify_shell_history + condition: > + (modify and ( + evt.arg.name contains "bash_history" or + evt.arg.name contains "zsh_history" or + evt.arg.name contains "fish_read_history" or + evt.arg.name endswith "fish_history" or + evt.arg.oldpath contains "bash_history" or + evt.arg.oldpath contains "zsh_history" or + evt.arg.oldpath contains "fish_read_history" or + evt.arg.oldpath endswith "fish_history" or + evt.arg.path contains "bash_history" or + evt.arg.path contains "zsh_history" or + evt.arg.path contains "fish_read_history" or + evt.arg.path endswith "fish_history")) + + - macro: truncate_shell_history + condition: > + (open_write and ( + fd.name contains "bash_history" or + fd.name contains "zsh_history" or + fd.name contains "fish_read_history" or + fd.name endswith "fish_history") and evt.arg.flags contains "O_TRUNC") + + - macro: var_lib_docker_filepath + condition: (evt.arg.name startswith /var/lib/docker or fd.name startswith /var/lib/docker) + + - rule: Delete or rename shell history + desc: Detect shell history deletion + condition: > + (modify_shell_history or truncate_shell_history) and + not var_lib_docker_filepath and + not proc.name in (docker_binaries) + output: > + Shell history had been deleted or renamed (user=%user.name user_loginuid=%user.loginuid type=%evt.type command=%proc.cmdline fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath %container.info) + priority: + WARNING + tags: [process, 
mitre_defense_evasion] + + # This rule is deprecated and will/should never be triggered. Keep it here for backport compatibility. + # Rule Delete or rename shell history is the preferred rule to use now. + - rule: Delete Bash History + desc: Detect bash history deletion + condition: > + ((spawned_process and proc.name in (shred, rm, mv) and proc.args contains "bash_history") or + (open_write and fd.name contains "bash_history" and evt.arg.flags contains "O_TRUNC")) + output: > + Shell history had been deleted or renamed (user=%user.name user_loginuid=%user.loginuid type=%evt.type command=%proc.cmdline fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath %container.info) + priority: + WARNING + tags: [process, mitre_defense_evasion] + + - macro: consider_all_chmods + condition: (always_true) + + - list: user_known_chmod_applications + items: [hyperkube, kubelet, k3s-agent] + + # This macro should be overridden in user rules as needed. This is useful if a given application + # should not be ignored alltogether with the user_known_chmod_applications list, but only in + # specific conditions. + - macro: user_known_set_setuid_or_setgid_bit_conditions + condition: (never_true) + + - rule: Set Setuid or Setgid bit + desc: > + When the setuid or setgid bits are set for an application, + this means that the application will run with the privileges of the owning user or group respectively. 
+ Detect setuid or setgid bits set via chmod + condition: > + consider_all_chmods and chmod and (evt.arg.mode contains "S_ISUID" or evt.arg.mode contains "S_ISGID") + and not proc.name in (user_known_chmod_applications) + and not exe_running_docker_save + and not user_known_set_setuid_or_setgid_bit_conditions + enabled: false + output: > + Setuid or setgid bit is set via chmod (fd=%evt.arg.fd filename=%evt.arg.filename mode=%evt.arg.mode user=%user.name user_loginuid=%user.loginuid process=%proc.name + command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: + NOTICE + tags: [process, mitre_persistence] + + - list: exclude_hidden_directories + items: [/root/.cassandra] + + # To use this rule, you should modify consider_hidden_file_creation. + - macro: consider_hidden_file_creation + condition: (never_true) + + - macro: user_known_create_hidden_file_activities + condition: (never_true) + + - rule: Create Hidden Files or Directories + desc: Detect hidden files or directories created + condition: > + ((modify and evt.arg.newpath contains "/.") or + (mkdir and evt.arg.path contains "/.") or + (open_write and evt.arg.flags contains "O_CREAT" and fd.name contains "/." 
and not fd.name pmatch (exclude_hidden_directories))) and
+ consider_hidden_file_creation and
+ not user_known_create_hidden_file_activities
+ and not exe_running_docker_save
+ output: >
+ Hidden file or directory created (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline
+ file=%fd.name newpath=%evt.arg.newpath container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
+ priority:
+ NOTICE
+ tags: [file, mitre_persistence]
+
+ - list: remote_file_copy_binaries
+ items: [rsync, scp, sftp, dcp]
+
+ - macro: remote_file_copy_procs
+ condition: (proc.name in (remote_file_copy_binaries))
+
+ # Users should overwrite this macro to specify conditions under which a
+ # remote file copy tool is legitimately used in a container
+ - macro: user_known_remote_file_copy_activities
+ condition: (never_true)
+
+ - rule: Launch Remote File Copy Tools in Container
+ desc: Detect remote file copy tools launched in container
+ condition: >
+ spawned_process
+ and container
+ and remote_file_copy_procs
+ and not user_known_remote_file_copy_activities
+ output: >
+ Remote file copy tool launched in container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline parent_process=%proc.pname
+ container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
+ priority: NOTICE
+ tags: [network, process, mitre_lateral_movement, mitre_exfiltration]
+
+ - rule: Create Symlink Over Sensitive Files
+ desc: Detect symlink created over sensitive files
+ condition: >
+ create_symlink and
+ (evt.arg.target in (sensitive_file_names) or evt.arg.target in (sensitive_directory_names))
+ output: >
+ Symlinks created over sensitive files (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline target=%evt.arg.target linkpath=%evt.arg.linkpath parent_process=%proc.pname)
+ priority: NOTICE
+ tags: [file, mitre_exfiltration]
+
+ - list: miner_ports
+ items: [ 
+ 25, 3333, 3334, 3335, 3336, 3357, 4444, + 5555, 5556, 5588, 5730, 6099, 6666, 7777, + 7778, 8000, 8001, 8008, 8080, 8118, 8333, + 8888, 8899, 9332, 9999, 14433, 14444, + 45560, 45700 + ] + + - list: miner_domains + items: [ + "asia1.ethpool.org","ca.minexmr.com", + "cn.stratum.slushpool.com","de.minexmr.com", + "eth-ar.dwarfpool.com","eth-asia.dwarfpool.com", + "eth-asia1.nanopool.org","eth-au.dwarfpool.com", + "eth-au1.nanopool.org","eth-br.dwarfpool.com", + "eth-cn.dwarfpool.com","eth-cn2.dwarfpool.com", + "eth-eu.dwarfpool.com","eth-eu1.nanopool.org", + "eth-eu2.nanopool.org","eth-hk.dwarfpool.com", + "eth-jp1.nanopool.org","eth-ru.dwarfpool.com", + "eth-ru2.dwarfpool.com","eth-sg.dwarfpool.com", + "eth-us-east1.nanopool.org","eth-us-west1.nanopool.org", + "eth-us.dwarfpool.com","eth-us2.dwarfpool.com", + "eu.stratum.slushpool.com","eu1.ethermine.org", + "eu1.ethpool.org","fr.minexmr.com", + "mine.moneropool.com","mine.xmrpool.net", + "pool.minexmr.com","pool.monero.hashvault.pro", + "pool.supportxmr.com","sg.minexmr.com", + "sg.stratum.slushpool.com","stratum-eth.antpool.com", + "stratum-ltc.antpool.com","stratum-zec.antpool.com", + "stratum.antpool.com","us-east.stratum.slushpool.com", + "us1.ethermine.org","us1.ethpool.org", + "us2.ethermine.org","us2.ethpool.org", + "xmr-asia1.nanopool.org","xmr-au1.nanopool.org", + "xmr-eu1.nanopool.org","xmr-eu2.nanopool.org", + "xmr-jp1.nanopool.org","xmr-us-east1.nanopool.org", + "xmr-us-west1.nanopool.org","xmr.crypto-pool.fr", + "xmr.pool.minergate.com", "rx.unmineable.com" + ] + + - list: https_miner_domains + items: [ + "ca.minexmr.com", + "cn.stratum.slushpool.com", + "de.minexmr.com", + "fr.minexmr.com", + "mine.moneropool.com", + "mine.xmrpool.net", + "pool.minexmr.com", + "sg.minexmr.com", + "stratum-eth.antpool.com", + "stratum-ltc.antpool.com", + "stratum-zec.antpool.com", + "stratum.antpool.com", + "xmr.crypto-pool.fr" + ] + + - list: http_miner_domains + items: [ + "ca.minexmr.com", + "de.minexmr.com", + 
"fr.minexmr.com", + "mine.moneropool.com", + "mine.xmrpool.net", + "pool.minexmr.com", + "sg.minexmr.com", + "xmr.crypto-pool.fr" + ] + + # Add rule based on crypto mining IOCs + - macro: minerpool_https + condition: (fd.sport="443" and fd.sip.name in (https_miner_domains)) + + - macro: minerpool_http + condition: (fd.sport="80" and fd.sip.name in (http_miner_domains)) + + - macro: minerpool_other + condition: (fd.sport in (miner_ports) and fd.sip.name in (miner_domains)) + + - macro: net_miner_pool + condition: (evt.type in (sendto, sendmsg) and evt.dir=< and (fd.net != "127.0.0.0/8" and not fd.snet in (rfc_1918_addresses)) and ((minerpool_http) or (minerpool_https) or (minerpool_other))) + + - macro: trusted_images_query_miner_domain_dns + condition: (container.image.repository in (docker.io/falcosecurity/falco, falcosecurity/falco)) + append: false + + # The rule is disabled by default. + # Note: falco will send DNS request to resolve miner pool domain which may trigger alerts in your environment. + - rule: Detect outbound connections to common miner pool ports + desc: Miners typically connect to miner pools on common ports. 
+ condition: net_miner_pool and not trusted_images_query_miner_domain_dns + enabled: false + output: Outbound connection to IP/Port flagged by cryptoioc.ch (command=%proc.cmdline port=%fd.rport ip=%fd.rip container=%container.info image=%container.image.repository) + priority: CRITICAL + tags: [network, mitre_execution] + + - rule: Detect crypto miners using the Stratum protocol + desc: Miners typically specify the mining pool to connect to with a URI that begins with 'stratum+tcp' + condition: spawned_process and proc.cmdline contains "stratum+tcp" + output: Possible miner running (command=%proc.cmdline container=%container.info image=%container.image.repository) + priority: CRITICAL + tags: [process, mitre_execution] + + - list: k8s_client_binaries + items: [docker, kubectl, crictl] + + - list: user_known_k8s_ns_kube_system_images + items: [ + k8s.gcr.io/fluentd-gcp-scaler, + k8s.gcr.io/node-problem-detector/node-problem-detector + ] + + - list: user_known_k8s_images + items: [ + mcr.microsoft.com/aks/hcp/hcp-tunnel-front + ] + + # Whitelist for known docker client binaries run inside container + # - k8s.gcr.io/fluentd-gcp-scaler in GCP/GKE + - macro: user_known_k8s_client_container + condition: > + (k8s.ns.name="kube-system" and container.image.repository in (user_known_k8s_ns_kube_system_images)) or container.image.repository in (user_known_k8s_images) + + - macro: user_known_k8s_client_container_parens + condition: (user_known_k8s_client_container) + + - rule: The docker client is executed in a container + desc: Detect a k8s client tool executed inside a container + condition: spawned_process and container and not user_known_k8s_client_container_parens and proc.name in (k8s_client_binaries) + output: "Docker or kubernetes client executed in container (user=%user.name user_loginuid=%user.loginuid %container.info parent=%proc.pname cmdline=%proc.cmdline image=%container.image.repository:%container.image.tag)" + priority: WARNING + tags: [container, 
mitre_execution] + + + # This rule is enabled by default. + # If you want to disable it, modify the following macro. + - macro: consider_packet_socket_communication + condition: (always_true) + + - list: user_known_packet_socket_binaries + items: [] + + - rule: Packet socket created in container + desc: Detect new packet socket at the device driver (OSI Layer 2) level in a container. Packet socket could be used for ARP Spoofing and privilege escalation(CVE-2020-14386) by attacker. + condition: evt.type=socket and evt.arg[0]=AF_PACKET and consider_packet_socket_communication and container and not proc.name in (user_known_packet_socket_binaries) + output: Packet socket was created in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline socket_info=%evt.args container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: NOTICE + tags: [network, mitre_discovery] + + # Change to (always_true) to enable rule 'Network connection outside local subnet' + - macro: enabled_rule_network_only_subnet + condition: (never_true) + + # Namespaces where the rule is enforce + - list: namespace_scope_network_only_subnet + items: [] + + - macro: network_local_subnet + condition: > + fd.rnet in (rfc_1918_addresses) or + fd.ip = "0.0.0.0" or + fd.net = "127.0.0.0/8" + + # # How to test: + # # Change macro enabled_rule_network_only_subnet to condition: always_true + # # Add 'default' to namespace_scope_network_only_subnet + # # Run: + # kubectl run --generator=run-pod/v1 -n default -i --tty busybox --image=busybox --rm -- wget google.com -O /var/google.html + # # Check logs running + + - rule: Network Connection outside Local Subnet + desc: Detect traffic to image outside local subnet. 
+ condition: > + enabled_rule_network_only_subnet and + inbound_outbound and + container and + not network_local_subnet and + k8s.ns.name in (namespace_scope_network_only_subnet) + output: > + Network connection outside local subnet + (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id + image=%container.image.repository namespace=%k8s.ns.name + fd.rip.name=%fd.rip.name fd.lip.name=%fd.lip.name fd.cip.name=%fd.cip.name fd.sip.name=%fd.sip.name) + priority: WARNING + tags: [network] + + - macro: allowed_port + condition: (never_true) + + - list: allowed_image + items: [] # add image to monitor, i.e.: bitnami/nginx + + - list: authorized_server_port + items: [] # add port to allow, i.e.: 80 + + # # How to test: + # kubectl run --image=nginx nginx-app --port=80 --env="DOMAIN=cluster" + # kubectl expose deployment nginx-app --port=80 --name=nginx-http --type=LoadBalancer + # # On minikube: + # minikube service nginx-http + # # On general K8s: + # kubectl get services + # kubectl cluster-info + # # Visit the Nginx service and port, should not fire. + # # Change rule to different port, then different process name, and test again that it fires. + + - rule: Outbound or Inbound Traffic not to Authorized Server Process and Port + desc: Detect traffic that is not to authorized server process and port. 
+ condition: > + allowed_port and + inbound_outbound and + container and + container.image.repository in (allowed_image) and + not proc.name in (authorized_server_binary) and + not fd.sport in (authorized_server_port) + output: > + Network connection outside authorized port and binary + (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id + image=%container.image.repository) + priority: WARNING + tags: [network] + + - macro: user_known_stand_streams_redirect_activities + condition: (never_true) + + - rule: Redirect STDOUT/STDIN to Network Connection in Container + desc: Detect redirecting stdout/stdin to network connection in container (potential reverse shell). + condition: evt.type=dup and evt.dir=> and container and fd.num in (0, 1, 2) and fd.type in ("ipv4", "ipv6") and not user_known_stand_streams_redirect_activities + output: > + Redirect stdout/stdin to network connection (user=%user.name user_loginuid=%user.loginuid %container.info process=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty container_id=%container.id image=%container.image.repository fd.name=%fd.name fd.num=%fd.num fd.type=%fd.type fd.sip=%fd.sip) + priority: WARNING + + # The two Container Drift rules below will fire when a new executable is created in a container. + # There are two ways to create executables - file is created with execution permissions or permissions change of existing file. + # We will use a new sysdig filter, is_open_exec, to find all files creations with execution permission, and will trace all chmods in a container. + # The use case we are targeting here is an attempt to execute code that was not shipped as part of a container (drift) - + # an activity that might be malicious or non-compliant. 
+ # Two things to pay attention to: + # 1) In most cases, 'docker cp' will not be identified, but the assumption is that if an attacker gained access to the container runtime daemon, they are already privileged + # 2) Drift rules will be noisy in environments in which containers are built (e.g. docker build) + # These two rules are not enabled by default. Use `never_true` in macro condition to enable them. + + - macro: user_known_container_drift_activities + condition: (always_true) + + - rule: Container Drift Detected (chmod) + desc: New executable created in a container due to chmod + condition: > + chmod and + consider_all_chmods and + container and + not runc_writing_exec_fifo and + not runc_writing_var_lib_docker and + not user_known_container_drift_activities and + evt.rawres>=0 and + ((evt.arg.mode contains "S_IXUSR") or + (evt.arg.mode contains "S_IXGRP") or + (evt.arg.mode contains "S_IXOTH")) + output: Drift detected (chmod), new executable created in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline filename=%evt.arg.filename name=%evt.arg.name mode=%evt.arg.mode event=%evt.type) + priority: ERROR + + # **************************************************************************** + # * "Container Drift Detected (open+create)" requires FALCO_ENGINE_VERSION 6 * + # **************************************************************************** + - rule: Container Drift Detected (open+create) + desc: New executable created in a container due to open+create + condition: > + evt.type in (open,openat,creat) and + evt.is_open_exec=true and + container and + not runc_writing_exec_fifo and + not runc_writing_var_lib_docker and + not user_known_container_drift_activities and + evt.rawres>=0 + output: Drift detected (open+create), new executable created in a container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline filename=%evt.arg.filename name=%evt.arg.name mode=%evt.arg.mode event=%evt.type) + priority: ERROR + + - 
list: c2_server_ip_list + items: [] + + - rule: Outbound Connection to C2 Servers + desc: Detect outbound connection to command & control servers + condition: outbound and fd.sip in (c2_server_ip_list) + output: Outbound connection to C2 server (command=%proc.cmdline connection=%fd.name user=%user.name user_loginuid=%user.loginuid container_id=%container.id image=%container.image.repository) + priority: WARNING + tags: [network] + + - list: white_listed_modules + items: [] + + - rule: Linux Kernel Module Injection Detected + desc: Detect kernel module was injected (from container). + condition: spawned_process and container and proc.name=insmod and not proc.args in (white_listed_modules) + output: Linux Kernel Module injection using insmod detected (user=%user.name user_loginuid=%user.loginuid parent_process=%proc.pname module=%proc.args %container.info image=%container.image.repository:%container.image.tag) + priority: WARNING + tags: [process] + + - list: run_as_root_image_list + items: [] + + - macro: user_known_run_as_root_container + condition: (container.image.repository in (run_as_root_image_list)) + + # The rule is disabled by default and should be enabled when non-root container policy has been applied. + # Note the rule will not work as expected when usernamespace is applied, e.g. userns-remap is enabled. 
+ - rule: Container Run as Root User + desc: Detected container running as root user + condition: spawned_process and container and proc.vpid=1 and user.uid=0 and not user_known_run_as_root_container + enabled: false + output: Container launched with root user privilege (uid=%user.uid container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag) + priority: INFO + tags: [container, process] + + # This rule helps detect CVE-2021-3156: + # A privilege escalation to root through heap-based buffer overflow + - rule: Sudo Potential Privilege Escalation + desc: Privilege escalation vulnerability affecting sudo (<= 1.9.5p2). Executing sudo using sudoedit -s or sudoedit -i command with command-line argument that ends with a single backslash character from an unprivileged user it's possible to elevate the user privileges to root. + condition: spawned_process and user.uid != 0 and proc.name=sudoedit and (proc.args contains -s or proc.args contains -i) and (proc.args contains "\ " or proc.args endswith \) + output: "Detect Sudo Privilege Escalation Exploit (CVE-2021-3156) (user=%user.name parent=%proc.pname cmdline=%proc.cmdline %container.info)" + priority: CRITICAL + tags: [filesystem, mitre_privilege_escalation] + + - rule: Debugfs Launched in Privileged Container + desc: Detect file system debugger debugfs launched inside a privileged container which might lead to container escape. 
+ condition: >
+ spawned_process and container
+ and container.privileged=true
+ and proc.name=debugfs
+ output: Debugfs launched in a privileged container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag)
+ priority: WARNING
+ tags: [container, cis, mitre_lateral_movement]
+
+ - macro: mount_info
+ condition: (proc.args="" or proc.args intersects ("-V", "-l", "-h"))
+
+ - rule: Mount Launched in Privileged Container
+ desc: Detect file system mount happened inside a privileged container which might lead to container escape.
+ condition: >
+ spawned_process and container
+ and container.privileged=true
+ and proc.name=mount
+ and not mount_info
+ output: Mount was executed inside a privileged container (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag)
+ priority: WARNING
+ tags: [container, cis, mitre_lateral_movement]
+
+ - macro: consider_userfaultfd_activities
+ condition: (always_true)
+
+ - list: user_known_userfaultfd_processes
+ items: []
+
+ - rule: Unprivileged Delegation of Page Faults Handling to a Userspace Process
+ desc: Detect a successful unprivileged userfaultfd syscall which might act as an attack primitive to exploit other bugs
+ condition: >
+ consider_userfaultfd_activities and
+ evt.type = userfaultfd and
+ user.uid != 0 and
+ (evt.rawres >= 0 or evt.res != -1) and
+ not proc.name in (user_known_userfaultfd_processes)
+ output: A userfaultfd syscall was successfully executed by an unprivileged user (user=%user.name user_loginuid=%user.loginuid process=%proc.name command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag)
+ priority: CRITICAL
+ tags: [syscall, mitre_defense_evasion]
+
+ # Application rules have moved to application_rules.yaml. 
Please look + # there if you want to enable them by adding to + # falco_rules.local.yaml. + + k8s_audit_rules.yaml: |+ + # + # Copyright (C) 2019 The Falco Authors. + # + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + # + - required_engine_version: 2 + + # Like always_true/always_false, but works with k8s audit events + - macro: k8s_audit_always_true + condition: (jevt.rawtime exists) + + - macro: k8s_audit_never_true + condition: (jevt.rawtime=0) + + # Generally only consider audit events once the response has completed + - list: k8s_audit_stages + items: ["ResponseComplete"] + + # Generally exclude users starting with "system:" + - macro: non_system_user + condition: (not ka.user.name startswith "system:") + + # This macro selects the set of Audit Events used by the below rules. + - macro: kevt + condition: (jevt.value[/stage] in (k8s_audit_stages)) + + - macro: kevt_started + condition: (jevt.value[/stage]=ResponseStarted) + + # If you wish to restrict activity to a specific set of users, override/append to this list. 
+ # users created by kops are included + - list: vertical_pod_autoscaler_users + items: ["vpa-recommender", "vpa-updater"] + + - list: allowed_k8s_users + items: [ + "minikube", "minikube-user", "kubelet", "kops", "admin", "kube", "kube-proxy", "kube-apiserver-healthcheck", + "kubernetes-admin", + vertical_pod_autoscaler_users, + cluster-autoscaler, + "system:addon-manager", + "cloud-controller-manager", + "eks:node-manager", + "system:kube-controller-manager" + ] + + - rule: Disallowed K8s User + desc: Detect any k8s operation by users outside of an allowed set of users. + condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) + output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # In a local/user rules file, you could override this macro to + # explicitly enumerate the container images that you want to run in + # your environment. In this main falco rules file, there isn't any way + # to know all the containers that can run, so any container is + # allowed, by using the always_true macro. 
In the overridden macro, the condition + # would look something like (ka.req.pod.containers.image.repository in (my-repo/my-image)) + - macro: allowed_k8s_containers + condition: (k8s_audit_always_true) + + - macro: response_successful + condition: (ka.response.code startswith 2) + + - macro: kcreate + condition: ka.verb=create + + - macro: kmodify + condition: (ka.verb in (create,update,patch)) + + - macro: kdelete + condition: ka.verb=delete + + - macro: pod + condition: ka.target.resource=pods and not ka.target.subresource exists + + - macro: pod_subresource + condition: ka.target.resource=pods and ka.target.subresource exists + + - macro: deployment + condition: ka.target.resource=deployments + + - macro: service + condition: ka.target.resource=services + + - macro: configmap + condition: ka.target.resource=configmaps + + - macro: namespace + condition: ka.target.resource=namespaces + + - macro: serviceaccount + condition: ka.target.resource=serviceaccounts + + - macro: clusterrole + condition: ka.target.resource=clusterroles + + - macro: clusterrolebinding + condition: ka.target.resource=clusterrolebindings + + - macro: role + condition: ka.target.resource=roles + + - macro: secret + condition: ka.target.resource=secrets + + - macro: health_endpoint + condition: ka.uri=/healthz + + - rule: Create Disallowed Pod + desc: > + Detect an attempt to start a pod with a container image outside of a list of allowed images. 
+ condition: kevt and pod and kcreate and not allowed_k8s_containers + output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - rule: Create Privileged Pod + desc: > + Detect an attempt to start a pod with a privileged container + condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (falco_privileged_images) + output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - macro: sensitive_vol_mount + condition: > + (ka.req.pod.volumes.hostpath intersects (/proc, /var/run/docker.sock, /, /etc, /root, /var/run/crio/crio.sock, /home/admin, /var/lib/kubelet, /var/lib/kubelet/pki, /etc/kubernetes, /etc/kubernetes/manifests)) + + - rule: Create Sensitive Mount Pod + desc: > + Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc). + Exceptions are made for known trusted images. + condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (falco_sensitive_mount_images) + output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes]) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Corresponds to K8s CIS Benchmark 1.7.4 + - rule: Create HostNetwork Pod + desc: Detect an attempt to start a pod using the host network. 
+ condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostnetwork_images) + output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - macro: user_known_node_port_service + condition: (k8s_audit_never_true) + + - rule: Create NodePort Service + desc: > + Detect an attempt to start a service with a NodePort service type + condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service + output: NodePort Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace ports=%ka.req.service.ports) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - macro: contains_private_credentials + condition: > + (ka.req.configmap.obj contains "aws_access_key_id" or + ka.req.configmap.obj contains "aws-access-key-id" or + ka.req.configmap.obj contains "aws_s3_access_key_id" or + ka.req.configmap.obj contains "aws-s3-access-key-id" or + ka.req.configmap.obj contains "password" or + ka.req.configmap.obj contains "passphrase") + + - rule: Create/Modify Configmap With Private Credentials + desc: > + Detect creating/modifying a configmap containing a private credential (aws key, password, etc.) + condition: kevt and configmap and kmodify and contains_private_credentials + output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb configmap=%ka.req.configmap.name config=%ka.req.configmap.obj) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # Corresponds to K8s CIS Benchmark, 1.1.1. 
+ - rule: Anonymous Request Allowed
+ desc: >
+ Detect any request made by the anonymous user that was allowed
+ condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint
+ output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason)
+ priority: WARNING
+ source: k8s_audit
+ tags: [k8s]
+
+ # Roughly corresponds to K8s CIS Benchmark, 1.1.12. In this case,
+ # notifies an attempt to exec/attach to a privileged container.
+
+ # Ideally, we'd add a more stringent rule that detects attaches/execs
+ # to a privileged pod, but that requires the engine for k8s audit
+ # events to be stateful, so it could know if a container named in an
+ # attach request was created privileged or not. For now, we have a
+ # less severe rule that detects attaches/execs to any pod.
+ #
+ # For the same reason, you can't use things like image names/prefixes,
+ # as the event that creates the pod (which has the images) is a
+ # separate event than the actual exec/attach to the pod. 
+ 
+ - macro: user_known_exec_pod_activities
+ condition: (k8s_audit_never_true)
+
+ - rule: Attach/Exec Pod
+ desc: >
+ Detect any attempt to attach/exec to a pod
+ condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities
+ output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command])
+ priority: NOTICE
+ source: k8s_audit
+ tags: [k8s]
+
+ - macro: user_known_pod_debug_activities
+ condition: (k8s_audit_never_true)
+
+ # Only works when feature gate EphemeralContainers is enabled
+ - rule: EphemeralContainers Created
+ desc: >
+ Detect any ephemeral container created
+ condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities
+ output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image])
+ priority: NOTICE
+ source: k8s_audit
+ tags: [k8s]
+
+ # In a local/user rules file, you can append to this list to add additional allowed namespaces
+ - list: allowed_namespaces
+ items: [kube-system, kube-public, default]
+
+ - rule: Create Disallowed Namespace
+ desc: Detect any attempt to create a namespace outside of a set of known namespaces
+ condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces)
+ output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name)
+ priority: WARNING
+ source: k8s_audit
+ tags: [k8s]
+
+ # Only defined for backwards compatibility. Use the more specific
+ # user_allowed_kube_namespace_image_list instead. 
+ - list: user_trusted_image_list
+ items: []
+
+ - list: user_allowed_kube_namespace_image_list
+ items: [user_trusted_image_list]
+
+ # Only defined for backwards compatibility. Use the more specific
+ # allowed_kube_namespace_image_list instead.
+ - list: k8s_image_list
+ items: []
+
+ - list: allowed_kube_namespace_image_list
+ items: [
+ gcr.io/google-containers/prometheus-to-sd,
+ gcr.io/projectcalico-org/node,
+ gke.gcr.io/addon-resizer,
+ gke.gcr.io/heapster,
+ gke.gcr.io/gke-metadata-server,
+ k8s.gcr.io/ip-masq-agent-amd64,
+ k8s.gcr.io/kube-apiserver,
+ gke.gcr.io/kube-proxy,
+ gke.gcr.io/netd-amd64,
+ k8s.gcr.io/addon-resizer,
+ k8s.gcr.io/prometheus-to-sd,
+ k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64,
+ k8s.gcr.io/k8s-dns-kube-dns-amd64,
+ k8s.gcr.io/k8s-dns-sidecar-amd64,
+ k8s.gcr.io/metrics-server-amd64,
+ kope/kube-apiserver-healthcheck,
+ k8s_image_list
+ ]
+
+ - macro: allowed_kube_namespace_pods
+ condition: (ka.req.pod.containers.image.repository in (user_allowed_kube_namespace_image_list) or
+ ka.req.pod.containers.image.repository in (allowed_kube_namespace_image_list))
+
+ # Detect any new pod created in the kube-system namespace
+ - rule: Pod Created in Kube Namespace
+ desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces
+ condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods
+ output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
+ priority: WARNING
+ source: k8s_audit
+ tags: [k8s]
+
+ - list: user_known_sa_list
+ items: []
+
+ - list: known_sa_list
+ items: ["pod-garbage-collector","resourcequota-controller","cronjob-controller","generic-garbage-collector",
+ "daemon-set-controller","endpointslice-controller","deployment-controller", "replicaset-controller",
+ "endpoint-controller", "namespace-controller", "statefulset-controller", "disruption-controller"] 
+ 
+ - macro: trusted_sa
+ condition: (ka.target.name in (known_sa_list, user_known_sa_list))
+
+ # Detect creating a service account in the kube-system/kube-public namespace
+ - rule: Service Account Created in Kube Namespace
+ desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces
+ condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa
+ output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace)
+ priority: WARNING
+ source: k8s_audit
+ tags: [k8s]
+
+ # Detect any modify/delete to any ClusterRole starting with
+ # "system:". "system:coredns" is excluded as changes are expected in
+ # normal operation.
+ - rule: System ClusterRole Modified/Deleted
+ desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system
+ condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and
+ not ka.target.name in (system:coredns, system:managed-certificate-controller)
+ output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name ns=%ka.target.namespace action=%ka.verb)
+ priority: WARNING
+ source: k8s_audit
+ tags: [k8s]
+
+ # Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
+ # (expand this to any built-in cluster role that does "sensitive" things)
+ - rule: Attach to cluster-admin Role
+ desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
+ condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin
+ output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects)
+ priority: WARNING
+ source: k8s_audit
+ tags: [k8s]
+
+ - rule: ClusterRole With Wildcard Created
+ desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs
+ 
condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*")) + output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) + priority: WARNING + source: k8s_audit + tags: [k8s] + + - macro: writable_verbs + condition: > + (ka.req.role.rules.verbs intersects (create, update, patch, delete, deletecollection)) + + - rule: ClusterRole With Write Privileges Created + desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions + condition: kevt and (role or clusterrole) and kcreate and writable_verbs + output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) + priority: NOTICE + source: k8s_audit + tags: [k8s] + + - rule: ClusterRole With Pod Exec Created + desc: Detect any attempt to create a Role/ClusterRole that can exec to pods + condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec") + output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) + priority: WARNING + source: k8s_audit + tags: [k8s] + + # The rules below this point are less discriminatory and generally + # represent a stream of activity for a cluster. If you wish to disable + # these events, modify the following macro. 
+ - macro: consider_activity_events + condition: (k8s_audit_always_true) + + - macro: kactivity + condition: (kevt and consider_activity_events) + + - rule: K8s Deployment Created + desc: Detect any attempt to create a deployment + condition: (kactivity and kcreate and deployment and response_successful) + output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Deployment Deleted + desc: Detect any attempt to delete a deployment + condition: (kactivity and kdelete and deployment and response_successful) + output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Service Created + desc: Detect any attempt to create a service + condition: (kactivity and kcreate and service and response_successful) + output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Service Deleted + desc: Detect any attempt to delete a service + condition: (kactivity and kdelete and service and response_successful) + output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s ConfigMap Created + desc: Detect any attempt to create a configmap + condition: (kactivity and kcreate and configmap and response_successful) + output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision 
reason=%ka.auth.reason)
+ priority: INFO
+ source: k8s_audit
+ tags: [k8s]
+
+ - rule: K8s ConfigMap Deleted
+ desc: Detect any attempt to delete a configmap
+ condition: (kactivity and kdelete and configmap and response_successful)
+ output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
+ priority: INFO
+ source: k8s_audit
+ tags: [k8s]
+
+ - rule: K8s Namespace Created
+ desc: Detect any attempt to create a namespace
+ condition: (kactivity and kcreate and namespace and response_successful)
+ output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
+ priority: INFO
+ source: k8s_audit
+ tags: [k8s]
+
+ - rule: K8s Namespace Deleted
+ desc: Detect any attempt to delete a namespace
+ condition: (kactivity and non_system_user and kdelete and namespace and response_successful)
+ output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
+ priority: INFO
+ source: k8s_audit
+ tags: [k8s]
+
+ - rule: K8s Serviceaccount Created
+ desc: Detect any attempt to create a service account
+ condition: (kactivity and kcreate and serviceaccount and response_successful)
+ output: K8s Serviceaccount Created (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
+ priority: INFO
+ source: k8s_audit
+ tags: [k8s]
+
+ - rule: K8s Serviceaccount Deleted
+ desc: Detect any attempt to delete a service account
+ condition: (kactivity and kdelete and serviceaccount and response_successful)
+ output: K8s Serviceaccount Deleted (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
+ priority: INFO
+ source: k8s_audit
+ tags: 
[k8s] + + - rule: K8s Role/Clusterrole Created + desc: Detect any attempt to create a cluster role/role + condition: (kactivity and kcreate and (clusterrole or role) and response_successful) + output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Role/Clusterrole Deleted + desc: Detect any attempt to delete a cluster role/role + condition: (kactivity and kdelete and (clusterrole or role) and response_successful) + output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Role/Clusterrolebinding Created + desc: Detect any attempt to create a clusterrolebinding + condition: (kactivity and kcreate and clusterrolebinding and response_successful) + output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Role/Clusterrolebinding Deleted + desc: Detect any attempt to delete a clusterrolebinding + condition: (kactivity and kdelete and clusterrolebinding and response_successful) + output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + + - rule: K8s Secret Created + desc: Detect any attempt to create a secret. Service account tokens are excluded. 
+ condition: (kactivity and kcreate and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
+ output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
+ priority: INFO
+ source: k8s_audit
+ tags: [k8s]
+
+ - rule: K8s Secret Deleted
+ desc: Detect any attempt to delete a secret. Service account tokens are excluded.
+ condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
+ output: K8s Secret Deleted (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
+ priority: INFO
+ source: k8s_audit
+ tags: [k8s]
+
+ # This rule generally matches all events, and as a result is disabled
+ # by default. If you wish to enable these events, modify the
+ # following macro.
+ # condition: (jevt.rawtime exists)
+ - macro: consider_all_events
+ condition: (k8s_audit_never_true)
+
+ - macro: kall
+ condition: (kevt and consider_all_events)
+
+ - rule: All K8s Audit Events
+ desc: Match all K8s Audit Events
+ condition: kall
+ output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj)
+ priority: DEBUG
+ source: k8s_audit
+ tags: [k8s]
+
+
+ # This macro disables following rule, change to k8s_audit_never_true to enable it
+ - macro: allowed_full_admin_users
+ condition: (k8s_audit_always_true)
+
+ # This list includes some of the default user names for an administrator in several K8s installations
+ - list: full_admin_k8s_users
+ items: ["admin", "kubernetes-admin", "kubernetes-admin@kubernetes", "kubernetes-admin@cluster.local", "minikube-user"]
+
+ # This rule detects an operation triggered by a user name that is
+ # included in the list of those that are default administrators upon
+ # cluster creation. 
This may signify a permission setting too broad.
+ # As we can't check for role of the user on a general ka.* event, this
+ # may or may not be an administrator. Customize the full_admin_k8s_users
+ # list to your needs, and activate at your discretion.
+
+ # # How to test:
+ # # Execute any kubectl command connected using default cluster user, as:
+ # kubectl create namespace rule-test
+
+ - rule: Full K8s Administrative Access
+ desc: Detect any k8s operation by a user name that may be an administrator with full access.
+ condition: >
+ kevt
+ and non_system_user
+ and ka.user.name in (full_admin_k8s_users)
+ and not allowed_full_admin_users
+ output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
+ priority: WARNING
+ source: k8s_audit
+ tags: [k8s]
+
+ - macro: ingress
+ condition: ka.target.resource=ingresses
+
+ - macro: ingress_tls
+ condition: (jevt.value[/requestObject/spec/tls] exists)
+
+ # # How to test:
+ # # Create an ingress.yaml file with content:
+ # apiVersion: networking.k8s.io/v1beta1
+ # kind: Ingress
+ # metadata:
+ # name: test-ingress
+ # annotations:
+ # nginx.ingress.kubernetes.io/rewrite-target: /
+ # spec:
+ # rules:
+ # - http:
+ # paths:
+ # - path: /testpath
+ # backend:
+ # serviceName: test
+ # servicePort: 80
+ # # Execute: kubectl apply -f ingress.yaml
+
+ - rule: Ingress Object without TLS Certificate Created
+ desc: Detect any attempt to create an ingress without a TLS certificate. 
+ condition: > + (kactivity and kcreate and ingress and response_successful and not ingress_tls) + output: > + K8s Ingress Without TLS Cert Created (user=%ka.user.name ingress=%ka.target.name + namespace=%ka.target.namespace) + source: k8s_audit + priority: WARNING + tags: [k8s, network] + + - macro: node + condition: ka.target.resource=nodes + + - macro: allow_all_k8s_nodes + condition: (k8s_audit_always_true) + + - list: allowed_k8s_nodes + items: [] + + # # How to test: + # # Create a Falco monitored cluster with Kops + # # Increase the number of minimum nodes with: + # kops edit ig nodes + # kops apply --yes + + - rule: Untrusted Node Successfully Joined the Cluster + desc: > + Detect a node successfully joined the cluster outside of the list of allowed nodes. + condition: > + kevt and node + and kcreate + and response_successful + and not allow_all_k8s_nodes + and not ka.target.name in (allowed_k8s_nodes) + output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name) + priority: ERROR + source: k8s_audit + tags: [k8s] + + - rule: Untrusted Node Unsuccessfully Tried to Join the Cluster + desc: > + Detect an unsuccessful attempt to join the cluster for a node not in the list of allowed nodes. 
+ condition: > + kevt and node + and kcreate + and not response_successful + and not allow_all_k8s_nodes + and not ka.target.name in (allowed_k8s_nodes) + output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason) + priority: WARNING + source: k8s_audit + tags: [k8s] \ No newline at end of file diff --git a/cluster-manifests/falco-system/daemonset.yaml b/cluster-manifests/falco-system/daemonset.yaml index 8392438e..d9f64fb0 100644 --- a/cluster-manifests/falco-system/daemonset.yaml +++ b/cluster-manifests/falco-system/daemonset.yaml @@ -21,7 +21,7 @@ spec: role: security pci-scope: out-of-scope annotations: - checksum/config: c8d2718dd632fb0254f5f202c1063b5bbeb21382d762acbe2390da1ea1267b48 + checksum/config: b3e7235ffa87a7967af2b7acfb01e2f827cc2f50b18c466f64c6bb64b386244b checksum/rules: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b checksum/certs: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b spec: @@ -35,11 +35,11 @@ spec: value: "true" containers: - name: falco - image: docker.io/falcosecurity/falco:0.27.0 + image: docker.io/falcosecurity/falco:0.29.1 imagePullPolicy: IfNotPresent resources: limits: - cpu: 200m + cpu: 500m memory: 1024Mi requests: cpu: 100m @@ -53,7 +53,7 @@ spec: - -K - /var/run/secrets/kubernetes.io/serviceaccount/token - -k - - "https://$(KUBERNETES_SERVICE_HOST)" + - https://$(KUBERNETES_SERVICE_HOST) - -pk volumeMounts: - mountPath: /host/run/containerd/containerd.sock diff --git a/cluster-manifests/falco-system/resource-quota.yaml b/cluster-manifests/falco-system/resource-quota.yaml index cb324f74..b4328efd 100644 --- a/cluster-manifests/falco-system/resource-quota.yaml +++ b/cluster-manifests/falco-system/resource-quota.yaml @@ -8,7 +8,7 @@ spec: requests.cpu: "5" requests.memory: 8Gi requests.storage: "0" - limits.cpu: "7" + limits.cpu: "10" limits.memory: 20Gi count/persistentvolumeclaims: "0" 
count/services.loadbalancers: "0" diff --git a/docs/deploy/10-pre-bootstrap.md b/docs/deploy/10-pre-bootstrap.md index d51acd1f..0c490466 100644 --- a/docs/deploy/10-pre-bootstrap.md +++ b/docs/deploy/10-pre-bootstrap.md @@ -47,7 +47,7 @@ Using a security agent that is container-aware and can operate from within the c # [Combined this takes about eight minutes.] az acr import --source ghcr.io/fluxcd/kustomize-controller:v0.8.1 -t quarantine/fluxcd/kustomize-controller:v0.8.1 -n $ACR_NAME_QUARANTINE && \ az acr import --source ghcr.io/fluxcd/source-controller:v0.8.1 -t quarantine/fluxcd/source-controller:v0.8.1 -n $ACR_NAME_QUARANTINE && \ - az acr import --source docker.io/falcosecurity/falco:0.27.0 -t quarantine/falcosecurity/falco:0.27.0 -n $ACR_NAME_QUARANTINE && \ + az acr import --source docker.io/falcosecurity/falco:0.29.1 -t quarantine/falcosecurity/falco:0.29.1 -n $ACR_NAME_QUARANTINE && \ az acr import --source docker.io/library/busybox:1.33.0 -t quarantine/library/busybox:1.33.0 -n $ACR_NAME_QUARANTINE && \ az acr import --source docker.io/weaveworks/kured:1.6.1 -t quarantine/weaveworks/kured:1.6.1 -n $ACR_NAME_QUARANTINE && \ az acr import --source k8s.gcr.io/ingress-nginx/controller:v0.49.0 -t quarantine/ingress-nginx/controller:v0.49.0 -n $ACR_NAME_QUARANTINE && \ @@ -83,7 +83,7 @@ Using a security agent that is container-aware and can operate from within the c # [Combined this takes about eight minutes.] 
az acr import --source quarantine/fluxcd/kustomize-controller:v0.8.1 -r $ACR_NAME_QUARANTINE -t live/fluxcd/kustomize-controller:v0.8.1 -n $ACR_NAME && \ az acr import --source quarantine/fluxcd/source-controller:v0.8.1 -r $ACR_NAME_QUARANTINE -t live/fluxcd/source-controller:v0.8.1 -n $ACR_NAME && \ - az acr import --source quarantine/falcosecurity/falco:0.27.0 -r $ACR_NAME_QUARANTINE -t live/falcosecurity/falco:0.27.0 -n $ACR_NAME && \ + az acr import --source quarantine/falcosecurity/falco:0.29.1 -r $ACR_NAME_QUARANTINE -t live/falcosecurity/falco:0.29.1 -n $ACR_NAME && \ az acr import --source quarantine/library/busybox:1.33.0 -r $ACR_NAME_QUARANTINE -t live/library/busybox:1.33.0 -n $ACR_NAME && \ az acr import --source quarantine/weaveworks/kured:1.6.1 -r $ACR_NAME_QUARANTINE -t live/weaveworks/kured:1.6.1 -n $ACR_NAME && \ az acr import --source quarantine/ingress-nginx/controller:v0.49.0 -r $ACR_NAME_QUARANTINE -t live/ingress-nginx/controller:v0.49.0 -n $ACR_NAME && \