[1] https://github.com/ntop/PF_RING/blob/dev/doc/README.hugepages.md
-
Dependencies
yum -y install epel-release
yum -y install "@Development tools" python-devel libpcap-devel dkms glib2-devel pcre-devel zlib-devel openssl-devel
yum install kernel-devel-$(uname -r)
-
Enable 2MB THP
echo always > /sys/kernel/mm/transparent_hugepage/enabled
echo 8192 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
mkdir -p /mnt/hugepages
mount -t hugetlbfs nodev /mnt/hugepages
-
Validate THP
grep Huge /proc/meminfo
-
Install Librdkafka
wget https://github.com/edenhill/librdkafka/archive/v0.9.4.tar.gz -O - | tar -xz
cd librdkafka-0.9.4/
./configure --prefix=/usr
make
make install
-
Build and install.
wget https://github.com/ntop/PF_RING/archive/6.6.0.tar.gz -O - | tar -xz
cd PF_RING-6.6.0
cd kernel
make
make install
cd ../userland/lib
./configure --prefix=/usr/local/pfring
make
make install
cd ../libpcap
./configure --prefix=/usr/local/pfring
make
make install
cd ../tcpdump-4.1.1
./configure --prefix=/usr/local/pfring
make install
-
Load the kernel module.
modprobe pf_ring
-
Validate
$ lsmod | grep pf_ring
pf_ring              1234009  0
-
Build the ZC driver.
cd ~/PF_RING-6.6.0/drivers/intel/ixgbe/ixgbe-4.1.5-zc/src
make
-
Load the ZC driver
rmmod ixgbe
insmod ~/PF_RING-6.6.0/drivers/intel/ixgbe/ixgbe-4.1.5-zc/src/ixgbe.ko
-
Validate the ZC driver.
lsmod | grep ixgbe
-
Create License
- Please create your PF_RING ZC license(s) at the following address:
- URL: http://shop.ntop.org/mkzclicense/
- URL: http://packages.ntop.org/ (binary packages)
-
Build the ZC tools.
cd userland/examples_zc/
make
-
Validate the license.
[root@localhost examples_zc]# ./zcount -i <tap-interface-1> -C
License OK
-
Sanity check
[root@localhost examples_zc]# ./zsanitycheck
Writing data..
Reading data..
Test completed, 1024 buffers inspected
-
Add PF Ring's libpcap to the dynamic library load path.
echo "/usr/local/pfring/lib/" >> /etc/ld.so.conf.d/pfring.conf
ldconfig -v
[1] https://www.bro.org/documentation/load-balancing.html
-
Install Bro on the host where it will run (
y137
).
cd
wget https://www.bro.org/downloads/release/bro-2.4.1.tar.gz -O - | tar -xz
cd bro-2.4.1
./configure --prefix=/usr --with-pcap=/usr/local/pfring
make
make install
-
Configure Bro to listen on the TAP interface.
sed -i 's/eth0/<tap-interface-1>/g' /usr/etc/node.cfg
-
Configure load balancer; edit
/usr/etc/node.cfg
to look similar to the following.
[manager]
type=manager
host=localhost

[proxy-1]
type=proxy
host=localhost

[worker-1]
type=worker
host=localhost
interface=<tap-interface-1>
lb_method=pf_ring
lb_procs=4
pin_cpus=0,1,2,3
-
Install config changes.
broctl install
-
Configure logs at
/usr/etc/broctl.cfg
. Replace /metron1
with desired mount point for the data storage disks.
# Rotation interval in seconds for log files on manager (or standalone) node.
# A value of 0 disables log rotation.
LogRotationInterval = 3600
# Expiration interval for archived log files in LogDir. Files older than this
# will be deleted by "broctl cron". The interval is an integer followed by
# one of these time units: day, hr, min. A value of 0 means that logs
# never expire.
LogExpireInterval = 7 day
# Location of the log directory where log files will be archived each rotation
# interval.
LogDir = /metron1/bro/logs
# Location of the spool directory where files and data that are currently being
# written are stored.
SpoolDir = /metron1/bro/spool
-
Install the Bro Plugin on the host where it will run (
y137
).
wget https://github.com/apache/metron/archive/master.zip
unzip master.zip
cd metron-master/metron-sensors/bro-plugin-kafka
./configure --bro-dist=/root/bro-2.4.1 --install-root=/usr/lib/bro/plugins/ --with-librdkafka=/usr
make
make install
-
Add the following to
/usr/share/bro/site/local.bro
. Add the appropriate kafka broker and kerberos information.
@load Bro/Kafka/logs-to-kafka.bro
redef Kafka::logs_to_send = set(HTTP::LOG, DNS::LOG);
redef Kafka::topic_name = "bro";
redef Kafka::tag_json = T;
redef Kafka::kafka_conf = table(
    ["metadata.broker.list"] = "<kafka-broker-list>",
    ["security.protocol"] = "SASL_PLAINTEXT",
    ["sasl.kerberos.keytab"] = "<path-to-kerberos-keytab>",
    ["sasl.kerberos.principal"] = "<kerberos-principal>",
    ["debug"] = "metadata"
);
-
Make sure the changes are installed.
broctl install
-
Start Bro.
broctl deploy
-
Ensure that Bro is producing telemetry.
ls -ltr /metron1/bro/logs/current
-
If there is telemetry in the logs, then validate that it is also landing in the Kafka topic.
-
Install libfixbuf.
cd
wget http://tools.netsa.cert.org/releases/libfixbuf-1.7.1.tar.gz -O - | tar -xz
cd libfixbuf-1.7.1/
./configure
make
make install
-
Build YAF. Double check path to PF Ring.
cd
wget http://tools.netsa.cert.org/releases/yaf-2.8.0.tar.gz -O - | tar -xz
cd yaf-2.8.0/
./configure --enable-applabel --enable-plugins --disable-airframe --with-pfring=/usr/local/pfring/
make
make install
-
Add YAF's lib path to the dynamic library load path.
echo "/usr/local/lib/" >> /etc/ld.so.conf.d/yaf.conf
ldconfig -v
-
Start yaf by following the instructions below.
-
Enable 1G THP by following these instructions.
-
Install DPDK following these instructions.
-
Grab the source code for Fastcapa.
wget https://github.com/nickwallen/metron/archive/X520-POC.zip
unzip X520-POC.zip
cd metron-X520-POC/metron-sensors/fastcapa/
-
Build Fastcapa following these instructions.
-
Start with an example fastcapa configuration file like the following.
#
# kafka global settings
#
[kafka-global]

#debug = broker,topic,msg

# Protocol used to communicate with brokers.
# Type: enum value { plaintext, ssl, sasl_plaintext, sasl_ssl }
security.protocol = SASL_PLAINTEXT

# Broker service name
#sasl.kerberos.service.name=kafka

# Client keytab location
sasl.kerberos.keytab=<path-to-kerberos-keytab>

# sasl.kerberos.principal
sasl.kerberos.principal=<kerberos-principal>

# Initial list of brokers as a CSV list of broker host or host:port.
# Type: string
metadata.broker.list=kafka1:9092,kafka2:9092,kafka3:9092

# Client identifier.
# Type: string
client.id = fastcapa-ens3f0

# Maximum number of messages allowed on the producer queue.
# Type: integer
# Default: 100000
queue.buffering.max.messages = 5000000

# Maximum total message size sum allowed on the producer queue.
# Type: integer
#queue.buffering.max.kbytes = 2097151

# Maximum time, in milliseconds, for buffering data on the producer queue.
# Type: integer
# Default: 1000
queue.buffering.max.ms = 20000

# Maximum size for message to be copied to buffer. Messages larger than this will be
# passed by reference (zero-copy) at the expense of larger iovecs.
# Type: integer
# Default: 65535
#message.copy.max.bytes = 65535

# Compression codec to use for compressing message sets. This is the default value
# for all topics, may be overriden by the topic configuration property compression.codec.
# Type: enum value { none, gzip, snappy, lz4 }
# Default: none
compression.codec = snappy

# Maximum number of messages batched in one MessageSet. The total MessageSet size is
# also limited by message.max.bytes.
# Increase for better compression.
# Type: integer
batch.num.messages = 100000

# Maximum transmit message size.
# Type: integer
# Default: 1000000
message.max.bytes = 10000000

# How many times to retry sending a failing MessageSet. Note: retrying may cause reordering.
# Type: integer
message.send.max.retries = 5

# The backoff time in milliseconds before retrying a message send.
# Type: integer
# Default: 100
retry.backoff.ms = 500

# how often statistics are emitted; 0 = never
# Statistics emit interval. The application also needs to register a stats callback
# using rd_kafka_conf_set_stats_cb(). The granularity is 1000ms. A value of 0 disables statistics.
# Type: integer
# Default: 0
statistics.interval.ms = 5000

socket.timeout.ms = 10000

# Only provide delivery reports for failed messages.
# Type: boolean
# Default: false
delivery.report.only.error = false

#
# kafka topic settings
#
[kafka-topic]

# This field indicates how many acknowledgements the leader broker must receive from ISR brokers
# before responding to the request:
#   0=Broker does not send any response/ack to client,
#   1=Only the leader broker will need to ack the message,
#   -1 or all=broker will block until message is committed by all in sync replicas (ISRs) or broker's in.sync.replicas setting before sending response.
# Type: integer
request.required.acks = 1

# Local message timeout. This value is only enforced locally and limits the time a produced message
# waits for successful delivery. A time of 0 is infinite.
# Type: integer
# Default: 300000
message.timeout.ms = 900000

# Report offset of produced message back to application. The application must be use the
# dr_msg_cb to retrieve the offset from rd_kafka_message_t.offset.
# Type: boolean
# Default: false
#produce.offset.report = false
-
Update the
metadata.broker.list
in the configuration file. -
Update the following Kerberos properties in the configuration file under the
[kafka-global]
header.
sasl.kerberos.keytab=/etc/security/keytabs/metron.service.keytab
[email protected]
-
Add the build location of Fastcapa to our path.
export PATH=$PATH:/root/metron-X520-POC/metron-sensors/fastcapa/build/app
-
Run Fastcapa. Edit the settings like topic, lcores to match the environment.
screen -S fastcapa
TOPIC=pcap
CONFIG=/root/fastcapa.conf
THP_MNT=/mnt/huge_1GB
fastcapa -l 8-15,24 \
    --huge-dir $THP_MNT -- \
    -t $TOPIC \
    -c $CONFIG \
    -b 192 \
    -x 262144 \
    -q 4 \
    -r 4096
Note: This will not pick-up any configuration changes. For that use install
or deploy
.
broctl start
broctl deploy
broctl status
broctl stop
broctl diag
BRO_LOGS=/metron1/bro/logs
The http.log
and dns.log
produced by Bro should be very active.
ls -ltr $BRO_LOGS/current/
Look for any connectivity or authorization issues.
cat $BRO_LOGS/current/stderr.out
cat $BRO_LOGS/current/stdout.out
Alter the topic name in the local.bro
script.
$ cat /usr/share/bro/site/local.bro
...
redef Kafka::topic_name = "bro";
Then restart Bro. Make sure to install
or deploy
it. Otherwise the configuration change will not take effect.
broctl stop
broctl deploy
KAFKA_BROKERS=kafka1:9092,kafka2:9092
YAF_TOPIC=yaf
export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/metron/0.4.0/client_jaas.conf"
export PATH=$PATH:/usr/hdp/current/kafka-broker/bin:/usr/local/bin
yaf --in ens2f0 --live pcap | \
yafscii --tabular | \
kafka-console-producer.sh \
--broker-list $KAFKA_BROKERS \
--topic $YAF_TOPIC \
--security-protocol SASL_PLAINTEXT
-
Start the load balancer.
SNIFF_IFACE=ens2f0
YAF_TOPIC=yafpoc
CLUSTER_NUM=99
LOG_DIR=/var/log/yaf
NUM_WORKERS=2
mkdir -p $LOG_DIR
modprobe pf_ring
yafzcbalance --in=$SNIFF_IFACE \
    --cluster $CLUSTER_NUM \
    --num $NUM_WORKERS \
    --stats 15 \
    --daemon \
    --log $LOG_DIR/yafzcbalance.log
-
Start the workers. Increment the
$WORKER_NUM
to start multiple workers. There should be at least 2 workers running.
WORKER_NUM=0
screen -S yaf-$WORKER_NUM
WORKER_NUM=0
CLUSTER_NUM=99
KAFKA_BROKERS=kafka1:9092,kafka2:9092
YAF_TOPIC=yaf
export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/metron/0.4.0/client_jaas.conf"
export PATH=$PATH:/usr/hdp/current/kafka-broker/bin:/usr/local/bin
yaf --in $CLUSTER_NUM:$WORKER_NUM --live zc | \
    yafscii --tabular | \
    kafka-console-producer.sh --broker-list $KAFKA_BROKERS --topic $YAF_TOPIC --security-protocol SASL_PLAINTEXT
killall yafzcbalance
killall yaf
First start the yafzcbalancer
. Then attach a worker that will simply consume and print the YAF output.
WORKER_NUM=0
screen -S yaf-$WORKER_NUM
WORKER_NUM=0
CLUSTER_NUM=99
KAFKA_BROKERS=kafka1:9092,kafka2:9092
YAF_TOPIC=yaf
export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/metron/0.4.0/client_jaas.conf"
export PATH=$PATH:/usr/hdp/current/kafka-broker/bin:/usr/local/bin
yaf --in $CLUSTER_NUM:$WORKER_NUM --live zc | yafscii --tabular
Be sure to completely shut down the load balancer and workers. Then restart them, but change the $CLUSTER_NUM
. This will often fix oddities with PF_Ring's ring buffers.
-
Ensure that the NIC is bound to DPDK
export PATH=$PATH:/usr/local/dpdk/sbin
dpdk-devbind --status
If it is not bound, bind it.
ifdown ens3f0
modprobe uio_pci_generic
dpdk-devbind --bind=uio_pci_generic "81:00.0"
-
Ensure that THPs are available
$ grep -e "^Huge" /proc/meminfo
HugePages_Total:      16
HugePages_Free:       16
HugePages_Rsvd:        0
HugePages_Surp:        0
Hugepagesize:    1048576 kB
-
Ensure the THPs are mounted
$ mount | grep hugetlbfs
nodev on /mnt/huge_1GB type hugetlbfs (rw,relatime,pagesize=1GB)
Mount them if they are not.
umount -a -t hugetlbfs
mount -t hugetlbfs nodev /mnt/hugepages
-
Start Fastcapa in its own screen session.
screen -S fastcapa
TOPIC=pcappoc
CONFIG=/root/fastcapa.conf
THP_MNT=/mnt/hugepages
fastcapa -l 8-15,24 --huge-dir $THP_MNT -- -t $TOPIC -c $CONFIG -b 192 -x 262144 -q 4 -r 4096
killall fastcapa
Simply change the name passed to Fastcapa on the command line using the -t
command-line switch.
https://github.com/apache/metron/tree/master/metron-sensors/fastcapa#output
https://github.com/apache/metron/tree/master/metron-sensors/fastcapa#faqs
export PATH=$PATH:/usr/hdp/current/kafka-broker/bin
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer \
--authorizer-properties zookeeper.connect=$ZOOKEEPER \
--list
Run the following command as the metron
user. The metron
user already has the JAAS configuration setup.
export PATH=$PATH:/usr/hdp/current/kafka-broker/bin
kafka-console-consumer.sh --zookeeper $ZOOKEEPER \
--topic $TOPIC \
--security-protocol SASL_PLAINTEXT
https://github.com/apache/metron/blob/master/metron-deployment/Kerberos-manual-setup.md#kafka-authorization