21. February 2021 · Comments Off on Targeted network monitoring using only fping and rrdtool. · Categories: Linux, Linux Admin, Linux Scripts, Networking

I’ve been very unhappy with the state of network monitoring applications lately. Most network monitoring tools are either too big or too generic to be helpful for application support. This is an issue when focusing on a specific application whose components are separated into various tiers, datacenters or locations. When network performance is in question, the most helpful data is the active latency between a node and the other components it talks to, during or leading up to the time in question. If a network monitoring tool lacks any specificity about the application in question it will be viewed as too dense or cerebral to be useful; at worst it will harm troubleshooting. The quicker hard data can be produced to prove out the network layer, the quicker troubleshooting can move up the stack towards resolution.

Monolithic tools like Cacti are sometimes useful, but the lighter the script, the more nimbly it can be deployed across a wide variety of nodes. Because fping and rrdtool are both small, useful and standard Linux packages they are ideal, so I wrote the following bash script that leverages only those two tools together. The data collected is roughly identical to SmokePing, with the benefit of not dirtying a system with unnecessary packages. The script can easily be pushed out by whatever devops deployment tooling you already use and is run via crontab; graphs can be created when, or if, they are needed.
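
As a rough deployment sketch (the package manager, install path and cron file below are assumptions; adjust for your distribution), a node only needs the two packages, the script and a cron entry:

# Install the only two dependencies (Debian/Ubuntu shown; use yum/dnf on RHEL)
apt-get install -y fping rrdtool

# Drop the collector into the PATH and create its data directory
install -m 0755 fping_rrd.sh /usr/local/bin/fping_rrd.sh
mkdir -p /var/lib/fping

# Collect every 5 minutes
echo '*/5 * * * * root /usr/local/bin/fping_rrd.sh' > /etc/cron.d/fping_rrd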

fping_rrd.sh: Bourne-Again shell script, ASCII text executable

#!/usr/bin/env bash
## FPing data collector for RRDTOOL
#
# Crontab:
#   */5 * * * *     fping_rrd.sh
#
## Requires: fping, rrdtool
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
#set -x
 
STEP=300 # 5min
PINGS=20 # 20 pings
 
# The first ping is usually an outlier, so one extra ping is sent and the first result is dropped later.
 
fping_opts="-C $((PINGS+1)) -q -B1 -r1 -i10"
fping_hosts="172.31.3.1 172.31.3.3 172.31.4.1 172.31.4.10 172.31.15.1 172.31.15.4"
rrd_path="/var/lib/fping"
rrd_timestamp=$(date +%s)
 
calc_median() {
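    # Reads one latency value per line on stdin, drops lost samples ("-")
    # and prints the median of what remains. Note: asort() is GNU awk only.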
    awk '{ if ( $1 != "-" ) { fping[NR] = $1 }
           else { NR-- }
         }
     END { asort(fping);
           if (NR % 2) { print fping[(NR + 1) / 2] }
           else { print (fping[(NR / 2)] + fping[(NR / 2) + 1]) / 2.0 }
         }'
}
 
rrd_create() {
    rrdtool create "${fping_rrd}" \
     --start now-2h --step $((STEP)) \
     DS:loss:GAUGE:$((STEP*2)):0:$((PINGS)) \
     DS:median:GAUGE:$((STEP*2)):0:180 \
     $(seq -f " DS:ping%g:GAUGE:$((STEP*2)):0:180" 1 $((PINGS))) \
     RRA:AVERAGE:0.5:1:1008 \
     RRA:AVERAGE:0.5:12:4320 \
     RRA:MIN:0.5:12:4320 \
     RRA:MAX:0.5:12:4320 \
     RRA:AVERAGE:0.5:144:720 \
     RRA:MAX:0.5:144:720 \
     RRA:MIN:0.5:144:720
}
 
rrd_update() {
    rrd_loss=0
    rrd_median=""
    rrd_rev=$((PINGS))
    rrd_name=""
    rrd_value="${rrd_timestamp}"
 
    for rrd_idx in $(seq 1 $((rrd_rev)))
     do
        rrd_name="${rrd_name}$([[ ${rrd_idx} -gt "1" ]] && echo ":")ping$((rrd_idx))"
        rrd_value="${rrd_value}:${fping_array[-$((rrd_rev))]}"
        rrd_median="${fping_array[-$((rrd_rev))]}\n${rrd_median}"
 
        [ "${fping_array[-$((rrd_rev))]}" == "-" ] && (( rrd_loss++ ))
 
        (( rrd_rev-- ))
    done
    rrd_median=$(printf ${rrd_median} | calc_median)
 
    rrdtool update "${fping_rrd}" --template $(echo ${rrd_name}:median:loss ${rrd_value}:${rrd_median}:${rrd_loss} | sed 's/-/U/g')
    unset rrd_loss rrd_median rrd_rev rrd_name rrd_value
}
 
fping ${fping_opts} ${fping_hosts} 2>&1 | while read fping_line;
 do fping_array=( ${fping_line} )
    fping_rrd="${rrd_path}/fping_${fping_array[0],,}.rrd"
 
    # Create RRD file.
    if [ ! -f "${fping_rrd}" ]
     then rrd_create
    fi
 
    # Update RRD file.
    if [ -f "${fping_rrd}" ]
     then rrd_last=$(( ${rrd_timestamp} - $(rrdtool last "${fping_rrd}") ))
          [[ $((rrd_last)) -ge $((STEP)) ]] && rrd_update
    fi && unset rrd_last
done
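
Once cron has fired a couple of times, each target gets its own RRD file under /var/lib/fping. A quick sanity check that samples are being recorded (the host below is just the first target in the script) looks like this:

rrdtool lastupdate /var/lib/fping/fping_172.31.3.1.rrd   # timestamp and last value of every DS
rrdtool info /var/lib/fping/fping_172.31.3.1.rrd | head  # step size, DS and RRA definitions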

Creating Network Monitoring Graphs

The following are 3 example scripts that use rrdtool to create graphs from the RRD files.
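
All three scripts read the step size and the number of ping data sources straight out of the RRD header, so the only required argument is the file itself. The mini and smoke scripts also take optional start and end times (defaulting to "-9 hours" and "-7 hours" respectively, ending "now"), while the multi script takes the output PNG followed by the RRD files to overlay; for example:

./graph_mini.sh  /var/lib/fping/fping_172.31.15.4.rrd "-9 hours" "now"
./graph_smoke.sh /var/lib/fping/fping_172.31.15.4.rrd "-24 hours"
./graph_multi.sh fping_graph.png /var/lib/fping/fping_*.rrd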

Mini Graph

fping_172.31.15.4_mini.png

graph_mini.sh: Bourne-Again shell script, ASCII text executable

#!/usr/bin/env bash
## Create a mini graph from an RRD file
## Requires: rrdtool
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
#set -x
 
fping_rrd="${1}"
COLOR=( "FF5500" )
 
rrd_graph_cmd() {
cat << EOF
rrdtool graph "$(basename ${fping_rrd%.*})_mini.png"
--start "${START}" --end "${END}"
--title "$(date -d "${START}") ($(awk -v TIME=$TIME 'BEGIN {printf "%.1f hr", TIME/3600}'))"
--height 65 --width 600
--vertical-label "Seconds"
--color BACK#F3F3F3
--color CANVAS#FDFDFD
--color SHADEA#CBCBCB
--color SHADEB#999999
--color FONT#000000
--color AXIS#2C4D43
--color ARROW#2C4D43
--color FRAME#2C4D43
--border 1
--font TITLE:10:"Arial"
--font AXIS:8:"Arial"
--font LEGEND:8:"Courier"
--font UNIT:8:"Arial"
--font WATERMARK:6:"Arial"
--imgformat PNG
EOF
}
 
rrd_graph_opts() {
rrd_idx=0
cat << EOF
DEF:median$((rrd_idx))="${fping_rrd}":median:AVERAGE
DEF:loss$((rrd_idx))="${fping_rrd}":loss:AVERAGE
$(for ((i=1;i<=PINGS;i++)); do echo "DEF:ping$((rrd_idx))p$((i))=\"${fping_rrd}\":ping$((i)):AVERAGE"; done)
CDEF:ploss$((rrd_idx))=loss$((rrd_idx)),20,/,100,*
CDEF:dm$((rrd_idx))=median$((rrd_idx)),0,100000,LIMIT
$(for ((i=1;i<=PINGS;i++)); do echo "CDEF:p$((rrd_idx))p$((i))=ping$((rrd_idx))p$((i)),UN,0,ping$((rrd_idx))p$((i)),IF"; done)
$(echo -n "CDEF:pings$((rrd_idx))=$((PINGS)),p$((rrd_idx))p1,UN"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),UN,+"; done; echo ",-")
$(echo -n "CDEF:m$((rrd_idx))=p$((rrd_idx))p1"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),+"; done; echo ",pings$((rrd_idx)),/")
$(echo -n "CDEF:sdev$((rrd_idx))=p$((rrd_idx))p1,m$((rrd_idx)),-,DUP,*"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),m$((rrd_idx)),-,DUP,*,+"; done; echo ",pings$((rrd_idx)),/,SQRT")
CDEF:dmlow$((rrd_idx))=dm$((rrd_idx)),sdev$((rrd_idx)),2,/,-
CDEF:s2d$((rrd_idx))=sdev$((rrd_idx))
AREA:dmlow$((rrd_idx))
AREA:s2d$((rrd_idx))#${COLOR}30:STACK
LINE1:dm$((rrd_idx))#${COLOR}:"$(basename ${fping_rrd%.*} | awk -F'_' '{print $NF}')\t"
VDEF:avmed$((rrd_idx))=median$((rrd_idx)),AVERAGE
VDEF:avsd$((rrd_idx))=sdev$((rrd_idx)),AVERAGE
CDEF:msr$((rrd_idx))=median$((rrd_idx)),POP,avmed$((rrd_idx)),avsd$((rrd_idx)),/
VDEF:avmsr$((rrd_idx))=msr$((rrd_idx)),AVERAGE
GPRINT:avmed$((rrd_idx)):"Median RTT\: %5.2lfms"
GPRINT:ploss$((rrd_idx)):AVERAGE:"Loss\: %5.1lf%%"
GPRINT:avsd$((rrd_idx)):"Std Dev\: %5.2lfms"
GPRINT:avmsr$((rrd_idx)):"Ratio\: %5.1lfms\\j"
COMMENT:"Probe\: $((PINGS)) pings every $((STEP)) seconds"
COMMENT:"${fping_rrd}\\j"
EOF
}
 
if [ ! -r "${fping_rrd}" ]
 then printf "${0} \"file.rrd\"\n"
 else
      STEP=$(rrdtool info "${fping_rrd}" | awk '/^step/{print $NF}')
      PINGS=$(rrdtool info "${fping_rrd}" | awk '/^ds.ping.*index/{count++} END{print count}')
 
      START="$([ -z "${2}" ] && echo "-9 hours" || echo "${2}")"
      END="$([ -z "${3}" ] && echo "now" || echo "${3}")"
      TIME=$(( $(date -d "${END}" +%s) - $(date -d "${START}" +%s) ))
 
      eval $(rrd_graph_cmd; rrd_graph_opts)
fi

Combined (multi) Graph

fping_graph.png

graph_multi.sh: Bourne-Again shell script, ASCII text executable

#!/usr/bin/env bash
## Create a mini graph from multiple RRDs
## Requires: rrdtool
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
#set -x
 
START="-9 hours"
END="now"
 
png_file="${1}"
rrd_files="${*:2}"
 
rrd_graph_cmd() {
cat << EOF
rrdtool graph "${png_file}"
--start "${START}" --end "${END}"
--title "$(date -d "${START}") ($(awk -v TIME=$TIME 'BEGIN {printf "%.1f hr", TIME/3600}'))"
--height 115 --width 600
--vertical-label "Seconds"
--color BACK#F3F3F3
--color CANVAS#FDFDFD
--color SHADEA#CBCBCB
--color SHADEB#999999
--color FONT#000000
--color AXIS#2C4D43
--color ARROW#2C4D43
--color FRAME#2C4D43
--border 1
--font TITLE:10:"Arial"
--font AXIS:8:"Arial"
--font LEGEND:8:"Courier"
--font UNIT:8:"Arial"
--font WATERMARK:6:"Arial"
--imgformat PNG
EOF
}
 
rrd_graph_opts() {
rrd_idx=0
for fping_rrd in ${rrd_files}
do COLOR=$(openssl rand -hex 3)
STEP=$(rrdtool info "${fping_rrd}" | awk '/^step/{print $NF}')
PINGS=$(rrdtool info "${fping_rrd}" | awk '/^ds.ping.*index/{count++} END{print count}')
cat << EOF
DEF:median$((rrd_idx))="${fping_rrd}":median:AVERAGE
DEF:loss$((rrd_idx))="${fping_rrd}":loss:AVERAGE
$(for ((i=1;i<=PINGS;i++)); do echo "DEF:ping$((rrd_idx))p$((i))=\"${fping_rrd}\":ping$((i)):AVERAGE"; done)
CDEF:ploss$((rrd_idx))=loss$((rrd_idx)),20,/,100,*
CDEF:dm$((rrd_idx))=median$((rrd_idx)),0,100000,LIMIT
$(for ((i=1;i<=PINGS;i++)); do echo "CDEF:p$((rrd_idx))p$((i))=ping$((rrd_idx))p$((i)),UN,0,ping$((rrd_idx))p$((i)),IF"; done)
$(echo -n "CDEF:pings$((rrd_idx))=$((PINGS)),p$((rrd_idx))p1,UN"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),UN,+"; done; echo ",-")
$(echo -n "CDEF:m$((rrd_idx))=p$((rrd_idx))p1"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),+"; done; echo ",pings$((rrd_idx)),/")
$(echo -n "CDEF:sdev$((rrd_idx))=p$((rrd_idx))p1,m$((rrd_idx)),-,DUP,*"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),m$((rrd_idx)),-,DUP,*,+"; done; echo ",pings$((rrd_idx)),/,SQRT")
CDEF:dmlow$((rrd_idx))=dm$((rrd_idx)),sdev$((rrd_idx)),2,/,-
CDEF:s2d$((rrd_idx))=sdev$((rrd_idx))
AREA:dmlow$((rrd_idx))
AREA:s2d$((rrd_idx))#${COLOR}30:STACK
LINE1:dm$((rrd_idx))#${COLOR}:"$(basename ${fping_rrd%.*} | awk -F'_' '{print $NF}')\t"
VDEF:avmed$((rrd_idx))=median$((rrd_idx)),AVERAGE
VDEF:avsd$((rrd_idx))=sdev$((rrd_idx)),AVERAGE
CDEF:msr$((rrd_idx))=median$((rrd_idx)),POP,avmed$((rrd_idx)),avsd$((rrd_idx)),/
VDEF:avmsr$((rrd_idx))=msr$((rrd_idx)),AVERAGE
GPRINT:avmed$((rrd_idx)):"Median RTT\: %5.2lfms"
GPRINT:ploss$((rrd_idx)):AVERAGE:"Loss\: %5.1lf%%"
GPRINT:avsd$((rrd_idx)):"Std Dev\: %5.2lfms"
GPRINT:avmsr$((rrd_idx)):"Ratio\: %5.1lfms\\j"
EOF
(( rrd_idx++ ))
done && unset rrd_idx
}
 
if [ -z "${rrd_files}" ]
 then printf "${0} \"file.png\" { file1.rrd ... file6.rrd }\n"
 else TIME=$(( $(date -d "${END}" +%s) - $(date -d "${START}" +%s) ))
      eval $(rrd_graph_cmd; rrd_graph_opts)
fi

SmokePing-like Graph

fping_172.31.15.4_smoke.png

graph_smoke.sh: Bourne-Again shell script, ASCII text executable

#!/usr/bin/env bash
## Create a SmokePing-like graph from an RRD file
## Requires: rrdtool
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
#set -x
 
fping_rrd="${1}"
COLOR=( "0F0f00" "00FF00" "00BBFF" "0022FF" "8A2BE2" "FA0BE2" "C71585" "FF0000" )
LINE=".5"
 
rrd_graph_cmd() {
cat << EOF
rrdtool graph "$(basename ${fping_rrd%.*})_smoke.png"
--start "${START}" --end "${END}"
--title "$(basename ${fping_rrd%.*} | awk -F'_' '{print $NF}')"
--height 95 --width 600
--vertical-label "Seconds"
--color BACK#F3F3F3
--color CANVAS#FDFDFD
--color SHADEA#CBCBCB
--color SHADEB#999999
--color FONT#000000
--color AXIS#2C4D43
--color ARROW#2C4D43
--color FRAME#2C4D43
--border 1
--font TITLE:10:"Arial"
--font AXIS:8:"Arial"
--font LEGEND:9:"Courier"
--font UNIT:8:"Arial"
--font WATERMARK:7:"Arial"
--imgformat PNG
EOF
}
 
rrd_graph_opts() {
cat << EOF
DEF:median$((rrd_idx))="${fping_rrd}":median:AVERAGE
DEF:loss$((rrd_idx))="${fping_rrd}":loss:AVERAGE
$(for ((i=1;i<=PINGS;i++)); do echo "DEF:ping$((rrd_idx))p$((i))=\"${fping_rrd}\":ping$((i)):AVERAGE"; done)
CDEF:ploss$((rrd_idx))=loss$((rrd_idx)),20,/,100,*
CDEF:dm$((rrd_idx))=median$((rrd_idx)),0,100000,LIMIT
$(for ((i=1;i<=PINGS;i++)); do echo "CDEF:p$((rrd_idx))p$((i))=ping$((rrd_idx))p$((i)),UN,0,ping$((rrd_idx))p$((i)),IF"; done)
$(echo -n "CDEF:pings$((rrd_idx))=$((PINGS)),p$((rrd_idx))p1,UN"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),UN,+"; done; echo ",-")
$(echo -n "CDEF:m$((rrd_idx))=p$((rrd_idx))p1"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),+"; done; echo ",pings$((rrd_idx)),/")
$(echo -n "CDEF:sdev$((rrd_idx))=p$((rrd_idx))p1,m$((rrd_idx)),-,DUP,*"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),m$((rrd_idx)),-,DUP,*,+"; done; echo ",pings$((rrd_idx)),/,SQRT")
CDEF:dmlow$((rrd_idx))=dm$((rrd_idx)),sdev$((rrd_idx)),2,/,-
CDEF:s2d$((rrd_idx))=sdev$((rrd_idx))
AREA:dmlow$((rrd_idx))
AREA:s2d$((rrd_idx))#${COLOR[0]}30:STACK
\
VDEF:avmed$((rrd_idx))=median$((rrd_idx)),AVERAGE
VDEF:avsd$((rrd_idx))=sdev$((rrd_idx)),AVERAGE
CDEF:msr$((rrd_idx))=median$((rrd_idx)),POP,avmed$((rrd_idx)),avsd$((rrd_idx)),/
VDEF:avmsr$((rrd_idx))=msr$((rrd_idx)),AVERAGE
LINE3:avmed$((rrd_idx))#${COLOR[1]}15:
\
COMMENT:"\t\t"
COMMENT:"Average"
COMMENT:"Maximum"
COMMENT:"Minimum"
COMMENT:"Current"
COMMENT:"Std Dev"
COMMENT:" \\j"
\
COMMENT:"Median RTT\:\t"
GPRINT:avmed$((rrd_idx)):"%.2lf"
GPRINT:median$((rrd_idx)):MAX:"%.2lf"
GPRINT:median$((rrd_idx)):MIN:"%.2lf"
GPRINT:median$((rrd_idx)):LAST:"%.2lf"
GPRINT:avsd$((rrd_idx)):"%.2lf"
COMMENT:" \\j"
\
COMMENT:"Packet Loss\:\t"
GPRINT:ploss$((rrd_idx)):AVERAGE:"%.2lf%%"
GPRINT:ploss$((rrd_idx)):MAX:"%.2lf%%"
GPRINT:ploss$((rrd_idx)):MIN:"%.2lf%%"
GPRINT:ploss$((rrd_idx)):LAST:"%.2lf%%"
COMMENT:"  -  "
COMMENT:" \\j"
\
COMMENT:"Loss Colors\:\t"
CDEF:me0=loss$((rrd_idx)),-1,GT,loss$((rrd_idx)),0,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL0=me0,${LINE},-
CDEF:meH0=me0,0,*,${LINE},2,*,+
AREA:meL0
STACK:meH0#${COLOR[1]}:" 0/$((PINGS))"
CDEF:me1=loss$((rrd_idx)),0,GT,loss$((rrd_idx)),1,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL1=me1,${LINE},-
CDEF:meH1=me1,0,*,${LINE},2,*,+
AREA:meL1
STACK:meH1#${COLOR[2]}:" 1/$((PINGS))"
CDEF:me2=loss$((rrd_idx)),1,GT,loss$((rrd_idx)),2,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL2=me2,${LINE},-
CDEF:meH2=me2,0,*,${LINE},2,*,+
AREA:meL2
STACK:meH2#${COLOR[3]}:" 2/$((PINGS))"
CDEF:me3=loss$((rrd_idx)),2,GT,loss$((rrd_idx)),3,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL3=me3,${LINE},-
CDEF:meH3=me3,0,*,${LINE},2,*,+
AREA:meL3
STACK:meH3#${COLOR[4]}:" 3/$((PINGS))"
CDEF:me4=loss$((rrd_idx)),3,GT,loss$((rrd_idx)),4,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL4=me4,${LINE},-
CDEF:meH4=me4,0,*,${LINE},2,*,+
AREA:meL4
STACK:meH4#${COLOR[5]}:" 4/$((PINGS))"
CDEF:me10=loss$((rrd_idx)),4,GT,loss$((rrd_idx)),10,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL10=me10,${LINE},-
CDEF:meH10=me10,0,*,${LINE},2,*,+
AREA:meL10
STACK:meH10#${COLOR[6]}:"10/$((PINGS))"
CDEF:me19=loss$((rrd_idx)),10,GT,loss$((rrd_idx)),19,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL19=me19,${LINE},-
CDEF:meH19=me19,0,*,${LINE},2,*,+
AREA:meL19
STACK:meH19#${COLOR[7]}:"19/$((PINGS))\\j"
\
COMMENT:"Probe\: $((PINGS)) pings every $((STEP)) seconds"
COMMENT:"$(date -d "${START}" | sed 's/\:/\\\:/g') ($(awk -v TIME=$TIME 'BEGIN {printf "%.1f hr", TIME/3600}'))\\j"
EOF
}
 
if [ ! -r "${fping_rrd}" ]
 then printf "${0} \"file.rrd\"\n"
 else
      STEP=$(rrdtool info "${fping_rrd}" | awk '/^step/{print $NF}')
      PINGS=$(rrdtool info "${fping_rrd}" | awk '/^ds.ping.*index/{count++} END{print count}')
 
      START="$([ -z "${2}" ] && echo "-7 hours" || echo "${2}")"
      END="$([ -z "${3}" ] && echo "now" || echo "${3}")"
      TIME=$(( $(date -d "${END}" +%s) - $(date -d "${START}" +%s) ))
 
      eval $(rrd_graph_cmd; rrd_graph_opts)
fi
28. December 2018 · Comments Off on Search for object matches in an ASA config. · Categories: AWK, Firewall, Linux, Linux Scripts, Networking

Having to parse ASA configs for migration purposes provides a never-ending source of reasons to write scripts. The following AWK script will munge an ASA config, searching for any specified address or object name, and will output any objects that reference it. I use it in conjunction with the ASA_acls.sh script to find security rules relating to an address. As far as I know this is the closest offline equivalent to the “Where Used” feature in ASDM for finding addresses.
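
Usage is the ASA config followed by one or more addresses or object names to search for; the output is a short YAML list of every object that references them. Subnet matching is off by default (dig_subnet in the BEGIN block), and the and() and isarray() calls assume GNU awk. The config file name and search terms below are placeholders:

chmod +x ASA_obj.awk
./ASA_obj.awk asa-running-config.txt 192.0.2.10 WEB-SERVER-01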

ASA_obj.awk: awk script, ASCII text executable

#!/usr/bin/awk -f
## Search for object matches in an ASA config.
## 2018 (v.01) - Script from www.davideaves.com
 
### BEGIN ###
 
BEGIN {
  dig_range="y"
  dig_subnet="n"
 
  # Script arguments: ASA configuration + Search objects
  if ( ARGV[1] == "" ) {
    print "ERROR: No Input ASA config provided!" > "/dev/stderr"
    exit 1
  } else if ( ARGV[2] == "" ) {
    print "ERROR: No address or object to search for!" > "/dev/stderr"
    exit 1
  } else {
    # Saving everything after ARGV[1] in search_array.
    for (i = 2; i < ARGC; i++) {
      search_array[ARGV[i]] = ARGV[i]
      delete ARGV[i]
  } }
}
 
### FUNCTIONS ###
 
# Convert IP to Integer.
function ip_to_int(input) {
  split(input, oc, ".")
  ip_int=(oc[1]*(256^3))+(oc[2]*(256^2))+(oc[3]*(256))+(oc[4])
  return ip_int
}
 
# test if a string is an ipv4 address
function is_v4(address) {
  split(address, octet, ".")
  if ( octet[1] <= 255 && octet[2] <= 255 && octet[3] <= 255 && octet[4] <= 255 )
  return address
}
 
# convert number to bits
function bits(N){
  c = 0
  for(i=0; i<8; ++i) if( and(2**i, N) ) ++c
  return c
}
 
# convert ipv4 to prefix
function to_prefix(mask) {
  split(mask, octet, ".")
  return bits(octet[1]) + bits(octet[2]) + bits(octet[3]) + bits(octet[4])
}
 
### SCRIPT ###
 
//{ gsub(/\r/, "") # Strip CTRL+M
 
  ### LINE IS NAME ###
  if ( $1 ~ /^name$/ ) {
 
    name=$3; host=$2; type=$1
    for(col = 5; col <= NF; col++) { previous=previous" "$col }
    description=substr(previous,2)
    previous=""
 
    # Add to search_array
    for (search in search_array) if ( host == search ) search_array[name]
  }
 
  ### LINE IS OBJECT ### 
  else if ( $1 ~ /^object/ ) {
 
    tab="Y"
    name=$3
    type=$2
    if ( type == "service" ) service=$4
    previous=""
 
  } else if ( tab == "Y" && substr($0,1,1) == " " ) {
 
    # object is single host.
    if ( $1 == "host" ) {
      host=$NF
      for (search in search_array) if ( host == search ) search_array[name]
    }
 
    # object is a subnet
    else if ( $1 == "subnet" && dig_subnet == "y" ) {
      for (search in search_array) if ( is_v4(search) ) {
 
        NETWORK=ip_to_int($2)
        PREFIX=to_prefix($3)
        BROADCAST=(NETWORK + (2 ^ (32 - PREFIX) - 1))
 
        if ( ip_to_int(search) >= int(NETWORK) && ip_to_int(search) <= int(BROADCAST) ) {
          search_array[name]
      } }
    }
 
    # object is a range
    else if ( $1 == "range" && dig_range == "y" ) {
      for (search in search_array) if ( is_v4(search) ) {
        if ( ip_to_int(search) >= ip_to_int($2) && ip_to_int(search) <= ip_to_int($3) ) {
          search_array[name]
      } }
    }
 
    # object is group of other objects
    else if ( $2 ~ /(host|object)/ ) {
      for (search in search_array) if ( $NF == search ) search_array[name]
    }
 
    # object contains nat statement
    else if ( $1 == "nat" ) {
      for (search in search_array) if ( $NF == search ) search_array[name]
    }
 
    ### Debug everything else within an object
    #else { print "DEBUG:",$0 }
 
  }
  else { tab="" }
 
}
 
### END ###
 
END{
  if ( isarray(search_array) ) {
    print "asa_objects:"
    for (search in search_array) print "  -",search
  }
}
19. December 2018 · Comments Off on Collect all sensor information from the FMC. · Categories: Cisco, Firewall, Linux Scripts, Networking, Uncategorized

Eventually I plan on refactoring all my Firepower scripts into Ansible playbooks. In the meantime, the following is a quick script that will collect all sensor information from a Firepower Management Center and save it to a CSV file. The output is pretty handy for migrations and general data collection.
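
There is nothing to pass on the command line; set username, password and the FMC list at the top and run it. Assuming the script is saved as fmc_sensors.sh (the post does not name the file), the CSV is written alongside it:

chmod +x fmc_sensors.sh
./fmc_sensors.sh
head -1 fmc_sensors.csv
# FMC,healthStatus,hostName,model,name,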

#!/bin/bash
## Collect all sensor devicerecords from an FMC.
## Requires: python:PyYAML,shyaml
## 2018 (v.01) - Script from www.davideaves.com
 
username="fmcusername"
password="fmcpassword"
 
FMC="192.0.2.13 192.0.2.14 192.0.2.15 192.0.2.16 192.0.2.17 192.0.2.18 192.0.2.21 192.0.2.22 192.0.2.23"
 
### Convert JSON to YAML.
j2y() {
 python -c 'import sys, yaml, json; yaml.safe_dump(json.load(sys.stdin), sys.stdout, default_flow_style=False)' 2> /dev/null
}
 
### Convert YAML to JSON.
y2j() {
 python -c 'import sys, yaml, json; y=yaml.load(sys.stdin.read()); print json.dumps(y)' 2> /dev/null
}
 
echo "FMC,healthStatus,hostName,model,name," > "$(basename ${0%.*}).csv"
 
# Iterate through all FMC devices
for firepower in ${FMC}
 do eval "$(curl -skX POST https://${firepower}/api/fmc_platform/v1/auth/generatetoken \
        -H "Authorization: Basic $(printf "${username}:${password}" | base64)" -D - |\
        awk '/(auth|DOMAIN|global)/{gsub(/[\r|:]/,""); gsub(/-/,"_",$1); print $1"=\""$2"\""}')"
 
    ### Get expanded list of devices
    curl -skX GET "https://${firepower}/api/fmc_config/v1/domain/${DOMAIN_UUID}/devices/devicerecords?offset=0&limit=1000&expanded=true" -H "X-auth-access-token: ${X_auth_access_token}" |\
     j2y | awk 'BEGIN{ X=0; }/^(-|  [a-z])/{if($1 == "-") {X+=1; printf "'''${firepower}''',"} else if($1 == "healthStatus:" || $1 == "hostName:" || $1 == "model:" || $1 == "name:") {printf $NF","} else if($1 == "type:") {printf "\n"}}'
 
done >> "$(basename ${0%.*}).csv"
19. December 2018 · Comments Off on Ansible playbook to provision Netscaler VIPs. · Categories: Ansible, Linux, Linux Admin, Load Balancing, NetScaler, Networking

The following playbook will create a fully functional VIP, including the supporting monitor, service group (pool) and servers (nodes), on a NetScaler load balancer. The same playbook can also fully deprovision a VIP and all its supporting artifacts. To do all this I use the native NetScaler Ansible modules. Because the number of servers is not always consistent, the netscaler_servicegroup task is generated from a Jinja2 template, which is then imported back into the play.
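
Everything is driven by the state variable, so the same playbook both builds and tears down. A run against an inventory group named netscaler (per the hosts: line) looks roughly like this, using the credential variable names the tasks already expect:

# Build the VIP, monitors, service groups and servers
ansible-playbook netscaler_provision.yaml -e nitro_user=nsroot -e nitro_pass=nsroot

# Tear the same VIP and all of its artifacts back down
ansible-playbook netscaler_provision.yaml -e nitro_user=nsroot -e nitro_pass=nsroot -e state=absent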

netscaler_provision.yaml: a /usr/bin/ansible-playbook -f 10 script text executable, ASCII text

#!/usr/bin/ansible-playbook -f 10
## Ansible playbook to provision Netscaler VIPs.
# Requires: nitrosdk-python
# 2018 (v.01) - Playbook from www.davideaves.com
---
- name: Netscaler VIP provision
  hosts: netscaler
  connection: local
  gather_facts: False

  vars:

    ansible_connection: "local"
    ansible_python_interpreter: "/usr/bin/env python"

    state: 'present'

    lbvip:
      name: testvip
      address: 203.0.113.1
      server:
        - name: 'server-1'
          address: '192.0.2.1'
          description: 'Ansible Test Server 1'
          disabled: 'true'
        - name: 'server-2'
          address: '192.0.2.2'
          description: 'Ansible Test Server 2'
          disabled: 'true'
        - name: 'server-3'
          address: '192.0.2.3'
          description: 'Ansible Test Server 3'
          disabled: 'true'
        - name: 'server-4'
          address: '192.0.2.4'
          description: 'Ansible Test Server 4'
          disabled: 'true'
        - name: 'server-5'
          address: '192.0.2.5'
          description: 'Ansible Test Server 5'
          disabled: 'true'
        - name: 'server-6'
          address: '192.0.2.6'
          description: 'Ansible Test Server 6'
          disabled: 'true'
        - name: 'server-7'
          address: '192.0.2.7'
          description: 'Ansible Test Server 7'
          disabled: 'true'
        - name: 'server-8'
          address: '192.0.2.8'
          description: 'Ansible Test Server 8'
          disabled: 'true'
      vserver:
        - port: '80'
          description: 'Generic service running on 80'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '443'
          description: 'Generic service running on 443'
          type: 'SSL_BRIDGE'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8080'
          description: 'Generic service running on 8080'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8081'
          description: 'Generic service running on 8081'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8443'
          description: 'Generic service running on 8443'
          type: 'SSL_BRIDGE'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'

  tasks:

    - name: Build lbvip and all related components.
      block:
      - local_action:
          module: netscaler_server
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          name: "{{ item.name }}"
          ipaddress: "{{ item.address }}"
          comment: "{{ item.description | default('Ansible Created') }}"
          disabled: "{{ item.disabled | default('false') }}"
        with_items: "{{ lbvip.server }}"
      - local_action:
          module: netscaler_lb_monitor
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
          type: TCP
          destport: "{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
        no_log: false
      - local_action:
          module: copy
          content: "{{ lookup('template', 'templates/netscaler_servicegroup.j2') }}"
          dest: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
          mode: "0644"
        with_items: "{{ lbvip.vserver }}"
        changed_when: false
      - include_tasks: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: file
          state: absent
          path: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
        with_items: "{{ lbvip.vserver }}"
        changed_when: false
      - local_action:
          module: netscaler_lb_vserver
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          name: "vs_{{ lbvip.name }}_{{ item.port }}"
          servicetype: "{{ item.type }}"
          ipv46: "{{ lbvip.address }}"
          port: "{{ item.port }}"
          lbmethod: "{{ item.method | default('LEASTCONNECTION') }}"
          persistencetype: "{{ item.persistence | default('SOURCEIP') }}"
          servicegroupbindings:
            - servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      when: state == "present"

    - name: Destroy lbvip and all related components.
      block:
      - local_action:
          module: netscaler_lb_vserver
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          name: "vs_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_servicegroup
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_lb_monitor
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
          type: TCP
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_server
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          name: "{{ item.name }}"
        with_items: "{{ lbvip.server }}"
      when: state == "absent"

The following is the Jinja2 template that creates the netscaler_servicegroup task. An important thing to note is the use of raw blocks: when the task file is rendered and stored in /tmp it does not contain any account credentials; the credential variables are left unexpanded inside the raw blocks, preventing sensitive information from leaking to anyone who may be snooping around on the server while the playbook is running.

templates/netscaler_servicegroup.j2: ASCII text, with CRLF line terminators

---
- local_action:
    module: netscaler_servicegroup
    nsip: {% raw %}"{{ inventory_hostname }}"
{% endraw %}
    nitro_user: {% raw %}"{{ nitro_user }}"
{% endraw %}
    nitro_pass: {% raw %}"{{ nitro_pass }}"
{% endraw %}
    nitro_protocol: "https"
    validate_certs: no

    state: "{{ state | default('present') }}"

    servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
    comment: "{{ item.description | default('Ansible Created') }}"
    servicetype: "{{ item.type }}"
    servicemembers:
{% for i in lbvip.server %}
      - servername: "{{ i.name }}"
        port: "{{ item.port }}"
{% endfor %}
    monitorbindings:
      - monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
17. November 2018 · Comments Off on Convert ASA access-list rules to a parseable YAML format. · Categories: AWK, Cisco, Firewall, Linux Scripts, Networking

This script spun out of a string of firewall migrations off the legacy ASA platform, where I needed the ability to convert access-lists to a parseable format. There are multiple reasons for needing this: first, human readability and auditing; second, having a parseable rule base for duplication or migration to other firewall types.
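
The script takes the ASA config as its first argument and an optional search string as its second, emitting one YAML block per access-group (the config file name below is a placeholder):

./ASA_acls.sh asa-running-config.txt              # convert every access-group
./ASA_acls.sh asa-running-config.txt 192.0.2.10   # only rules matching this string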

ASA_acls.sh: Bourne-Again shell script text executable, ASCII text

#!/bin/bash
## Convert ASA access-list rules to a parseable YAML format.
## 2018 (v.01) - Script from www.davideaves.com
 
### VARIABLES ###
 
asa_config_file="${1}"
search_string="${2}"
 
### MAIN SCRIPT ###
 
[ -z "${asa_config_file}" ] && { echo -e "${0} - ERROR: missing ASA config" >&2; exit 1; }
 
for ACCESSGROUP in `awk '/^access-group /{print $2}' "${asa_config_file}" | sort --ignore-case`
 do
 
  echo "${ACCESSGROUP}:"
  awk 'BEGIN{ REMARK=""; ACTION=""; SERVICE=""; SOURCE=""; DESTINATION=""; PORT=""; LOG=""; DISABLED=""; previous="" }
 
        # convert number to bits
        function bits(N){
          c = 0
          for(i=0; i<8; ++i) if(and(2**i, N)) ++c
          return c
        }
 
        # convert ipv4 to prefix
        function to_prefix(mask) {
          split(mask, octet, ".")
          return bits(octet[1]) + bits(octet[2]) + bits(octet[3]) + bits(octet[4])
        }
 
        # test if a string is an ipv4 address
        function is_v4(address) {
          split(address, octet, ".")
          if ( octet[1] <= 255 && octet[2] <= 255 && octet[3] <= 255 && octet[4] <= 255 )
          return address
        }
 
        # Only look at access-lists lines
        /^access-list '''${ACCESSGROUP}''' .*'''${search_string}'''/{
 
        # If line is a remark store it else continue
        if ( $3 == "remark" ) { $1=$2=$3=""; REMARK=substr($0,4) }
        else { $1=$2=$3=""; gsub("^   ", "")
 
          # Iterate through columns
          for(col = 1; col <= NF; col++) {
 
           # Append prefix to SOURCE & DESTINATION
           if ( is_v4(previous) && is_v4($col) ) {
            if ( DESTINATION != "" ) { DESTINATION=DESTINATION"/"to_prefix($col); previous="" }
            else if ( SOURCE != "" ) { SOURCE=SOURCE"/"to_prefix($col); previous="" }
          } else {
 
            # Determine col variable
            if ( col == "1" ) { ACTION=$col; SERVICE=""; SOURCE=""; DESTINATION=""; PORT=""; LOG=""; DISABLED=""; previous="" }
            else if ( $col ~ /^(eq|interface|object|object-group)$/ ) { previous=$col }
            else if ( SERVICE == "" && $col !~ /^(host|object|object-group)$/ ) { SERVICE=$col; PORT=""; previous="" }
            else if ( SOURCE == "" && $col !~ /^(host|object|object-group)$/ ) {
              if ( previous == "interface" ) { SOURCE=previous"/"$col }
              else { SOURCE=$col }; PORT=""; previous=to_prefix($col) }
            else if ( DESTINATION == "" && $col !~ /^(host|object|object-group)$/ ) {
              if ( previous == "interface" ) { DESTINATION=previous"/"$col }
              else { DESTINATION=$col }; PORT=""; previous=to_prefix($col) }
            else if ( previous ~ /^(eq|object-group)$/ ) { PORT=$col; previous="" }
            else if ( $col == "log" ) { LOG=$col; previous="" }
            else if ( $col == "inactive" ) { DISABLED=$col; previous="" }
            else { LAST=$col; previous="" }
 
          }
 
        }}
 
        # Display the output
        if ( DESTINATION != "" ) { count++
          print "  - name: '''${ACCESSGROUP}''' rule",count,"line",NR
          print "    debug:",$0
          if ( REMARK != "" ) { print "    description:",REMARK }
          print "    action:",ACTION
          print "    source:",SOURCE
          print "    destination:",DESTINATION
          if ( PORT == "" ) { print "    service:",SERVICE }
          else { print "    service:",SERVICE"/"PORT }
          if ( LOG != "" ) { print "    log: true" }
          if ( DISABLED != "" ) { print "    disabled: true" }
          REMARK=""; ACTION=""; SERVICE=""; SOURCE=""; DESTINATION=""; PORT=""; LOG=""; DISABLED=""; previous=""
        }
 
  }' "${asa_config_file}"
 
done