I have had many ruminations on the best way to encrypt and secure files. A good reason for wanting to encrypt your files might be: you keep a USB stick on your keychain with your taxes or customer data on it so it’s accessible to you, but you’re worried about losing it. In situations like that, file encryption is exactly what you want! I have previously written scripts and jotted notes on encrypting block devices, and I am not even convinced that approach is best practice. Encrypting block devices is all well and good, but it’s not very practical for encrypting individual files. I recently noticed that Cisco uses GPG to encrypt the backup files on their ISE appliance, which convinced me that GPG is probably the smartest way to encrypt files. Unfortunately, there are a lot of command-line options and reading you have to get through in order to use it properly. I figured this was the perfect thing to write a script for and make public. My goal is to make GnuPG a little more accessible to a broader audience.
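For context, these are roughly the bare gpg invocations the script automates; a minimal sketch, with placeholder file names and key ID:

# Symmetric: passphrase-based encrypt, then decrypt (file names are examples)
gpg --output secrets.txt.gpg --symmetric secrets.txt
gpg --output secrets.txt --decrypt secrets.txt.gpg

# Public-key: encrypt to a recipient key ID (placeholder shown)
gpg --output secrets.txt.gpg --recipient 0x1234567890ABCDEF --encrypt secrets.txt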

The following script is intended to make it simple for a lay Linux user to use GPG to encrypt/decrypt files and directories. As a bonus I’m keeping the debug block (which can safely be removed) that I used to test encryption and decryption while I was writing the script and testing commands. If you do not already have a public-private key pair, the script will generate one for you and ask if you want to make a backup. If you allow the script to create backup files of your keys, I strongly recommend you remove them as soon as possible! The script can also restore your keys from backup if you put the backup files back in the root of your home directory. Additionally, if the script doesn’t see any keys it will perform symmetric encryption against the given file or directory. Symmetric encryption just uses a simple passphrase to perform the encryption and decryption. If you create the following variable:

export gpg_method="symmetric"

With the above variable set, the script will ignore your keys and perform symmetric-only encryption.
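For example, a hypothetical session might look like this (file names are examples; the script derives the output names itself):

export gpg_method="symmetric"

./gpg_crypt.sh taxes.pdf             # encrypts to taxes.pdf.gpg
./gpg_crypt.sh taxes.pdf.gpg         # decrypts back to taxes.pdf
./gpg_crypt.sh Documents             # tars and encrypts to Documents.tgz.gpg
./gpg_crypt.sh Documents.tgz.gpg     # decrypts and extracts the tarball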

gpg_crypt.sh: a /usr/bin/env bash script, ASCII text executable, with very long lines (382)

#!/usr/bin/env bash
## GnuPG encryption/decryption helper.
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
# set -x
 
export GPG_TTY=$(tty)
 
# Verify script requirements
for req in curl gpg jq
 do type ${req} >/dev/null 2>&1 || { echo >&2 "$(basename "${0}"): I require ${req} but it's not installed. Aborting."; exit 1; }
done && umask 0077
 
# Fetch existing keys
keyid=( $(gpg --list-keys --keyid-format 0xLONG | awk '/^sub.*[E]/{gsub("[]|[]|/", " "); print $3,$NF}') )
 
# Help manage keys
if [ -z "${keyid}" ] && [ -z "${gpg_method}" ]
 then [ -f "${HOME}/bin/rc_files/gpg.conf" -a ! -f "${HOME}/.gnupg/gpg.conf" ] && \
       { cat "${HOME}/bin/rc_files/gpg.conf" > "${HOME}/.gnupg/gpg.conf"; }
 
      # Generate new keys
      read -p "(G)enerate new or (R)estore keys? (g/r) " -n 1 -r; echo
      if [[ "${REPLY}" =~ ^[Gg]$ ]]
       then gpg --full-generate-key || gpg --gen-key
 
            # Backup keys
            read -p "Export a backup of keys? " -n 1 -r; echo
            if [[ "${REPLY}" =~ ^[Yy]$ ]]
             then gpg --armor --export-secret-key > "${HOME}/gpg_secret-key.asc"
                  gpg --armor --export-secret-subkeys > "${HOME}/gpg_secret-subkeys.asc"
                  gpg --armor --export > "${HOME}/gpg_public-key.asc"
                  gpg --armor --export-ownertrust > "${HOME}/gpg_ownertrust.txt"
            fi
 
      # Restore existing keys
      elif [[ "${REPLY}" =~ ^[Rr]$ ]]
       then for asc in ${HOME}/gpg_*.asc
             do gpg --import "${asc}"
            done && gpg --import-ownertrust "${HOME}/gpg_ownertrust.txt"
      fi && unset REPLY
 elif [ -n "${gpg_method}" ]
  then unset keyid
fi
 
# Exit if no debug or user input
if [[ ! "$SHELLOPTS" =~ "xtrace" ]] && [[ ${#} -eq 0 ]]
 then echo "$(basename "${0}"): GnuPG encryption/decryption helper."
      echo "File or Directory input is required to continue!"
 exit 0
fi
 
# Create debug test file
if [[ "$SHELLOPTS" =~ "xtrace" ]] && [[ ${#} -eq 0 ]]
 then debug_b64="/9j/4AAQSkZJRgABAQAAZABkAAD/2wCEABQQEBkSGScXFycyJh8mMi4mJiYmLj41NTU1NT5EQUFBQUFBREREREREREREREREREREREREREREREREREREREQBFRkZIBwgJhgYJjYmICY2RDYrKzZERERCNUJERERERERERERERERERERERERERERERERERERERERERERERERERP/AABEIAAEAAQMBIgACEQEDEQH/xABMAAEBAAAAAAAAAAAAAAAAAAAABQEBAQAAAAAAAAAAAAAAAAAABQYQAQAAAAAAAAAAAAAAAAAAAAARAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/AJQA9Yv/2Q=="
 
      # Create temp file
      if debug_file="$(mktemp)"
       then trap 'rm -f "${debug_file}"*' \
         SIGINT SIGTERM ERR EXIT
       else echo "Failure, exit status: ${?}"
            exit 1
      fi && echo -n "${debug_b64}" | base64 -d > "${debug_file}"
 
      MimeDB=( "https://raw.githubusercontent.com/jshttp/mime-db/master/db.json" "${HOME}/.mime.json" )
      [ ! -s "${MimeDB[1]}" ] && { curl -s "${MimeDB[0]}" | jq . > "${MimeDB[1]}"; }
 
      debug_mime="$(file -b --mime-type "${debug_file}")"
      debug_hash="$(sha1sum "${debug_file}" | awk '{print $1}')"
      debug_ext="$(jq -r --arg MIME "${debug_mime}" '.[$MIME].extensions[0] // empty' "${MimeDB[1]}")"
      debug_output="${debug_hash}.${debug_ext}"
 
      mv "${debug_file}" "${debug_file}.${debug_ext}"
      set -- "$@" "${debug_file}.${debug_ext}"
 
      file "${debug_file}.${debug_ext}"
fi
 
### BEGIN ###
 
for input in "${@}"
 do file_ext="$(basename "${input##*.}")"
 
    ## symmetric encryption: file ##
    if [ -z "${keyid[0]}" ] && [ -f "${input}" ]
     then if [ "${file_ext}" != "gpg" ]
           then gpg --batch --yes --quiet --output "$(basename "${input:-'null'}").gpg" --symmetric "${input}"
          ## decrypt file ##
          elif [[ "${input%.*}" =~ ".tgz"$ ]]
           then gpg --batch --yes --quiet --decrypt "${input}" | tar xzfv -
           else gpg --batch --yes --quiet --output "$(basename "${input%.*}")" --decrypt "${input}"
          fi
 
    ## symmetric encryption: directory ##
    elif [ -z "${keyid[0]}" ] && [ -d "${input}" ]
     then tar czfv - "${input}" | gpg --batch --yes --quiet --output "$(basename "${input:-'null'}").tgz.gpg" --symmetric
 
    ## key encryption: file ##
    elif [ -n "${keyid[0]}" ] && [ -f "${input}" ]
     then if [ "${file_ext}" != "gpg" ]
           then gpg --batch --yes --quiet --output "$(basename "${input:-'null'}").gpg" --recipient ${keyid[0]} --encrypt "${input}"
          ## decrypt file ##
          elif [[ "${input%.*}" =~ ".tgz"$ ]]
           then gpg --batch --yes --quiet --recipient ${keyid[0]} --decrypt "${input}" | tar xzfv -
           else gpg --batch --yes --quiet --output "$(basename "${input%.*}")" --recipient ${keyid[0]} --decrypt "${input}"
          fi
 
    ## key encryption: directory ##
    elif [ -n "${keyid[0]}" ] && [ -d "${input}" ]
     then tar czfv - "${input}" | gpg --batch --yes --quiet --recipient ${keyid[0]} --encrypt > "$(basename "${input:-'null'}").tgz.gpg"
    fi
done
 
### FINISH ###

GnuPG user configuration options

If you use GnuPG I recommend updating your configuration to give it an affinity for stronger ciphers. Riseup.net created a very good best-practice config that is a solid starting place. I’ve made a few modifications to it, but have left it relatively unchanged. If the above script sees the user config missing and can find the following config file in a repo directory, it will go ahead and copy the configuration to where it needs to be… Feel free to use the below config as an optional reference.
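If you are not using the script, a minimal sketch of installing the config by hand (assuming you saved the config below as gpg.conf in your current directory):

mkdir -p "${HOME}/.gnupg"
chmod 700 "${HOME}/.gnupg"
cp gpg.conf "${HOME}/.gnupg/gpg.conf"
chmod 600 "${HOME}/.gnupg/gpg.conf"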

gpg.conf: ASCII text

## GnuPG Options
 
# Assume that command line arguments are given as UTF8 strings.
utf8-strings
 
#
# This is an implementation of the Riseup OpenPGP Best Practices
# https://help.riseup.net/en/security/message-security/openpgp/best-practices
#
 
#-----------------------------
# default key
#-----------------------------
 
# The default key to sign with. If this option is not used, the default key is the first key found in the secret keyring
#default-key 0xD8692123C4065DEA5E0F3AB5249B39D24F25E3B6
 
#-----------------------------
# behavior
#-----------------------------
 
# Disable inclusion of the version string in ASCII armored output
no-emit-version
 
# Disable comment string in clear text signatures and ASCII armored messages
no-comments
 
# Display long key IDs
keyid-format 0xlong
 
# List all keys (or the specified ones) along with their fingerprints
with-fingerprint
 
# Display the calculated validity of user IDs during key listings
list-options show-uid-validity
verify-options show-uid-validity
 
# Try to use the GnuPG-Agent. With this option, GnuPG first tries to connect to the agent before it asks for a passphrase.
use-agent
 
#-----------------------------
# algorithm and ciphers
#-----------------------------
 
# list of personal cipher preferences. When multiple ciphers are supported by all recipients, choose the strongest one
personal-cipher-preferences AES256 AES192 AES CAST5
 
# list of personal digest preferences. When multiple digests are supported by all recipients, choose the strongest one
personal-digest-preferences SHA512 SHA384 SHA256 SHA224
 
# message digest algorithm used when signing a key
cert-digest-algo SHA512
 
# This preference list is used for new keys and becomes the default for "setpref" in the edit menu
default-preference-list SHA512 SHA384 SHA256 SHA224 AES256 AES192 AES CAST5 ZLIB BZIP2 ZIP Uncompressed
 
# Use a specified algorithm as the symmetric cipher
cipher-algo AES256
21. February 2021 · Comments Off on Targeted network monitoring using only fping and rrdtool. · Categories: Linux, Linux Admin, Linux Scripts, Networking · Tags: , , , , , ,

I’ve been very unhappy with the state of network monitoring applications lately. Most network monitoring tools are either too big or too arbitrary to be helpful for application support. This can be an issue when focusing on a specific application with components separated into various tiers, datacenters or locations. When network performance is in question, the most helpful data is the active latency between a node and its other components (during or leading up to the time in question). If a network monitoring tool lacks any specificity for the application in question it will be viewed as too dense or cerebral to be useful; at worst it will harm troubleshooting. The quicker hard data can be produced proving out the network layer, the quicker troubleshooting can move up the stack towards resolution.

Monolithic tools like Cacti are sometimes useful; however, the lighter the script, the more nimbly it can be deployed on a wide variety of nodes. Because FPing and RRDTool are both small, useful and standard Linux packages they are ideal, so I wrote the following bash script that leverages only those 2 tools together. The data collected is roughly identical to SmokePing’s, but has the benefit of not dirtying a system with unnecessary packages. The script can easily be deployed by any DevOps deployment tool and is run via crontab. Graphs can be created when or if they are needed.
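As a sketch, deploying it on a node might look like the following; the install path is an assumption, and the crontab entry matches the one in the script header:

sudo mkdir -p /var/lib/fping                          # rrd_path used by the script
sudo install -m 0755 fping_rrd.sh /usr/local/bin/     # example install location
( crontab -l 2>/dev/null; echo '*/5 * * * * /usr/local/bin/fping_rrd.sh' ) | crontab -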

fping_rrd.sh: Bourne-Again shell script, ASCII text executable

#!/usr/bin/env bash
## FPing data collector for RRDTOOL
#
# Crontab:
#   */5 * * * *     fping_rrd.sh
#
## Requires: fping, rrdtool
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
#set -x
 
STEP=300 # 5min
PINGS=20 # 20 pings
 
# The first ping is usually an outlier; adding an extra ping to drop the first result.
 
fping_opts="-C $((PINGS+1)) -q -B1 -r1 -i10"
fping_hosts="172.31.3.1 172.31.3.3 172.31.4.1 172.31.4.10 172.31.15.1 172.31.15.4"
rrd_path="/var/lib/fping"
rrd_timestamp=$(date +%s)
 
calc_median() {
    # Note: asort() requires GNU awk (gawk).
    awk '{ if ( $1 != "-" ) { fping[NR] = $1 }
           else { NR-- }
         }
     END { asort(fping);
           if (NR % 2) { print fping[(NR + 1) / 2] }
           else { print (fping[(NR / 2)] + fping[(NR / 2) + 1]) / 2.0 }
         }'
}
 
rrd_create() {
    rrdtool create "${fping_rrd}" \
     --start now-2h --step $((STEP)) \
     DS:loss:GAUGE:$((STEP*2)):0:$((PINGS)) \
     DS:median:GAUGE:$((STEP*2)):0:180 \
     $(seq -f " DS:ping%g:GAUGE:$((STEP*2)):0:180" 1 $((PINGS))) \
     RRA:AVERAGE:0.5:1:1008 \
     RRA:AVERAGE:0.5:12:4320 \
     RRA:MIN:0.5:12:4320 \
     RRA:MAX:0.5:12:4320 \
     RRA:AVERAGE:0.5:144:720 \
     RRA:MAX:0.5:144:720 \
     RRA:MIN:0.5:144:720
}
 
rrd_update() {
    rrd_loss=0
    rrd_median=""
    rrd_rev=$((PINGS))
    rrd_name=""
    rrd_value="${rrd_timestamp}"
 
    for rrd_idx in $(seq 1 $((rrd_rev)))
     do
        rrd_name="${rrd_name}$([[ ${rrd_idx} -gt "1" ]] && echo ":")ping$((rrd_idx))"
        rrd_value="${rrd_value}:${fping_array[-$((rrd_rev))]}"
        rrd_median="${fping_array[-$((rrd_rev))]}\n${rrd_median}"
 
        [ "${fping_array[-$((rrd_rev))]}" == "-" ] && (( rrd_loss++ ))
 
        (( rrd_rev-- ))
    done
    rrd_median=$(printf '%b' "${rrd_median}" | calc_median)
 
    rrdtool update "${fping_rrd}" --template $(echo ${rrd_name}:median:loss ${rrd_value}:${rrd_median}:${rrd_loss} | sed 's/-/U/g')
    unset rrd_loss rrd_median rrd_rev rrd_name rrd_value
}
 
fping ${fping_opts} ${fping_hosts} 2>&1 | while read fping_line;
 do fping_array=( ${fping_line} )
    fping_rrd="${rrd_path}/fping_${fping_array[0],,}.rrd"
 
    # Create RRD file.
    if [ ! -f "${fping_rrd}" ]
     then rrd_create
    fi
 
    # Update RRD file.
    if [ -f "${fping_rrd}" ]
     then rrd_last=$(( ${rrd_timestamp} - $(rrdtool last "${fping_rrd}") ))
          [[ $((rrd_last)) -ge $((STEP)) ]] && rrd_update
    fi && unset rrd_last
done
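Once a few collection intervals have passed, the RRD files can be spot-checked with stock rrdtool commands; the file name below is an example built from the host list above:

rrdtool lastupdate /var/lib/fping/fping_172.31.3.1.rrd   # most recent sample written
rrdtool info /var/lib/fping/fping_172.31.3.1.rrd | head  # step, DS and RRA definitions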

Creating Network Monitoring Graphs

The following are 3 example scripts that use rrdtool to create graphs from the RRD files.
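Example invocations for all three, assuming RRD files in the collector's rrd_path (the optional start/end arguments default inside each script):

./graph_mini.sh  /var/lib/fping/fping_172.31.15.4.rrd "-9 hours" "now"
./graph_multi.sh fping_graph.png /var/lib/fping/fping_*.rrd
./graph_smoke.sh /var/lib/fping/fping_172.31.15.4.rrd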

Mini Graph

fping_172.31.15.4_mini.png

graph_mini.sh: Bourne-Again shell script, ASCII text executable

#!/usr/bin/env bash
## Create a mini graph from a RRD file
## Requires: rrdtool
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
#set -x
 
fping_rrd="${1}"
COLOR=( "FF5500" )
 
rrd_graph_cmd() {
cat << EOF
rrdtool graph "$(basename ${fping_rrd%.*})_mini.png"
--start "${START}" --end "${END}"
--title "$(date -d "${START}") ($(awk -v TIME=$TIME 'BEGIN {printf "%.1f hr", TIME/3600}'))"
--height 65 --width 600
--vertical-label "Seconds"
--color BACK#F3F3F3
--color CANVAS#FDFDFD
--color SHADEA#CBCBCB
--color SHADEB#999999
--color FONT#000000
--color AXIS#2C4D43
--color ARROW#2C4D43
--color FRAME#2C4D43
--border 1
--font TITLE:10:"Arial"
--font AXIS:8:"Arial"
--font LEGEND:8:"Courier"
--font UNIT:8:"Arial"
--font WATERMARK:6:"Arial"
--imgformat PNG
EOF
}
 
rrd_graph_opts() {
rrd_idx=0
cat << EOF
DEF:median$((rrd_idx))="${fping_rrd}":median:AVERAGE
DEF:loss$((rrd_idx))="${fping_rrd}":loss:AVERAGE
$(for ((i=1;i<=PINGS;i++)); do echo "DEF:ping$((rrd_idx))p$((i))=\"${fping_rrd}\":ping$((i)):AVERAGE"; done)
CDEF:ploss$((rrd_idx))=loss$((rrd_idx)),20,/,100,*
CDEF:dm$((rrd_idx))=median$((rrd_idx)),0,100000,LIMIT
$(for ((i=1;i<=PINGS;i++)); do echo "CDEF:p$((rrd_idx))p$((i))=ping$((rrd_idx))p$((i)),UN,0,ping$((rrd_idx))p$((i)),IF"; done)
$(echo -n "CDEF:pings$((rrd_idx))=$((PINGS)),p$((rrd_idx))p1,UN"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),UN,+"; done; echo ",-")
$(echo -n "CDEF:m$((rrd_idx))=p$((rrd_idx))p1"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),+"; done; echo ",pings$((rrd_idx)),/")
$(echo -n "CDEF:sdev$((rrd_idx))=p$((rrd_idx))p1,m$((rrd_idx)),-,DUP,*"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),m$((rrd_idx)),-,DUP,*,+"; done; echo ",pings$((rrd_idx)),/,SQRT")
CDEF:dmlow$((rrd_idx))=dm$((rrd_idx)),sdev$((rrd_idx)),2,/,-
CDEF:s2d$((rrd_idx))=sdev$((rrd_idx))
AREA:dmlow$((rrd_idx))
AREA:s2d$((rrd_idx))#${COLOR}30:STACK
LINE1:dm$((rrd_idx))#${COLOR}:"$(basename ${fping_rrd%.*} | awk -F'_' '{print $NF}')\t"
VDEF:avmed$((rrd_idx))=median$((rrd_idx)),AVERAGE
VDEF:avsd$((rrd_idx))=sdev$((rrd_idx)),AVERAGE
CDEF:msr$((rrd_idx))=median$((rrd_idx)),POP,avmed$((rrd_idx)),avsd$((rrd_idx)),/
VDEF:avmsr$((rrd_idx))=msr$((rrd_idx)),AVERAGE
GPRINT:avmed$((rrd_idx)):"Median RTT\: %5.2lfms"
GPRINT:ploss$((rrd_idx)):AVERAGE:"Loss\: %5.1lf%%"
GPRINT:avsd$((rrd_idx)):"Std Dev\: %5.2lfms"
GPRINT:avmsr$((rrd_idx)):"Ratio\: %5.1lfms\\j"
COMMENT:"Probe\: $((PINGS)) pings every $((STEP)) seconds"
COMMENT:"${fping_rrd}\\j"
EOF
}
 
if [ ! -r "${fping_rrd}" ]
 then printf "${0} \"file.rrd\"\n"
 else
      STEP=$(rrdtool info "${fping_rrd}" | awk '/^step/{print $NF}')
      PINGS=$(rrdtool info "${fping_rrd}" | awk '/^ds.ping.*index/{count++} END{print count}')
 
      START="$([ -z "${2}" ] && echo "-9 hours" || echo "${2}")"
      END="$([ -z "${3}" ] && echo "now" || echo "${3}")"
      TIME=$(( $(date -d "${END}" +%s) - $(date -d "${START}" +%s) ))
 
      eval $(rrd_graph_cmd; rrd_graph_opts)
fi

Combined (multi) Graph

fping_graph.png

graph_multi.sh: Bourne-Again shell script, ASCII text executable

#!/usr/bin/env bash
## Create a mini graph from multiple RRDs
## Requires: rrdtool, openssl
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
#set -x
 
START="-9 hours"
END="now"
 
png_file="${1}"
rrd_files="${*:2}"
 
rrd_graph_cmd() {
cat << EOF
rrdtool graph "${png_file}"
--start "${START}" --end "${END}"
--title "$(date -d "${START}") ($(awk -v TIME=$TIME 'BEGIN {printf "%.1f hr", TIME/3600}'))"
--height 115 --width 600
--vertical-label "Seconds"
--color BACK#F3F3F3
--color CANVAS#FDFDFD
--color SHADEA#CBCBCB
--color SHADEB#999999
--color FONT#000000
--color AXIS#2C4D43
--color ARROW#2C4D43
--color FRAME#2C4D43
--border 1
--font TITLE:10:"Arial"
--font AXIS:8:"Arial"
--font LEGEND:8:"Courier"
--font UNIT:8:"Arial"
--font WATERMARK:6:"Arial"
--imgformat PNG
EOF
}
 
rrd_graph_opts() {
rrd_idx=0
for fping_rrd in ${rrd_files}
do COLOR=$(openssl rand -hex 3)
STEP=$(rrdtool info "${fping_rrd}" | awk '/^step/{print $NF}')
PINGS=$(rrdtool info "${fping_rrd}" | awk '/^ds.ping.*index/{count++} END{print count}')
cat << EOF
DEF:median$((rrd_idx))="${fping_rrd}":median:AVERAGE
DEF:loss$((rrd_idx))="${fping_rrd}":loss:AVERAGE
$(for ((i=1;i<=PINGS;i++)); do echo "DEF:ping$((rrd_idx))p$((i))=\"${fping_rrd}\":ping$((i)):AVERAGE"; done)
CDEF:ploss$((rrd_idx))=loss$((rrd_idx)),20,/,100,*
CDEF:dm$((rrd_idx))=median$((rrd_idx)),0,100000,LIMIT
$(for ((i=1;i<=PINGS;i++)); do echo "CDEF:p$((rrd_idx))p$((i))=ping$((rrd_idx))p$((i)),UN,0,ping$((rrd_idx))p$((i)),IF"; done)
$(echo -n "CDEF:pings$((rrd_idx))=$((PINGS)),p$((rrd_idx))p1,UN"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),UN,+"; done; echo ",-")
$(echo -n "CDEF:m$((rrd_idx))=p$((rrd_idx))p1"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),+"; done; echo ",pings$((rrd_idx)),/")
$(echo -n "CDEF:sdev$((rrd_idx))=p$((rrd_idx))p1,m$((rrd_idx)),-,DUP,*"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),m$((rrd_idx)),-,DUP,*,+"; done; echo ",pings$((rrd_idx)),/,SQRT")
CDEF:dmlow$((rrd_idx))=dm$((rrd_idx)),sdev$((rrd_idx)),2,/,-
CDEF:s2d$((rrd_idx))=sdev$((rrd_idx))
AREA:dmlow$((rrd_idx))
AREA:s2d$((rrd_idx))#${COLOR}30:STACK
LINE1:dm$((rrd_idx))#${COLOR}:"$(basename ${fping_rrd%.*} | awk -F'_' '{print $NF}')\t"
VDEF:avmed$((rrd_idx))=median$((rrd_idx)),AVERAGE
VDEF:avsd$((rrd_idx))=sdev$((rrd_idx)),AVERAGE
CDEF:msr$((rrd_idx))=median$((rrd_idx)),POP,avmed$((rrd_idx)),avsd$((rrd_idx)),/
VDEF:avmsr$((rrd_idx))=msr$((rrd_idx)),AVERAGE
GPRINT:avmed$((rrd_idx)):"Median RTT\: %5.2lfms"
GPRINT:ploss$((rrd_idx)):AVERAGE:"Loss\: %5.1lf%%"
GPRINT:avsd$((rrd_idx)):"Std Dev\: %5.2lfms"
GPRINT:avmsr$((rrd_idx)):"Ratio\: %5.1lfms\\j"
EOF
(( rrd_idx++ ))
done && unset rrd_idx
}
 
if [ -z "${rrd_files}" ]
 then printf "${0} \"file.png\" { file1.rrd ... file6.rrd }\n"
 else TIME=$(( $(date -d "${END}" +%s) - $(date -d "${START}" +%s) ))
      eval $(rrd_graph_cmd; rrd_graph_opts)
fi

SmokePing-like Graph

fping_172.31.15.4_smoke.png

graph_smoke.sh: Bourne-Again shell script, ASCII text executable

#!/usr/bin/env bash
## Create a SmokePing-like graph from a RRD file
## Requires: rrdtool
## 2021 - Script from www.davideaves.com
 
# Enable for debugging
#set -x
 
fping_rrd="${1}"
COLOR=( "0F0f00" "00FF00" "00BBFF" "0022FF" "8A2BE2" "FA0BE2" "C71585" "FF0000" )
LINE=".5"
 
rrd_graph_cmd() {
cat << EOF
rrdtool graph "$(basename ${fping_rrd%.*})_smoke.png"
--start "${START}" --end "${END}"
--title "$(basename ${fping_rrd%.*} | awk -F'_' '{print $NF}')"
--height 95 --width 600
--vertical-label "Seconds"
--color BACK#F3F3F3
--color CANVAS#FDFDFD
--color SHADEA#CBCBCB
--color SHADEB#999999
--color FONT#000000
--color AXIS#2C4D43
--color ARROW#2C4D43
--color FRAME#2C4D43
--border 1
--font TITLE:10:"Arial"
--font AXIS:8:"Arial"
--font LEGEND:9:"Courier"
--font UNIT:8:"Arial"
--font WATERMARK:7:"Arial"
--imgformat PNG
EOF
}
 
rrd_graph_opts() {
cat << EOF
DEF:median$((rrd_idx))="${fping_rrd}":median:AVERAGE
DEF:loss$((rrd_idx))="${fping_rrd}":loss:AVERAGE
$(for ((i=1;i<=PINGS;i++)); do echo "DEF:ping$((rrd_idx))p$((i))=\"${fping_rrd}\":ping$((i)):AVERAGE"; done)
CDEF:ploss$((rrd_idx))=loss$((rrd_idx)),20,/,100,*
CDEF:dm$((rrd_idx))=median$((rrd_idx)),0,100000,LIMIT
$(for ((i=1;i<=PINGS;i++)); do echo "CDEF:p$((rrd_idx))p$((i))=ping$((rrd_idx))p$((i)),UN,0,ping$((rrd_idx))p$((i)),IF"; done)
$(echo -n "CDEF:pings$((rrd_idx))=$((PINGS)),p$((rrd_idx))p1,UN"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),UN,+"; done; echo ",-")
$(echo -n "CDEF:m$((rrd_idx))=p$((rrd_idx))p1"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),+"; done; echo ",pings$((rrd_idx)),/")
$(echo -n "CDEF:sdev$((rrd_idx))=p$((rrd_idx))p1,m$((rrd_idx)),-,DUP,*"; for ((i=2;i<=PINGS;i++)); do echo -n ",p$((rrd_idx))p$((i)),m$((rrd_idx)),-,DUP,*,+"; done; echo ",pings$((rrd_idx)),/,SQRT")
CDEF:dmlow$((rrd_idx))=dm$((rrd_idx)),sdev$((rrd_idx)),2,/,-
CDEF:s2d$((rrd_idx))=sdev$((rrd_idx))
AREA:dmlow$((rrd_idx))
AREA:s2d$((rrd_idx))#${COLOR[0]}30:STACK
\
VDEF:avmed$((rrd_idx))=median$((rrd_idx)),AVERAGE
VDEF:avsd$((rrd_idx))=sdev$((rrd_idx)),AVERAGE
CDEF:msr$((rrd_idx))=median$((rrd_idx)),POP,avmed$((rrd_idx)),avsd$((rrd_idx)),/
VDEF:avmsr$((rrd_idx))=msr$((rrd_idx)),AVERAGE
LINE3:avmed$((rrd_idx))#${COLOR[1]}15:
\
COMMENT:"\t\t"
COMMENT:"Average"
COMMENT:"Maximum"
COMMENT:"Minimum"
COMMENT:"Current"
COMMENT:"Std Dev"
COMMENT:" \\j"
\
COMMENT:"Median RTT\:\t"
GPRINT:avmed$((rrd_idx)):"%.2lf"
GPRINT:median$((rrd_idx)):MAX:"%.2lf"
GPRINT:median$((rrd_idx)):MIN:"%.2lf"
GPRINT:median$((rrd_idx)):LAST:"%.2lf"
GPRINT:avsd$((rrd_idx)):"%.2lf"
COMMENT:" \\j"
\
COMMENT:"Packet Loss\:\t"
GPRINT:ploss$((rrd_idx)):AVERAGE:"%.2lf%%"
GPRINT:ploss$((rrd_idx)):MAX:"%.2lf%%"
GPRINT:ploss$((rrd_idx)):MIN:"%.2lf%%"
GPRINT:ploss$((rrd_idx)):LAST:"%.2lf%%"
COMMENT:"  -  "
COMMENT:" \\j"
\
COMMENT:"Loss Colors\:\t"
CDEF:me0=loss$((rrd_idx)),-1,GT,loss$((rrd_idx)),0,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL0=me0,${LINE},-
CDEF:meH0=me0,0,*,${LINE},2,*,+
AREA:meL0
STACK:meH0#${COLOR[1]}:" 0/$((PINGS))"
CDEF:me1=loss$((rrd_idx)),0,GT,loss$((rrd_idx)),1,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL1=me1,${LINE},-
CDEF:meH1=me1,0,*,${LINE},2,*,+
AREA:meL1
STACK:meH1#${COLOR[2]}:" 1/$((PINGS))"
CDEF:me2=loss$((rrd_idx)),1,GT,loss$((rrd_idx)),2,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL2=me2,${LINE},-
CDEF:meH2=me2,0,*,${LINE},2,*,+
AREA:meL2
STACK:meH2#${COLOR[3]}:" 2/$((PINGS))"
CDEF:me3=loss$((rrd_idx)),2,GT,loss$((rrd_idx)),3,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL3=me3,${LINE},-
CDEF:meH3=me3,0,*,${LINE},2,*,+
AREA:meL3
STACK:meH3#${COLOR[4]}:" 3/$((PINGS))"
CDEF:me4=loss$((rrd_idx)),3,GT,loss$((rrd_idx)),4,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL4=me4,${LINE},-
CDEF:meH4=me4,0,*,${LINE},2,*,+
AREA:meL4
STACK:meH4#${COLOR[5]}:" 4/$((PINGS))"
CDEF:me10=loss$((rrd_idx)),4,GT,loss$((rrd_idx)),10,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL10=me10,${LINE},-
CDEF:meH10=me10,0,*,${LINE},2,*,+
AREA:meL10
STACK:meH10#${COLOR[6]}:"10/$((PINGS))"
CDEF:me19=loss$((rrd_idx)),10,GT,loss$((rrd_idx)),19,LE,*,1,UNKN,IF,median$((rrd_idx)),*
CDEF:meL19=me19,${LINE},-
CDEF:meH19=me19,0,*,${LINE},2,*,+
AREA:meL19
STACK:meH19#${COLOR[7]}:"19/$((PINGS))\\j"
\
COMMENT:"Probe\: $((PINGS)) pings every $((STEP)) seconds"
COMMENT:"$(date -d "${START}" | sed 's/\:/\\\:/g') ($(awk -v TIME=$TIME 'BEGIN {printf "%.1f hr", TIME/3600}'))\\j"
EOF
}
 
if [ ! -r "${fping_rrd}" ]
 then printf "${0} \"file.rrd\"\n"
 else
      STEP=$(rrdtool info "${fping_rrd}" | awk '/^step/{print $NF}')
      PINGS=$(rrdtool info "${fping_rrd}" | awk '/^ds.ping.*index/{count++} END{print count}')
 
      START="$([ -z "${2}" ] && echo "-7 hours" || echo "${2}")"
      END="$([ -z "${3}" ] && echo "now" || echo "${3}")"
      TIME=$(( $(date -d "${END}" +%s) - $(date -d "${START}" +%s) ))
 
      eval $(rrd_graph_cmd; rrd_graph_opts)
fi
Ansible playbook to manage security rules on a Palo Alto firewall

The following Ansible playbook is how I manage firewall rules on a Palo Alto firewall. My overall playbook methodology is to be able to reuse playbook task lists as though they were building blocks, and to be able to both add and remove configuration using the same playbook. To do this, a common trick I like to use is the CLI flag “-e” to specify an input file. The input file is where the abstracted configuration is defined and how I tell the playbook what to build.
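A typical invocation might look like this, mirroring the usage example baked into the playbook's pre_tasks; state=absent backs the same change out:

./main.yaml -e state=present -e er=./ER/CO99999.yaml
./main.yaml -e state=absent -e er=./ER/CO99999.yaml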

Depending on the resources of the company, most ticketing systems, like Service Now or CA Service Desk, can output the proper YAML input file once the required workflow items have been approved. The ticketing system can then output to a Samba share that a crontab watches to kick off and ingest any new input files, or the ticketing system itself can kick off the playbook directly if you have Ansible Tower or AWX in the environment.

The following is my input. When all is said and done, I put most of my mental effort into how best to structure the input. Ideally I try to ask for as little as possible and try to make it adaptable to any vendor product, such as a Cisco FMC.

ER/CO99999.yaml: ASCII text

---
ticket: CO99999
security_rule:
- description: Ansible test rule 0
  source_ip:
  - 192.168.0.100
  - 192.168.100.96
  destination_ip:
  - any
  service:
  - tcp_9000
- description: Another Ansible test rule 1
  source_ip:
  - 192.168.100.104
  - 192.168.100.105
  destination_ip:
  - 192.168.0.100
  service:
  - tcp_9000
  - tcp_9100-9200
- description: Another Ansible test rule 2
  source_ip:
  - 192.168.100.204
  - 192.168.100.205
  - 192.168.100.206
  - 192.168.100.207
  destination_ip:
  - 8.8.8.8
  - 192.168.0.42
  service:
  - udp_1053-2053
  - tcp_1053-2053
- description: Another Ansible test rule 3
  source_ip:
  - 192.168.100.204
  destination_ip:
  - 192.168.0.42
  service:
  - udp_123
- description: Another Ansible test rule 4
  source_ip:
  - 192.168.100.204
  - 192.168.100.205
  destination_ip:
  - 192.168.0.100
  service:
  - tcp_1-65535
- description: Another Ansible test rule 5
  source_ip:
  - 192.168.100.204
  - 192.168.100.207
  destination_ip:
  - 8.8.8.8
  service:
  - tcp_8081

Since the PA firewall is zone-based, I feed the following CSV file to the playbook to make zone lookups quicker. The CSV table contains the firewall (or device-group), the network, and the security zone that the network belongs to. Without this, I would need to perform a lot more tasks looking up this information on each pass.

fwzones.csv: ASCII text

LABPA,192.168.0.0/24,AWS-PROD
LABPA,192.168.100.0/24,AWS-DEV
LABPA,0.0.0.0/0,Layer3-Outside

The following is the inventory in my lab. I don’t recommend storing any credentials here.

inventory: ASCII text

[all:vars]
ansible_connection="local"
ansible_python_interpreter="/usr/bin/env python"
username="admin"
password="admin"
 
[labpa]
labpa01

The following is my main playbook. It will prompt for username and password credentials and read the input variables related to the change. Since this is a sample, I am only calling a single task list, “panos_security_rule.yaml”, which is responsible for managing the security rules on the PA.

main.yaml: a /usr/bin/ansible-playbook -f 10 script text executable, ASCII text

#!/usr/local/bin/ansible-playbook -f 10
---
- name: "manage panos devices"
  hosts: labpa01
  connection: local
  gather_facts: False

  vars_prompt:

  - name: "username"
    prompt: "Username"
    private: no

  - name: "password"
    prompt: "Password"

  vars:

  - panos_provider:
      ip_address: "{{ inventory_hostname }}"
      username: "{{ username | default('admin') }}"
      password: "{{ password | default('admin') }}"

  pre_tasks:

  - name: "fail: check for required input"
    fail:
      msg: "Example: ./main.yaml -e state=present -e er=./ER/CO99999.yaml"
    when: (er is undefined) and (state is undefined)

  - name: "include_vars: load security rules"
    include_vars:
      file: "{{ er }}"

  roles:
  - role: PaloAltoNetworks.paloaltonetworks

  tasks:

  - name: "include: create panos security rule"
    include: panos_security_rule.yaml
    with_indexed_items: "{{ security_rule }}"
    when: state is defined

  handlers:

  - name: "commit pending changes"
    local_action:
      module: panos_commit
      provider: "{{ panos_provider }}"

The following is my task list for managing PanOS security rules. If I were to manage any other vendor’s firewall, I would make it read the same input and simply create a different task list for that vendor’s device type. There are two tricks I am performing within this task list: I read the fwzones.csv file into a variable for lookups, and I call another task list that builds the L4 service groups referenced in the security rule.

panos_security_rule.yaml: ASCII text

## Manage security rules on a Palo Alto Firewall
## Requires: panos_object_service.yaml
#
## Vars Example:
#
# ticket: CO99999
# security_rule:
# - source_ip: ["192.168.0.100"]
#   destination_ip: ["any"]
#   service: ["tcp_9000"]
#   description: "Ansible test rule 0"
#
## Task Example:
#
#  - name: "include: create panos security rule"
#    include: panos_security_rule.yaml
#    with_indexed_items: "{{ security_rule }}"
#    when: state is defined
#
---
 
###
# Derive firewall zone and devicegroup from prebuilt CSV.
# Normally we would retrieve this from a functional IPAM.
###
 
# Example CSV file
#
# devicegroup,192.168.0.0/24,prod
# devicegroup,192.168.100.0/24,dev
# devicegroup,0.0.0.0/0,outside

- name: "read_csv: read firewall zones from csv"
  local_action:
    module: read_csv
    path: fwzones.csv
    fieldnames: devicegroup,network,zone
  register: fwzones
  run_once: true

- name: "set_fact: source details"
  set_fact:
    source_dgrp: "{{ item_tmp.1['devicegroup'] }}"
    source_addr: "{{ source_addr|default([]) + [ item_tmp.0 ] }}"
    source_zone: "{{ source_zone|default([]) + [ item_tmp.1['zone'] ] }}"
  with_nested:
  - "{{ item.1.source_ip }}"
  - "{{ fwzones.list }}"
  loop_control:
    loop_var: item_tmp
  when: ( item_tmp.0|ipaddr('int') >= item_tmp.1['network']|ipaddr('network')|ipaddr('int') ) and
        ( item_tmp.0|ipaddr('int') <= item_tmp.1['network']|ipaddr('broadcast')|ipaddr('int') ) and
        ( item_tmp.1['network']|ipaddr('int') != "0/0" )

- name: "set_fact: destination zone"
  set_fact:
    destination_dgrp: "{{ item_tmp.1['devicegroup'] }}"
    destination_zone: "{{ destination_zone|default([]) + [ item_tmp.1['zone'] ] }}"
  with_nested:
  - "{{ item.1.destination_ip }}"
  - "{{ fwzones.list }}"
  loop_control:
    loop_var: item_tmp
  when: ( item_tmp.0|ipaddr('int') >= item_tmp.1['network']|ipaddr('network')|ipaddr('int') ) and
        ( item_tmp.0|ipaddr('int') <= item_tmp.1['network']|ipaddr('broadcast')|ipaddr('int') ) and
        ( item_tmp.1['devicegroup'] == source_dgrp ) and ( destination_zone|default([])|length < item.1.destination_ip|unique|length )
 
##
# Done collecting firewall zone & devicegroup.
##

- name: "set_fact: services"
  set_fact:
    services: "{{ services|default([]) + [ service ] }}"
    service_list: "{{ service_list|default([]) + [ {\"protocol\": {service.split('_')[0]: {\"port\": service.split('_')[1]}}, \"name\": service }] }}"
  with_items: "{{ item.1.service }}"
  loop_control:
    loop_var: service

- name: "include: create panos service object"
  include: panos_object_service.yaml
  with_items: "{{ service_list|unique }}"
  loop_control:
    loop_var: service
  when: (state == "present")
 
###
# Testing against a single PA firewall, uncomment if running against Panorama
###

- name: "panos_security_rule: firewall rule"
  local_action:
    module: panos_security_rule
    provider: "{{ panos_provider }}"
    state: "{{ state }}"
    rule_name: "{{ ticket|upper }}-{{ item.0 }}"
    description: "{{ item.1.description }}"
    tag_name: "ansible"
    source_zone: "{{ source_zone|unique }}"
    source_ip: "{{ source_addr|unique }}"
    destination_zone: "{{ destination_zone|unique }}"
    destination_ip: "{{ item.1.destination_ip|unique }}"
    service: "{{ services|unique }}"
#   devicegroup: "{{ source_dgrp|unique }}"
    action: "allow"
    commit: "False"
  notify:
  - commit pending changes

- name: "include: create panos service object"
  include: panos_object_service.yaml
  with_items: "{{ service_list|unique }}"
  loop_control:
    loop_var: service
  when: (state == "absent")

- name: "set_fact: clear facts from run"
  set_fact:
    services: []
    service_list: []
    source_dgrp: ""
    source_addr: []
    source_zone: []
    destination_dgrp: ""
    destination_addr: []
    destination_zone: []

The following will parse the “service” variable from the input and will manage the creation or removal of its service group. This is probably not best practice, but I like to initially build all PA rules as L4; then, after a month of bake-in, I will use the Expedition tool or the PanOS 9 AppID migration tool to convert the rules to L7. I never assume that an app owner knows how their application works, which is why I choose to migrate to L7 rules based on what I actually see in the logs.

panos_object_service.yaml: ASCII text

## Var Example:
#
#  services:
#  - { name: service-abc, protocol: { tcp: { port: '5000,6000-7000' } } }
#
## Task Example:
#
#  - name: "include: create panos address object"
#    include: panos_object_service.yaml state="absent"
#    with_items: "{{ services }}"
#    loop_control:
#      loop_var: service
#
---
- name: attempt to locate existing address
  block:

  - name: "panos_object: service - find {{ service.name }}"
    local_action:
      module: panos_object
      ip_address: "{{ inventory_hostname }}"
      username: "{{ username }}"
      password: "{{ password }}"
      serviceobject: "{{ service.name }}"
      devicegroup: "{{ devicegroup | default('') }}"
      operation: "find"
    register: result

  - name: 'set_fact: existing service object'
    set_fact:
      existing: "{{ result.stdout_lines|from_json|json_query('entry')|regex_replace('@') }}"
    when: (state == "present")

  rescue:

  - name: "panos_object: service - add {{ service.name }}"
    local_action:
      module: panos_object
      ip_address: "{{ inventory_hostname }}"
      username: "{{ username }}"
      password: "{{ password }}"
      serviceobject: "{{ service.name }}"
      protocol: "{{ service.protocol | flatten | list | join('\", \"') }}"
      destination_port: "{{ service | json_query('protocol.*.port') | list | join('\", \"') }}"
      description: "{{ service.description | default('') }}"
      devicegroup: "{{ devicegroup | default('') }}"
      operation: 'add'
    when: (state == "present")

- name: "panos_object: service - update {{ service.name }}"
  local_action:
    module: panos_object
    ip_address: "{{ inventory_hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
    serviceobject: "{{ service.name }}"
    protocol: "{{ service.protocol | flatten | list | join('\", \"') }}"
    destination_port: "{{ service | json_query('protocol.*.port') | list | join('\", \"') }}"
    description: "{{ service.description | default('') }}"
    devicegroup: "{{ devicegroup | default('') }}"
    operation: 'update'
  when: (state == "present") and (existing is defined) and (existing != service)

- name: "panos_object: service - delete {{ service.name }}"
  local_action:
    module: panos_object
    ip_address: "{{ inventory_hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
    serviceobject: "{{ service.name }}"
    devicegroup: "{{ devicegroup | default('') }}"
    operation: 'delete'
  ignore_errors: yes
  when: (state == "absent") and (result.stdout_lines is defined)
Ansible playbook to manage objects on a Cisco Firepower Management Center (FMC)

I really wish Cisco would support the DevOps community and release Ansible modules for their products like most other vendors. That being said, since there are no modules for the Cisco Firepower, you have to manage the device through the APIs directly. Managing anything using raw API requests in Ansible can be a little tricky, but not impossible. When creating playbooks like this you will typically spend most of your time figuring out the structure of responses and how best to iterate through them.

The following Ansible playbook is a refactor of a script I wrote last year to post/delete objects to a Firepower in bulk. I have spent a lot of time with Ansible playbooks and I recommend grouping and modularizing related tasks into separate importable YAML files. This not only makes reusing common groups of tasks much easier, but also means those logical task groupings can later be promoted into a role with little to no effort.
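Stripped down to a shell sketch, the authentication step the playbook performs with the uri module is roughly the following; the host and credentials are placeholders:

curl -sk -u apiuser:api1234 -X POST -D - -o /dev/null \
  https://fmc.example.com/api/fmc_platform/v1/auth/generatetoken
# The X-auth-access-token and DOMAIN_UUID response headers are what the
# later fmc_config requests consume.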

main.yaml: a /usr/bin/ansible-playbook -f 10 script text executable, ASCII text

#!/usr/bin/ansible-playbook -f 10
## Ansible playbook to manage objects on a FMC
# 2019 (v.01) - Playbook from www.davideaves.com
---
- name: manage firepower objects
  hosts: fmc
  connection: local
  gather_facts: no

  vars:

  - ansible_connection: "local"
  - ansible_python_interpreter: "/usr/bin/env python"

  - fmc_provider:
      username: "{{ username | default('apiuser') }}"
      password: "{{ password | default('api1234') }}"

  - fmc_objects:
    - name: server1
      value: 192.0.2.1
      description: Test Server

  tasks:

  ## Note ##
  # Firepower Management Center REST API authentication tokens are valid for 30 minutes, and can be refreshed up to three times
  # Ref: https://www.cisco.com/c/en/us/td/docs/security/firepower/623/api/REST/Firepower_Management_Center_REST_API_Quick_Start_Guide_623/Connecting_with_a_Client.html

  - name: "fmc_platform: generatetoken"
    local_action:
      module: uri
      url: "https://{{ inventory_hostname }}/api/fmc_platform/v1/auth/generatetoken"
      method: POST
      user: "{{ fmc_provider.username }}"
      password: "{{ fmc_provider.password }}"
      validate_certs: no
      return_content: no
      force_basic_auth: yes
      status_code: 204
    register: auth

  - include: fmc_objects.yaml
    when: auth.x_auth_access_token is defined

The following is the task grouping that makes object changes to the FMC using Ansible’s built-in URI module. I have tried to make this playbook as idempotent as possible, so I first register an array with all of the objects that exist on the FMC. I then iterate through that array in subsequent tasks so I only change what does not match. If it sees an fmc_objects entry with a name key but no value set, the delete task will remove that object from the FMC.
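For instance, a hypothetical one-off run that deletes the example object; passing a name with no value flags it for removal:

./main.yaml -e '{"fmc_objects": [{"name": "server1"}]}'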

fmc_objects.yaml: ASCII text

## Cisco FMC object management tasks for Ansible
## Requires: VAR:auth.x_auth_access_token
## 2019 (v.01) - Playbook from www.davideaves.com
#
## VARIABLE EXAMPLE ##
#
#  - fmc_objects:
#    - name: server1
#      value: 192.0.2.1
#
## USAGE EXAMPLE ##
#  - include: fmc_objects.yaml
#    when: auth.x_auth_access_token is defined
#
---
 
## NOTE ##
# Currently only handling host and network objects!
# Other object types will likely require a j2 template to construct the body submission.

- name: "fmc_config: get all objects"
  local_action:
    module: uri
    url: "https://{{ inventory_hostname }}/api/fmc_config/v1/domain/{{ auth.domain_uuid }}/object/{{ item }}?limit=10000&expanded=true"
    method: GET
    validate_certs: no
    status_code: 200
    headers:
      Content-Type: application/json
      X-auth-access-token: "{{ auth.x_auth_access_token }}"
  with_items:
    - hosts
    - networks
  register: "all_objects_raw"
 
# Unable to figure out how to do this without a j2 template.
# FMC returns too many subelements to easily filter.

- name: "fmc_config: post new objects"
  local_action:
    module: uri
    url: "https://{{ inventory_hostname }}/api/fmc_config/v1/domain/{{ auth.domain_uuid }}/object/{{ fmc_objects | selectattr('name', 'equalto', item) | map(attribute='type') | list | last | default('hosts') | lower }}"
    method: POST
    validate_certs: no
    status_code: 201
    headers:
      Content-Type: application/json
      X-auth-access-token: "{{ auth.x_auth_access_token }}"
    body_format: json
    body:
      name: "{{ item }}"
      value: "{{ fmc_objects | selectattr('name', 'equalto', item) | map(attribute='value') | list | last }}"
      description: "{{ fmc_objects | selectattr('name', 'equalto', item) | map(attribute='description') | list | last | default('Ansible Created') }}"
      overridable: "{{ fmc_objects | selectattr('name', 'equalto', item) | map(attribute='overridable') | list | last | default('False') | bool }}"
  with_items: "{{ lookup('template', 'fmc_objects-missing.j2').split('\n') }}"
  when: (item != "") and (fmc_objects | selectattr('name', 'equalto', item) | map(attribute='value') | list | last is defined)
  changed_when: True
 
## NOTE ##
# The conditions below will not catch the sudden removal of the description or overridable key

- name: "fmc_config: modify existing objects"
  local_action:
    module: uri
    url: "{{ item.1.links.self }}"
    method: PUT
    validate_certs: no
    status_code: 200
    headers:
      Content-Type: application/json
      X-auth-access-token: "{{ auth.x_auth_access_token }}"
    body_format: json
    body:
      name: "{{ item.1.name }}"
      id: "{{ item.1.id }}"
      type: "{{ item.1.type }}"
      value: "{{ fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='value') | list | last }}"
      description: "{{ fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='description') | list | last | default('Ansible Created') }}"
      overridable: "{{ fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='overridable') | list | last | default('False') | bool }}"
  with_subelements:
    - "{{ all_objects_raw['results'] }}"
    - json.items
  when: (fmc_objects | selectattr('name', 'equalto', item.1.name) | list | count > 0) and
        (((fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='value') | list | last is defined) and (fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='value') | list | last != item.1.value)) or
         ((fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='description') | list | last is defined) and (fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='description') | list | last | default('Ansible Created') != item.1.description)) or
         ((fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='overridable') | list | last is defined) and (fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='overridable') | list | last | default('False') | bool != item.1.overridable)))
  changed_when: True

- name: "fmc_config: delete objects"
  local_action:
    module: uri
    url: "{{ item.1.links.self }}"
    method: DELETE
    validate_certs: no
    status_code: 200
    headers:
      X-auth-access-token: "{{ auth.x_auth_access_token }}"
  with_subelements:
    - "{{ all_objects_raw['results'] }}"
    - json.items
  when: (fmc_objects | selectattr('name', 'equalto', item.1.name) | list | count > 0)
        and (fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='name') | list | last is defined)
        and (fmc_objects | selectattr('name', 'equalto', item.1.name) | map(attribute='value') | list | last is undefined)
  changed_when: True

Sometimes, when trying to munge an array and perform comparisons, you have to do it in a Jinja2 template. The following template creates a list of existing object names, then checks each desired object to see if it needs to be created. This is what my POST task uses to determine which new objects will be created.

templates/fmc_objects-missing.j2: ASCII text

{#- Build a list of the existing objects -#}
{% set EXISTING = [] %}
{% for object_result in all_objects_raw['results'] %}
{% for object_line in object_result['json']['items'] %}
{{- EXISTING.append( object_line['name'] ) -}}
{% endfor %}
{% endfor %}
 
{#- Check fmc_objects to see if missing -#}
{% for fmc_object in fmc_objects %}
{% if fmc_object['name'] not in EXISTING %}
{{ fmc_object['name'] }}
{% endif %}
{% endfor %}
Search for object matches in an ASA config

Having to parse ASA configs for migration purposes provides a never-ending source of reasons to write scripts. The following AWK script will munge an ASA config, searching for any specified address or object name, and will output any objects that reference it. I use this script in conjunction with the ASA_acls.sh script to find security rules relating to an address. As far as I know, this is the closest offline equivalent to the “Where Used” feature in ASDM for finding addresses.
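Usage is a saved config file followed by one or more search terms; a sketch with example addresses and object names:

./ASA_obj.awk running-config.txt 192.168.0.100 WEB-SERVER-01

# Hypothetical output: every object that references a match
# asa_objects:
#   - 192.168.0.100
#   - WEB-SERVER-01
#   - GRP-DMZ-SERVERS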

ASA_obj.awk: awk script, ASCII text executable

#!/usr/bin/awk -f
## Search for object matches in an ASA config.
## Requires: GNU awk (gawk) for and() and isarray().
## 2018 (v.01) - Script from www.davideaves.com
 
### BEGIN ###
 
BEGIN {
  dig_range="y"
  dig_subnet="n"
 
  # Script arguments: ASA configuration + Search objects
  if ( ARGV[1] == "" ) {
    print "ERROR: No Input ASA config provided!" > "/dev/stderr"
    exit 1
  } else if ( ARGV[2] == "" ) {
    print "ERROR: No address or object to search for!" > "/dev/stderr"
    exit 1
  } else {
    # Saving everything after ARGV[1] in search_array.
    for (i = 2; i < ARGC; i++) {
      search_array[ARGV[i]] = ARGV[i]
      delete ARGV[i]
  } }
}
 
### FUNCTIONS ###
 
# Convert IP to Integer.
function ip_to_int(input) {
  split(input, oc, ".")
  ip_int=(oc[1]*(256^3))+(oc[2]*(256^2))+(oc[3]*(256))+(oc[4])
  return ip_int
}
 
# test if a string is an ipv4 address
function is_v4(address) {
  split(address, octet, ".")
  if ( octet[1] <= 255 && octet[2] <= 255 && octet[3] <= 255 && octet[4] <= 255 )
  return address
}
 
# convert number to bits
function bits(N){
  c = 0
  for(i=0; i<8; ++i) if( and(2**i, N) ) ++c
  return c
}
 
# convert ipv4 to prefix
function to_prefix(mask) {
  split(mask, octet, ".")
  return bits(octet[1]) + bits(octet[2]) + bits(octet[3]) + bits(octet[4])
}
 
### SCRIPT ###
 
//{ gsub(/\r/, "") # Strip CTRL+M
 
  ### LINE IS NAME ###
  if ( $1 ~ /^name$/ ) {
 
    name=$3; host=$2; type=$1
    for(col = 5; col <= NF; col++) { previous=previous" "$col }
    description=substr(previous,2)
    previous=""
 
    # Add to search_array
    for (search in search_array) if ( host == search ) search_array[name]
  }
 
  ### LINE IS OBJECT ### 
  else if ( $1 ~ /^object/ ) {
 
    tab="Y"
    name=$3
    type=$2
    if ( type == "service" ) service=$4
    previous=""
 
  } else if ( tab == "Y" && substr($0,1,1) == " " ) {
 
    # object is single host.
    if ( $1 == "host" ) {
      host=$NF
      for (search in search_array) if ( host == search ) search_array[name]
    }
 
    # object is a subnet
    else if ( $1 == "subnet" && dig_subnet == "y" ) {
      for (search in search_array) if ( is_v4(search) ) {
 
        NETWORK=ip_to_int($2)
        PREFIX=to_prefix($3)
        BROADCAST=(NETWORK + (2 ^ (32 - PREFIX) - 1))
 
        if ( ip_to_int(search) >= int(NETWORK) && ip_to_int(search) <= int(BROADCAST) ) {
          search_array[name]
      } }
    }
 
    # object is a range
    else if ( $1 == "range" && dig_range == "y" ) {
      for (search in search_array) if ( is_v4(search) ) {
        if ( ip_to_int(search) >= ip_to_int($2) && ip_to_int(search) <= ip_to_int($3) ) {
          search_array[name]
      } }
    }
 
    # object is group of other objects
    else if ( $2 ~ /(host|object)/ ) {
      for (search in search_array) if ( $NF == search ) search_array[name]
    }
 
    # object contains nat statement
    else if ( $1 == "nat" ) {
      for (search in search_array) if ( $NF == search ) search_array[name]
    }
 
    ### Debug everything else within an object
    #else { print "DEBUG:",$0 }
 
  }
  else { tab="" }
 
}
 
### END ###
 
END{
  if ( isarray(search_array) ) {
    print "asa_objects:"
    for (search in search_array) print "  -",search
  }
}