19. December 2018 · Comments Off on Ansible playbook to provision Netscaler VIPs. · Categories: Ansible, Linux, Linux Admin, Load Balancing, NetScaler, Networking · Tags: , , ,

The following playbook will create a fully functional VIP, including the supporting monitor, service-group (pool) and servers (nodes), on a NetScaler load balancer. Additionally, the same playbook has the ability to fully deprovision a VIP and all its supporting artifacts. To do all this I use the native NetScaler Ansible modules. When it comes to using the netscaler_servicegroup module, since the number of servers is not always consistent, I create that task with a Jinja2 template, which is then imported back into the play.

netscaler_provision.yaml: a /usr/bin/ansible-playbook -f 10 script text executable, ASCII text

#!/usr/bin/ansible-playbook -f 10
## Ansible playbook to provision Netscaler VIPs.
# Requires: nitrosdk-python
# 2018 (v.01) - Playbook from www.davideaves.com
---
- name: Netscaler VIP provision
  hosts: netscaler
  connection: local
  gather_facts: false

  vars:

    ansible_connection: "local"
    ansible_python_interpreter: "/usr/bin/env python"

    # "present" builds the VIP and all its artifacts; "absent" tears them down.
    state: 'present'

    # VIP definition: one virtual address fronting a pool of servers, with one
    # vserver (plus matching service group and TCP monitor) per listed port.
    lbvip:
      name: testvip
      address: 203.0.113.1
      server:
        - name: 'server-1'
          address: '192.0.2.1'
          description: 'Ansible Test Server 1'
          disabled: true
        - name: 'server-2'
          address: '192.0.2.2'
          description: 'Ansible Test Server 2'
          disabled: true
        - name: 'server-3'
          address: '192.0.2.3'
          description: 'Ansible Test Server 3'
          disabled: true
        - name: 'server-4'
          address: '192.0.2.4'
          description: 'Ansible Test Server 4'
          disabled: true
        - name: 'server-5'
          address: '192.0.2.5'
          description: 'Ansible Test Server 5'
          disabled: true
        - name: 'server-6'
          address: '192.0.2.6'
          description: 'Ansible Test Server 6'
          disabled: true
        - name: 'server-7'
          address: '192.0.2.7'
          description: 'Ansible Test Server 7'
          disabled: true
        - name: 'server-8'
          address: '192.0.2.8'
          description: 'Ansible Test Server 8'
          disabled: true
      vserver:
        - port: '80'
          description: 'Generic service running on 80'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '443'
          description: 'Generic service running on 443'
          type: 'SSL_BRIDGE'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8080'
          description: 'Generic service running on 8080'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8081'
          description: 'Generic service running on 8081'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8443'
          description: 'Generic service running on 8443'
          type: 'SSL_BRIDGE'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'

  tasks:

    - name: Build lbvip and all related componets.
      block:
      # Create (or update) each backend server object on the load balancer.
      - local_action:
          module: netscaler_server
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: false
          state: "{{ state }}"
          name: "{{ item.name }}"
          ipaddress: "{{ item.address }}"
          comment: "{{ item.description | default('Ansible Created') }}"
          disabled: "{{ item.disabled | default(false) }}"
        with_items: "{{ lbvip.server }}"
      # One TCP health monitor per vserver port.
      - local_action:
          module: netscaler_lb_monitor
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: false
          state: "{{ state }}"
          monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
          type: TCP
          destport: "{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
        # NOTE(review): log redaction is explicitly disabled although this
        # task carries nitro credentials -- consider no_log: true.
        no_log: false
      # Render the servicegroup task from a Jinja2 template (the number of
      # servicemembers varies per pool), include it, then clean up the file.
      - local_action:
          module: copy
          content: "{{ lookup('template', 'templates/netscaler_servicegroup.j2') }}"
          dest: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
          mode: "0644"
        with_items: "{{ lbvip.vserver }}"
        changed_when: false
      - include_tasks: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: file
          state: absent
          path: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
        with_items: "{{ lbvip.vserver }}"
        changed_when: false
      # Finally the lb vserver itself, bound to its service group.
      - local_action:
          module: netscaler_lb_vserver
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: false
          state: "{{ state }}"
          name: "vs_{{ lbvip.name }}_{{ item.port }}"
          servicetype: "{{ item.type }}"
          ipv46: "{{ lbvip.address }}"
          port: "{{ item.port }}"
          lbmethod: "{{ item.method | default('LEASTCONNECTION') }}"
          persistencetype: "{{ item.persistence | default('SOURCEIP') }}"
          servicegroupbindings:
            - servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      when: state == "present"

    - name: Destroy lbvip and all related componets.
      block:
      # Teardown happens in reverse order of creation: vserver first,
      # then service group, monitor, and finally the server objects.
      - local_action:
          module: netscaler_lb_vserver
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: false
          state: "{{ state }}"
          name: "vs_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_servicegroup
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: false
          state: "{{ state }}"
          servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_lb_monitor
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: false
          state: "{{ state }}"
          monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
          type: TCP
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_server
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: false
          state: "{{ state }}"
          name: "{{ item.name }}"
        with_items: "{{ lbvip.server }}"
      when: state == "absent"

The following is the Jinja2 template that creates the netscaler_servicegroup task. An important thing to note is my use of the RAW block. When the task is created and stored in /tmp it does not contain any account credentials, instead I preserve the variable in the raw to prevent leaking sensitive information to anyone who may be snooping around on the server while the playbook is running.

templates/netscaler_servicegroup.j2: ASCII text, with CRLF line terminators

---
{# Rendered once per vserver port by the provisioning play, written to /tmp
   and pulled back in with include_tasks. #}
{# The nitro fields are wrapped in raw blocks so the rendered file on disk
   keeps the variable references instead of the actual credential values. #}
- local_action:
    module: netscaler_servicegroup
    nsip: {% raw %}"{{ inventory_hostname }}"
{% endraw %}
    nitro_user: {% raw %}"{{ nitro_user }}"
{% endraw %}
    nitro_pass: {% raw %}"{{ nitro_pass }}"
{% endraw %}
    nitro_protocol: "https"
    validate_certs: no

    state: "{{ state | default('present') }}"

    servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
    comment: "{{ item.description | default('Ansible Created') }}"
    servicetype: "{{ item.type }}"
    servicemembers:
{# One member entry per backend server, all bound on the vserver's port. #}
{% for i in lbvip.server %}
      - servername: "{{ i.name }}"
        port: "{{ item.port }}"
{% endfor %}
    monitorbindings:
      - monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
13. June 2018 · Comments Off on Using netaddr in Ansible to manipulate network IP, CIDR, MAC and prefix. · Categories: Ansible, Cloud, Linux Admin, Networking · Tags: , , , , , , , , , , , ,

The following Ansible playbook is an example that demonstrates using netaddr to manipulate network IP, CIDR, MAC and prefix values. Additional examples can be found in the Ansible docs, or if you're looking to do the manipulation in Python, the following are the docs for netaddr.

#!/usr/local/bin/ansible-playbook
## Using netaddr in Ansible to manipulate network IP, CIDR, MAC and prefix
## 2018 (v.01) - Playbook from www.davideaves.com
---
- hosts: localhost
  gather_facts: false

  # Sample inputs for the ipaddr/hwaddr/ipsubnet filter demos below.
  # vars is a mapping (not a list of one-key dicts), and the colon- and
  # slash-laden values are quoted so YAML never mis-types them.
  vars:
    IP: "172.31.3.13/23"
    CIDR: "192.168.0.0/16"
    MAC: "1a:2b:3c:4d:5e:6f"
    PREFIX: 18

  tasks:
    - debug:
        msg: "___ {{ IP }} ___ ADDRESS {{ IP | ipaddr('address') }}"
    - debug:
        msg: "___ {{ IP }} ___ BROADCAST {{ IP | ipaddr('broadcast') }}"
    - debug:
        msg: "___ {{ IP }} ___ NETMASK {{ IP | ipaddr('netmask') }}"
    - debug:
        msg: "___ {{ IP }} ___ NETWORK {{ IP | ipaddr('network') }}"
    - debug:
        msg: "___ {{ IP }} ___ PREFIX {{ IP | ipaddr('prefix') }}"
    - debug:
        msg: "___ {{ IP }} ___ SIZE {{ IP | ipaddr('size') }}"
    - debug:
        msg: "___ {{ IP }} ___ WILDCARD {{ IP | ipaddr('wildcard') }}"
    - debug:
        msg: "___ {{ IP }} ___ RANGE {{ IP | ipaddr('range_usable') }}"
    - debug:
        msg: "___ {{ IP }} ___ REVERSE DNS {{ IP | ipaddr('revdns') }}"
    - debug:
        msg: "___ {{ IP }} ___ HEX {{ IP | ipaddr('address') | ip4_hex() }}"
    - debug:
        msg: "___ {{ MAC }} ___ CISCO {{ MAC | hwaddr('cisco') }}"
    - debug:
        msg: "___ {{ CIDR }} ___ Last /20 CIDR {{ CIDR | ipsubnet(20, -1) }}"
    - debug:
        msg: "___ {{ CIDR }} ___ 1st IP {{ CIDR | ipaddr(1) }}"
    - debug:
        msg: "___ {{ CIDR }} ___ 3rd from last IP {{ CIDR | ipaddr(-3) }}"
27. November 2017 · Comments Off on Preventing brute force login attempts by merging geoiplookup and hosts_access · Categories: Ansible, Linux, Linux Admin, Linux Scripts, Linux Security, Networking · Tags: , , , , , ,

To me networking and UNIX run hand-in-hand; as I have been getting into Ansible to do network automation I am also using it to automate and enforce consistency between all of my other hosts. The following is a simple playbook to configure Debian-based hosts to perform a geoiplookup against all incoming hosts to prevent brute-force login attempts using hosts_access. I have to admit the original script and idea are not mine; they originated from the following blog post: Limit your SSH logins using GeoIP. As more and more systems are migrated into cloud environments, automating and enforcing security controls like this one is of critical importance.

geowrapper.yaml: a /usr/local/bin/ansible-playbook script, ASCII text executable

#!/usr/local/bin/ansible-playbook
## Configure Debian OS family to geoiplookup against all incoming hosts.
## 2017 (v.01) - Playbook from www.davideaves.com
---
- name: GEO Wrapper
  hosts: all
  become: true
  gather_facts: true
  tags: host_access

  vars:
    # UPPERCASE space-separated ISO country codes allowed to connect.
    geocountries: "US"
    # Install path of the TCP-wrappers aclexec helper script.
    geofilter: "/opt/geowrapper.sh"

  tasks:
  - name: "Fail if OS family not Debian"
    fail:
      msg: "Distribution not supported"
    when: ansible_os_family != "Debian"

  - name: "Fetch geoip packages"
    apt:
      name:
        - geoip-bin
        - geoip-database
      state: latest
      update_cache: true
    register: geoip
    when: ansible_os_family == "Debian"

  # Drop the wrapper script hosts.allow will aclexec for every connection.
  - name: "Wrapper script {{ geofilter }}"
    copy:
      content: |
        #!/bin/bash
        # Ansible Managed: GeoIP aclexec script for Linux TCP wrappers.
        ## Source: http://www.axllent.org/docs/view/ssh-geoip

        # UPPERCASE space-separated country codes to ACCEPT
        ALLOW_COUNTRIES="{{ geocountries }}"

        if [ $# -ne 1 ]; then
          echo "Usage:  `basename $0` ip" 1>&2
          exit 0 # return true in case of config issue
        fi

        COUNTRY=`/usr/bin/geoiplookup $1 | awk -F ": " '{ print $2 }' | awk -F "," '{ print $1 }' | head -n 1`

        [[ $COUNTRY = "IP Address not found" || $ALLOW_COUNTRIES =~ $COUNTRY ]] && RESPONSE="ALLOW" || RESPONSE="DENY"

        if [ $RESPONSE = "ALLOW" ]
        then
          exit 0
        else
          logger "$RESPONSE connection from $1 ($COUNTRY)"
          exit 1
        fi
      dest: "{{ geofilter }}"
      mode: "0755"
      owner: root
      group: root
    ignore_errors: true
    register: geowrapper
    # "result|success" filter syntax was removed in Ansible 2.9;
    # the test form "is succeeded" works on both old and new releases.
    when: geoip is succeeded

  # Allow RFC1918 space unconditionally; everything else runs the GeoIP check.
  - name: "Mappings in /etc/hosts.allow"
    blockinfile:
      path: /etc/hosts.allow
      state: present
      content: |
        ALL: 10.0.0.0/8
        ALL: 172.16.0.0/12
        ALL: 192.168.0.0/16
        ALL: ALL: aclexec {{ geofilter }} %a
    when: geowrapper is succeeded

  # Default-deny: anything not allowed above is rejected.
  - name: "Mappings in /etc/hosts.deny"
    blockinfile:
      path: /etc/hosts.deny
      state: present
      content: "ALL: ALL"
    when: geowrapper is succeeded
31. March 2017 · Comments Off on TCL/Expect script to backup Cisco device configs. · Categories: Cisco, Linux, Linux Admin, Linux Scripts, Networking · Tags: , , , , , , , ,

I am not a software developer, but I do like challenges and am interested in learning about different software languages. For this project I decided to practice some TCL/Expect, so I rewrote a poorly written Perl script I came across. This script will back up Cisco device configurations by reading 2 files: cmd.db and device.db … It loads them into a data dictionary and iterates through it using a control loop. It then logs into the device, by shelling out to rancid, and executes all its commands. It's a little hacky, but it works. I even wrote a shell script to parse the log output into separate files.

/srv/rtrinfo/rtrinfo.exp: a expect script, ASCII text executable

#!/usr/bin/expect -f
# Login to a list of devices and collect show output.
#
# Usage: rtrinfo.exp <device.db>
#   device.db rows: HOST;TYPE;STATE;DESC        (semicolon separated)
#   cmd.db rows:    OUTPUT;TYPE[,TYPE...];CMD   (semicolon separated)
# Per-device session logs land in $LOGDIR; $OUTLOG records which command
# output lives in which log file so rtrparse.sh can split them apart.
#
## Requires: clogin (rancid)

exp_version -exit 5.0
set timeout 5

# Positional argument: path to the device database file.
set DEVDB "[lindex $argv 0]"
set LOGDIR "/var/log/rtrinfo"
set OUTLOG "/srv/rtrinfo/output.log"

## Validate input files or print usage.
# cmd.db is looked for in the current directory first, then next to
# this script; missing device.db argument just prints usage.
if {0==[llength $DEVDB]} {
    send_user "usage: $argv0 -device.db-\n"
    exit
} else {
   if {[file isfile "cmd.db"] == "1"} {
      set CMDDB "cmd.db"
   } elseif {[file isfile "[file dirname $argv0]/cmd.db"] == "1"} {
      set CMDDB "[file dirname $argv0]/cmd.db"
   } else {
    send_user "Unable to find cmd.db file, can not start...\n"
    exit 1
   }
}

################################################################

### Procedure to create 3 column dictionary ###
# Appends one row to the dict named by dbVar (created on first use with
# an auto-incrementing counter stored under key "ID"). The three fields
# are packed into a single quoted string so callers can unpack a row
# with lassign.
proc addDICT {dbVar field1 field2 field3} {

   # Initialize the DEVICE dictionary
   if {![info exists $dbVar]} {
      dict set $dbVar ID 0
   }

   upvar 1 $dbVar db

   # Create a new ID
   dict incr db ID
   set id [dict get $db ID]

   # Add columns into dictionary
   dict set db $id "\"$field1\" \"$field2\" \"$field3\""
}

### Build the CMD and DEVICE dicts from db files ###
foreach DB [list $CMDDB $DEVDB] {
   set DBFILE [open $DB]
   set file [read $DBFILE]
   close $DBFILE

   ## Split into records on newlines
   set records [split $file "\n"]

   ## Load records for dictionary
   foreach rec $records {
      ## split into fields on colons
      # (the field separator is actually a semicolon)
      set fields [split $rec ";"]
      lassign $fields field1 field2 field3

      if {"[file tail $DB]" == "cmd.db"} {
         # Cols: OUTPUT TYPE CMD
         # A command may apply to several device types; fan out one
         # CMDS row per entry in the comma-separated type list.
         foreach field2 [split $field2 ","] {
            addDICT CMDS $field2 $field1 $field3
         }
      } else {
         # Cols: HOST TYPE STATE DESC
         # NOTE(review): only the first three fields are stored, so the
         # DESC column of device.db is dropped here.
         addDICT DEVICES $field1 $field2 $field3
      }
   }
}

################################################################

### Open $OUTLOG to be used for post parcing.
set OUTLOG [open "$OUTLOG" w 0664]

### Itterate the DEVICES dictionary ###
dict for {id row} $DEVICES {

   ## Assign field names
   lassign $row DEVICE DEVTYPE STATUS

   ## Process device status
   # Only devices whose third field is exactly "up" are visited.
   if {"$STATUS" == "up"} {

      ## Create log output directory if does not exist
      if {[file isdirectory "$LOGDIR"] != "1"} {
         file mkdir "$LOGDIR"
      }

      # Close any previous session log, then start a fresh per-device log.
      log_file
      log_file -noappend "$LOGDIR/$DEVTYPE\_$DEVICE.log"

      ## Run rancid's clogin with a 5min timeout.
      spawn timeout 300 clogin $DEVICE

      expect "*#" {

      ## Set proper terminal length ##
      # Disable paging so full command output is captured in the log.
      if {$DEVTYPE != "asa"} {
         send "terminal length 0\r"
      } else {
         send "terminal pager 0\r"
      }

      ### Itterate the CMDS dictionary ###
      # NOTE(review): this inner loop reuses the outer loop's id/row
      # variables; harmless as written since dict for iteration does not
      # depend on them, but worth confirming if either loop is changed.
      dict for {id row} $CMDS {
         ## Assign field names
         lassign $row CMDTYPE OUTPUT CMD

         ## Push commands to device & update $OUTLOG
         # Index line format consumed by rtrparse.sh: LOGFILE;OUTPUT;CMD
         if {($DEVTYPE == $CMDTYPE)&&($OUTPUT != "")} {
            puts $OUTLOG "$LOGDIR/$DEVTYPE\_$DEVICE.log;$OUTPUT;$CMD"
            expect "*#" { send "$CMD\r" }
         }
      }

      ## We are done! logout
      expect "*#" { send "exit\r" }
      expect EOF
      }

   }
}

close $OUTLOG

### Run a shell script to parse the output.log ###
#exec "[file dirname $argv0]/rtrparse.sh"

/srv/rtrinfo/cmd.db: ASCII text

acl;asa,router;show access-list
arp;ap,ace,asa,router,switch;show arp
arpinspection;ace;show arp inspection
arpstats;ace;show arp statistics
bgp;router;show ip bgp
bgpsumm;router;show ip bgp summary
boot;switch;show boot
cdpneighbors;ap,router,switch;show cdp neighbors
conferror;ace;sh ft config-error
controller;router;show controller
cpuhis;ap,router,switch;show process cpu history
debug;ap,router,switch;show debug
dot11ass;ap;show dot11 associations
envall;switch;show env all
env;router;show environment all
errdis;switch;show interface status err-disabled
filesys;router,switch;dir
flash;asa;show flashfs
intdesc;ap,router,switch;show interface description
interface;ap,asa,router,switch;show interface
intfbrie;ap,ace,router,switch;show ip interface brief
intipbrief;asa;show interface ip brief
intstatus;switch;show interface status
intsumm;router;show int summary
inventory;asa,router,switch;show inventory
iparp;ap,switch;show ip arp
ipint;router;show ip int
mac;switch;show mac address-table
nameif;asa;show nameif
ntpassoc;ap,asa,router,switch;show ntp assoc
plat;router;show platform
power;switch;show power inline
probe;ace;show probe
routes;asa;show route
routes;ap,router,switch;show ip route
rserver;ace;show rserver
running;ace;show running-config
running;ap,asa,router,switch;more system:running-config
serverfarm;ace;show serverfarm
service-policy;ace;show service-policy
service-pol-summ;ace;show service-policy summary
spantree;switch;show spanning-tree
srvfarmdetail;ace;show serverfarm detail
version;ap,ace,asa,router,switch;show version
vlan;switch;show vlan

/srv/rtrinfo/device.db: ASCII text

192.168.0.1;router;up;Site Router
192.168.0.2;ap;up;Autonomous AP
192.168.0.3;asa;up;ASA Firewall
192.168.0.5;switch;up;Site Switch
192.168.0.10;ace;up;Cisco ACE

/srv/rtrinfo/rtrparse.sh: Bourne-Again shell script, ASCII text executable

#!/bin/bash
# Parse the new rtrinfo output.log and create individual cmd output.
# 2016 (v.03) - Script from www.davideaves.com
#
# output.log rows (written by rtrinfo.exp): LOGFILE;OUTPUT;CMD
# For each row the matching command output is carved out of LOGFILE and
# written to $RTRPATH/<type>/<output>/<device>.txt

OUTLOG="/srv/rtrinfo/output.log"
RTRPATH="$(dirname "$OUTLOG")"

### Delete previous directories.
for DIR in ace asa router switch
 do [ -d "$RTRPATH/$DIR" ] && { rm -rf "$RTRPATH/$DIR"; }
done

### Iterate through $OUTLOG
# read -r keeps any backslashes in the logged commands intact.
grep "\.log" "$OUTLOG" | while IFS=';' read -r LOGFILE OUTPUT CMD
 do

 ### Get device name and type.
 # Log files are named <type>_<device>.log by rtrinfo.exp.
 TYPE="$(basename "$LOGFILE" | awk -F'_' '{print $1}')"
 DEVICE="$(basename "$LOGFILE" | awk -F'_' '{print $2}' | sed 's/\.log$//')"

 ### Create output directory.
 [ ! -d "$RTRPATH/$TYPE/$OUTPUT" ] && { mkdir -p "$RTRPATH/$TYPE/$OUTPUT"; }

 ### Extract rtrinfo:output logs and dump into individual files.
 # 1) sed identify $CMD output between prompts.
 #    NOTE(review): $CMD is interpolated unescaped into the sed address;
 #    a command containing "/" or regex metacharacters would break it.
 # 2) awk drops X beginning line(s).
 #    NOTE(review): 'NR > 0' matches every line, i.e. drops nothing;
 #    raise the threshold if leading lines should be trimmed.
 # 3) sed to drop the last line.
 sed -n "/[^.].*[#, ]$CMD\(.\)\{1,2\}$/,/[^.].*#.*$/p" "$LOGFILE" \
 | awk 'NR > 0' | sed -n '$!p' > "$RTRPATH/$TYPE/$OUTPUT/$DEVICE.txt"

 ## EX: sed -n "/[^.]\([a-zA-Z]\)\{3\}[0-9].*[#, ]$CMD\(.\)\{1,2\}$/,/[^.]\([a-zA-Z]\)\{3\}[0-9].*#.*$/p"

done

Since this is something that would be collected nightly or weekly, I would probably kick this off using logrotate (as opposed to using crontab). The following is what I would drop in my /etc/logrotate.d directory…

/etc/logrotate.d/rtrinfo: ASCII text

# Rotate the rtrinfo collection logs daily, keep two weeks of history,
# and kick off a fresh collection run after each rotation. sharedscripts
# makes postrotate run once for all matched logs rather than per file.
/var/log/rtrinfo/*.log {
        rotate 14
        daily
        missingok
        compress
        sharedscripts
        postrotate
                /srv/rtrinfo/rtrinfo.exp /srv/rtrinfo/device.db > /dev/null
        endscript
}
18. November 2015 · Comments Off on Bulk clean corrupted MySQL tables under Linux · Categories: Linux, Linux Admin

Recommend shutting down the running MySQL service first: /etc/init.d/mysql stop

To scan all DB files for errors: myisamchk -s /var/lib/mysql/*/*.MYI

To fix a corrupted database: myisamchk -r --update-state /var/lib/mysql/DIR/file.MYI

To automate the scanning and fixing of all DB files:

myisamchk -s /var/lib/mysql/*/*.MYI 2>&1 | grep ^MyISAM | awk '{print $2}' | sed "s/'//g" | sort | uniq | while read DBFILE; do myisamchk -r --update-state $DBFILE; done