19. December 2018 · Ansible playbook to provision Netscaler VIPs. · Categories: Ansible, Linux, Linux Admin, Load Balancing, NetScaler, Networking

The following playbook will create a fully functional VIP, including the supporting monitor, service group (pool) and servers (nodes), on a NetScaler load balancer. The same playbook can also fully deprovision a VIP and all of its supporting artifacts. To do all of this I use the native NetScaler Ansible modules. When it comes to the netscaler_servicegroup module, the number of servers is not always consistent, so I build that task from a Jinja2 template and import the rendered file back into the play.

netscaler_provision.yaml: a /usr/bin/ansible-playbook -f 10 script text executable, ASCII text

#!/usr/bin/ansible-playbook -f 10
## Ansible playbook to provision Netscaler VIPs.
# Requires: nitrosdk-python
# 2018 (v.01) - Playbook from www.davideaves.com
---
- name: Netscaler VIP provision
  hosts: netscaler
  connection: local
  gather_facts: False

  vars:

    ansible_connection: "local"
    ansible_python_interpreter: "/usr/bin/env python"

    state: 'present'

    lbvip:
      name: testvip
      address: 203.0.113.1
      server:
        - name: 'server-1'
          address: '192.0.2.1'
          description: 'Ansible Test Server 1'
          disabled: 'true'
        - name: 'server-2'
          address: '192.0.2.2'
          description: 'Ansible Test Server 2'
          disabled: 'true'
        - name: 'server-3'
          address: '192.0.2.3'
          description: 'Ansible Test Server 3'
          disabled: 'true'
        - name: 'server-4'
          address: '192.0.2.4'
          description: 'Ansible Test Server 4'
          disabled: 'true'
        - name: 'server-5'
          address: '192.0.2.5'
          description: 'Ansible Test Server 5'
          disabled: 'true'
        - name: 'server-6'
          address: '192.0.2.6'
          description: 'Ansible Test Server 6'
          disabled: 'true'
        - name: 'server-7'
          address: '192.0.2.7'
          description: 'Ansible Test Server 7'
          disabled: 'true'
        - name: 'server-8'
          address: '192.0.2.8'
          description: 'Ansible Test Server 8'
          disabled: 'true'
      vserver:
        - port: '80'
          description: 'Generic service running on 80'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '443'
          description: 'Generic service running on 443'
          type: 'SSL_BRIDGE'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8080'
          description: 'Generic service running on 8080'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8081'
          description: 'Generic service running on 8081'
          type: 'HTTP'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'
        - port: '8443'
          description: 'Generic service running on 8443'
          type: 'SSL_BRIDGE'
          method: 'LEASTCONNECTION'
          persistence: 'SOURCEIP'

  tasks:

    - name: Build lbvip and all related components.
      block:
      - local_action:
          module: netscaler_server
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          name: "{{ item.name }}"
          ipaddress: "{{ item.address }}"
          comment: "{{ item.description | default('Ansible Created') }}"
          disabled: "{{ item.disabled | default('false') }}"
        with_items: "{{ lbvip.server }}"
      - local_action:
          module: netscaler_lb_monitor
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
          type: TCP
          destport: "{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
        no_log: false
      - local_action:
          module: copy
          content: "{{ lookup('template', 'templates/netscaler_servicegroup.j2') }}"
          dest: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
          mode: "0644"
        with_items: "{{ lbvip.vserver }}"
        changed_when: false
      - include_tasks: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: file
          state: absent
          path: "/tmp/svg_{{ lbvip.name }}_{{ item.port }}.yaml"
        with_items: "{{ lbvip.vserver }}"
        changed_when: false
      - local_action:
          module: netscaler_lb_vserver
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          name: "vs_{{ lbvip.name }}_{{ item.port }}"
          servicetype: "{{ item.type }}"
          ipv46: "{{ lbvip.address }}"
          port: "{{ item.port }}"
          lbmethod: "{{ item.method | default('LEASTCONNECTION') }}"
          persistencetype: "{{ item.persistence | default('SOURCEIP') }}"
          servicegroupbindings:
            - servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      when: state == "present"

    - name: Destroy lbvip and all related components.
      block:
      - local_action:
          module: netscaler_lb_vserver
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          name: "vs_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_servicegroup
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_lb_monitor
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
          type: TCP
        with_items: "{{ lbvip.vserver }}"
      - local_action:
          module: netscaler_server
          nsip: "{{ inventory_hostname }}"
          nitro_user: "{{ nitro_user | default('nsroot') }}"
          nitro_pass: "{{ nitro_pass | default('nsroot') }}"
          nitro_protocol: "https"
          validate_certs: no
          state: "{{ state }}"
          name: "{{ item.name }}"
        with_items: "{{ lbvip.server }}"
      when: state == "absent"
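
To switch between building and tearing down, I just override the state variable on the command line. A quick sketch of how I run it, assuming an inventory file that defines the netscaler group and passing the Nitro credentials as extra vars:

# Provision the VIP, monitor, service groups and servers
ansible-playbook -f 10 netscaler_provision.yaml -i inventory \
    -e nitro_user=nsroot -e nitro_pass=nsroot -e state=present

# Deprovision the same VIP and all of its supporting artifacts
ansible-playbook -f 10 netscaler_provision.yaml -i inventory \
    -e nitro_user=nsroot -e nitro_pass=nsroot -e state=absent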

The following is the Jinja2 template that creates the netscaler_servicegroup task. An important thing to note is my use of the raw blocks. When the task file is rendered and stored in /tmp it does not contain any account credentials; instead, the credential variables are preserved inside raw blocks so that sensitive information is not leaked to anyone who may be snooping around on the server while the playbook is running.

templates/netscaler_servicegroup.j2: ASCII text, with CRLF line terminators

---
- local_action:
    module: netscaler_servicegroup
    nsip: {% raw %}"{{ inventory_hostname }}"
{% endraw %}
    nitro_user: {% raw %}"{{ nitro_user }}"
{% endraw %}
    nitro_pass: {% raw %}"{{ nitro_pass }}"
{% endraw %}
    nitro_protocol: "https"
    validate_certs: no

    state: "{{ state | default('present') }}"

    servicegroupname: "svg_{{ lbvip.name }}_{{ item.port }}"
    comment: "{{ item.description | default('Ansible Created') }}"
    servicetype: "{{ item.type }}"
    servicemembers:
{% for i in lbvip.server %}
      - servername: "{{ i.name }}"
        port: "{{ item.port }}"
{% endfor %}
    monitorbindings:
      - monitorname: "tcp_{{ lbvip.name }}_{{ item.port }}"
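
For illustration, this is roughly what the rendered task file /tmp/svg_testvip_80.yaml would contain for the port 80 vserver, using the sample variables from the playbook above (trimmed to the first two service members); notice the nsip, nitro_user and nitro_pass lookups survive untouched thanks to the raw blocks:

---
- local_action:
    module: netscaler_servicegroup
    nsip: "{{ inventory_hostname }}"
    nitro_user: "{{ nitro_user }}"
    nitro_pass: "{{ nitro_pass }}"
    nitro_protocol: "https"
    validate_certs: no

    state: "present"

    servicegroupname: "svg_testvip_80"
    comment: "Generic service running on 80"
    servicetype: "HTTP"
    servicemembers:
      - servername: "server-1"
        port: "80"
      - servername: "server-2"
        port: "80"
      # ...one entry per server defined under lbvip.server...
    monitorbindings:
      - monitorname: "tcp_testvip_80"
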
04. December 2018 · Using Ansible to perform a Netscaler backup · Categories: Ansible, Load Balancing, NetScaler

The following Ansible playbook is a rewrite of a script I wrote a long time ago to perform backups of a NetScaler. As far as I know, there are no native Ansible or vendor modules to perform a system backup, so within the playbook I simply make raw calls against the Nitro API with the uri module and fetch the backup file.
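
For reference, the two Nitro API calls the playbook makes are roughly equivalent to the curl commands below; the hostname, credentials and backup filename are placeholders (the playbook itself derives the filename from an md5 hash of the inventory hostname):

# Create a full system backup on the appliance
curl -k -X POST -H "X-NITRO-USER: nsroot" -H "X-NITRO-PASS: nsroot" \
     -H "Content-Type: application/json" \
     -d '{"systembackup":{"filename":"mybackup","level":"full","comment":"Ansible Generated Backup"}}' \
     "https://ns01/nitro/v1/config/systembackup?action=create"

# Fetch the backup; the file content comes back base64 encoded inside the JSON response
curl -k -H "X-NITRO-USER: nsroot" -H "X-NITRO-PASS: nsroot" \
     "https://ns01/nitro/v1/config/systemfile?args=filename:mybackup.tgz,filelocation:%2Fvar%2Fns_sys_backup"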

netscaler_systembackup.yaml: a /usr/bin/ansible-playbook -f 10 script text executable, ASCII text

#!/usr/bin/ansible-playbook -f 10
## Ansible playbook to perform a full backup of Netscaler systems
## 2018 (v.01) - Playbook from www.davideaves.com
---
- name: Netscaler full backup
  hosts: netscalers
  connection: local
  gather_facts: False

  vars:

    ansible_connection: "local"
    ansible_python_interpreter: "/usr/bin/env python"

    backup_location: "/srv/nsbackup"

    ns_sys_backup: "/var/ns_sys_backup"

  tasks:

    - name: Check backup file status
      local_action:
        module: stat
        path: "{{ backup_location }}/{{ inventory_hostname }}_{{ lookup('pipe', 'date +%Y%m%d') }}_nsbackup.tgz"
      register: stat_result

    - name: Check backup directory location
      local_action:
        module: file
        path: "{{ backup_location }}"
        state: directory
        mode: 0775
        recurse: yes
      run_once: True
      when: stat_result.stat.exists == False

    - name: Full backup of Netscaler configuration.
      block:

      - name: Create Netscaler system backup
        local_action:
          module: uri
          url: "https://{{ inventory_hostname }}/nitro/v1/config/systembackup?action=create"
          method: POST
          validate_certs: no
          return_content: yes
          headers:
            X-NITRO-USER: "{{ nitro_user | default('nsroot') }}"
            X-NITRO-PASS: "{{ nitro_pass | default('nsroot') }}"
          body_format: json
          body: 
            systembackup:
              filename: "{{ inventory_hostname | hash('md5') }}"
              level: full
              comment: Ansible Generated Backup

      - name: Fetch Netscaler system backup
        local_action:
          module: uri
          url: "https://{{ inventory_hostname }}/nitro/v1/config/systemfile?args=filename:{{ inventory_hostname | hash('md5') }}.tgz,filelocation:{{ ns_sys_backup | replace('/','%2F') }}"
          method: GET
          status_code: 200
          validate_certs: no
          return_content: yes
          headers:
            X-NITRO-USER: "{{ nitro_user | default('nsroot') }}"
            X-NITRO-PASS: "{{ nitro_pass | default('nsroot') }}"
        register: result

      - name: Save Netscaler system backup to backup directory
        local_action: "shell echo '{{ result.json.systemfile[0].filecontent }}' | base64 -d > '{{ backup_location }}/{{ inventory_hostname }}_{{ lookup('pipe', 'date +%Y%m%d') }}_nsbackup.tgz'"

      - name: Chmod saved backup file permissions
        local_action:
          module: file
          path: "{{ backup_location }}/{{ inventory_hostname }}_{{ lookup('pipe', 'date +%Y%m%d') }}_nsbackup.tgz"
          mode: 0644

      always:

      - name: Delete system backup from Netscaler
        local_action:
          module: uri
          url: "https://{{ inventory_hostname }}/nitro/v1/config/systembackup/{{ inventory_hostname | hash('md5') }}.tgz"
          method: DELETE
          validate_certs: no
          return_content: yes
          headers:
            X-NITRO-USER: "{{ nitro_user | default('nsroot') }}"
            X-NITRO-PASS: "{{ nitro_pass | default('nsroot') }}"

      - name: Locate backup files older than 90 days
        local_action:
          module: find
          paths: "{{ backup_location }}"
          age: "90d"
        run_once: true
        register: files_matched

      - name: Purge old backup files
        local_action:
          module: file
          path: "{{ item.path }}"
          state: absent
        run_once: true
        with_items: "{{ files_matched.files }}"

      when: stat_result.stat.exists == False
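
Since the playbook carries an ansible-playbook shebang, it can be made executable and scheduled straight from cron, much like the older shell script in the post below. A sketch of a crontab entry, assuming the playbook lives alongside the backups in /srv/nsbackup and your default inventory defines the netscalers group:

@daily [ -x /srv/nsbackup/netscaler_systembackup.yaml ] && { /srv/nsbackup/netscaler_systembackup.yaml -e nitro_user=nsroot -e nitro_pass=nsroot; } > /dev/null
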
07. September 2016 · Backing up your Citrix Netscalers. · Categories: Linux, Linux Scripts, Load Balancing, NetScaler, Networking

The following is an older script I wrote to automate the backup of a bunch of Citrix NetScaler appliances. I previously posted an F5 backup script, which was based on this original script. NetScalers are awesome appliances! Not only are they insanely easy to manage, their configs are very straightforward to back up and restore. Much like the earlier F5 backup script, this one relies on SSH, except here I use SSHFS to mount the NS:/nsconfig directory and create an archive of it. The reason I originally chose SSHFS was that I intended to grep the configured hostname out of the config before creating the tarball output; below is an example…

DEST="$BACKUPDIR/$(grep ^"set ns hostName" /tmp/nsbackup/ns.conf | awk '{print "ns-"$NF"__"}' | sed 's/__$/'" [$(echo $NS | cksum | awk '{print $1}')] $(date +%F)"'.tar.xv/')"

Just like the previous script, this can be run automatically from cron…
@weekly [ -f /srv/nsbackup/nsbackup.sh ] && { /srv/nsbackup/nsbackup.sh; } > /dev/null

Feel free to review, modify or use this script however you see fit. Remember you do so at your own risk!

#!/bin/bash
## Backup /nsconfig directories against a list of Citrix Netscalers.
## 2016 (v1.0) - Script from www.davideaves.com
 
NSHOSTS="ns01 ns02"
NSPW="nsroot"
BACKUPDIR="/srv/nsbackup"
 
# FUNCTION: End Script if error.
DIE() {
 echo "ERROR: Validate \"$_\" is installed and working on your system."
 exit 1
}
 
# Validate script requirements are met.
type -p sshfs > /dev/null || DIE
 
# Main Loop.
for NS in $(echo $NSHOSTS | tr '[:lower:]' '[:upper:]'); do
 
 # Create backup directory and mount nsconfig using sshfs.
 mkdir /tmp/nsbackup && echo "$NSPW" | sshfs nsroot@$NS:/nsconfig/ /tmp/nsbackup -o password_stdin,StrictHostKeyChecking=no
 
 if [ -f "/tmp/nsbackup/ns.conf" ]; then
 
  # Figure out backup destination file.
  DEST="$BACKUPDIR/$NS$(echo $NS | cksum | awk '{print "_"$1}') ($(date +%F)).tar.xv"
 
  # Delete backup files older than 90 days.
  find "$BACKUPDIR" -maxdepth 1 -type f -name "*$(echo $NS | cksum | awk '{print "_"$1}')\ *.tar.xv" -mtime +90 -exec rm {} \;
 
  # Create backup file.
  if [ ! -f "$DEST" ]; then
   cd /tmp/nsbackup
   tar cfJ "$DEST" * && sync
   cd ..
  else
   echo "$DEST: Backup already exists..."
  fi
 
 fi
 
 # Unmount and remove backup directory.
 [ -d "/tmp/nsbackup" ] && { fusermount -u /tmp/nsbackup; }
 [ -d "/tmp/nsbackup" ] && { rmdir /tmp/nsbackup; }
 
done