My shell portfolio begins
parent aaca49937f · commit c2b1b821f7
KB/99-usb-serial.rules (new file)
@@ -0,0 +1,51 @@
#examples from :
#http://hintshop.ludvig.co.nz/show/persistent-names-usb-serial-devices/
#https://medium.com/@inegm/persistent-names-for-usb-serial-devices-in-linux-dev-ttyusbx-dev-custom-name-fd49b5db9af1

#SUBSYSTEM=="tty", ATTRS{idVendor}=="1a86", ATTRS{idProduct}=="7523", SYMLINK+="rah-ctrl"
#SUBSYSTEM=="tty", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6001", ATTRS{serial}=="A6008isP", SYMLINK+="arduino"
#SUBSYSTEM=="tty", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6001", ATTRS{serial}=="A7004IXj", SYMLINK+="buspirate"
#SUBSYSTEM=="tty", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6001", ATTRS{serial}=="FTDIF46B", SYMLINK+="ttyUSB.ARM"

#Apply changes via
#
# udevadm control --reload-rules && sudo udevadm trigger
#

####################################
#USB serial for UPS
####################################

#Bus 002 Device 045: ID 067b:2303 Prolific Technology, Inc. PL2303 Serial Port

#root@pfv-vmsrv-03:~# udevadm info --name=/dev/ttyUSB2 --attribute-walk|grep -i vendor
# ATTRS{idVendor}=="067b"
# ATTRS{idVendor}=="2109"
# ATTRS{idVendor}=="8087"
# ATTRS{idVendor}=="1d6b"
# ATTRS{subsystem_vendor}=="0x1028"
# ATTRS{vendor}=="0x8086"

#root@pfv-vmsrv-03:/etc/snmp# udevadm info --name=/dev/ttyUSB2 --attribute-walk|grep -i serial
# SUBSYSTEMS=="usb-serial"
# ATTRS{product}=="USB-Serial Controller"
# ATTRS{serial}=="0000:00:1d.0"

#root@pfv-vmsrv-03:/etc/udev/rules.d# udevadm info -a -n /dev/ttyUSB0 | grep '{serial}' | head -n1
# ATTRS{serial}=="0000:00:1d.0"

#SUBSYSTEM=="tty", ATTRS{idVendor}=="067b", ATTRS{idProduct}=="2303", ATTRS{serial}=="0000:00:1d.0", SYMLINK+="ups4"
SUBSYSTEM=="tty", ATTRS{idVendor}=="067b", ATTRS{idProduct}=="2303", SYMLINK+="ups4"

####################################
#USB serial for sifive
####################################

####################################
#USB serial for parallella
####################################
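Usage note: the commented udevadm walks above show how the UPS adapter's attributes were discovered. A minimal sketch of the same workflow for a new adapter (the /dev/ttyUSB0 node and the "mydevice" symlink are illustrative placeholders):

# Find match attributes for the adapter currently at /dev/ttyUSB0 (placeholder node)
udevadm info -a -n /dev/ttyUSB0 | grep -E 'idVendor|idProduct|\{serial\}' | head -n 12
# Drop the chosen values into a rule, e.g.:
#   SUBSYSTEM=="tty", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6001", SYMLINK+="mydevice"
# Reload and retrigger as noted in the file above, then confirm the symlink:
sudo udevadm control --reload-rules && sudo udevadm trigger
ls -l /dev/mydevice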
KB/nitrokey-reset.txt (new file)
@@ -0,0 +1,13 @@
/hex
scd serialno
scd apdu 00 20 00 81 08 40 40 40 40 40 40 40 40
scd apdu 00 20 00 81 08 40 40 40 40 40 40 40 40
scd apdu 00 20 00 81 08 40 40 40 40 40 40 40 40
scd apdu 00 20 00 81 08 40 40 40 40 40 40 40 40
scd apdu 00 20 00 83 08 40 40 40 40 40 40 40 40
scd apdu 00 20 00 83 08 40 40 40 40 40 40 40 40
scd apdu 00 20 00 83 08 40 40 40 40 40 40 40 40
scd apdu 00 20 00 83 08 40 40 40 40 40 40 40 40
scd apdu 00 e6 00 00
scd apdu 00 44 00 00
/echo card has been reset to factory defaults
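Usage note: these are smartcard-daemon commands, normally fed to gpg-connect-agent on the machine with the Nitrokey attached. A minimal sketch (GnuPG 2.x with scdaemon assumed; this wipes the OpenPGP applet):

gpg-connect-agent < KB/nitrokey-reset.txt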
StartProject.sh (new file)
@@ -0,0 +1,12 @@
#!/bin/bash

set -euo pipefail

#arguments
#1) directory to start in

START_DIR="$1"

cd "$START_DIR"

code .
VPNCheck.sh (new file)
@@ -0,0 +1,9 @@
#!/bin/bash

VPN_DNS="usvpn.turnsys.com"

CURRENT_WAN_IP="$(curl -s http://checkip.dyndns.org | sed -E 's/<[^>]*>//g' | awk -F ': ' '{print $2}')"
CURRENT_VPNDNS_IP="$(dig +short "$VPN_DNS")"

echo "Current WAN IP is: $CURRENT_WAN_IP"
echo "Current VPN IP is: $CURRENT_VPNDNS_IP"
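Possible extension (not in the script itself): the check this implies is comparing the two values, on the assumption that the VPN endpoint is also the expected egress address:

if [ "$CURRENT_WAN_IP" = "$CURRENT_VPNDNS_IP" ]; then
    echo "WAN IP matches $VPN_DNS - VPN appears up"
else
    echo "WAN IP differs from $VPN_DNS - VPN appears down"
fi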
alias.sh (new file)
@@ -0,0 +1,119 @@
#alias history='history -f'
#alias ssh='/usr/bin/ssh -F ~/Nextcloud/dotfiles-git/rcdirs/ssh/config'
#alias scp='/usr/bin/scp -F ~/Nextcloud/dotfiles-git/rcdirs/ssh/config'
alias s='ssh'
alias vi='vim'
alias id0='sudo -i'
alias ls='ls --color'
alias grep='rg --color auto'
alias wget='wget --no-check-certificate'
alias curl='curl --insecure'
alias cls='clear ; ls'

#Git / git stuff
alias gup='git pull'
alias lpom='git add -A :/ ; git commit -va'
alias gpom=' git push --tags origin master'
alias tesla='gup;lpom;gpom'

#Docker / k8s aliases, cause i'm a docker/kubefarm fanboi now

## Most docker/k8s use is via vscode now, but sometimes you wanna drop to a shell and do some stuff...

alias dc='docker-compose'
alias dcu='docker compose up'
alias dcd='docker compose down'

alias dcdu='docker compose up -d'

alias dcf='docker-compose rm -f ; docker-compose up'
alias dcd-prod='docker-compose --context prod up -d'
alias dcd-cicd='docker-compose --context cicd up -d'
alias dcd-dev='docker-compose --context dev up -d'

alias kgn='kubectl get nodes -o wide|grep -v NAME|sort'
alias kgp='kubectl get pods -A -o wide|grep -v NAME|sort'

alias k0-sb-gn='export KUBECONFIG=~/.kube/custom-contexts/context-sandbox-config.yml ; kgn'
alias k0-sb-gp='export KUBECONFIG=~/.kube/custom-contexts/context-sandbox-config.yml ; kgp'

alias k0-dqu-gn='export KUBECONFIG=~/.kube/custom-contexts/context-dqu-config.yml ; kgn'
alias k0-dqu-gp='export KUBECONFIG=~/.kube/custom-contexts/context-dqu-config.yml ; kgp'

alias k0-prod-gn='export KUBECONFIG=~/.kube/custom-contexts/context-prod-config.yml ; kgn'
alias k0-prod-gp='export KUBECONFIG=~/.kube/custom-contexts/context-prod-config.yml ; kgp'

#Projects

### Documentation
alias context-docs-techops='StartProject.sh ~/charles/code/techops/docs-techops'
alias context-notes-public='StartProject.sh ~/charles/notes-public'
alias context-docs-subo='StartProject.sh ~/charles/code/RD/docs-suborbital'
alias context-docs-rr='StartProject.sh ~/charles/code/RD/docs-rackrental'

### Configs
alias context-dotfiles='StartProject.sh ~/charles/dotfiles-git'
alias context-tsys-dev='StartProject.sh ~/charles/code/techops/tsys-dev'

### Code - internal
alias context-rd-MorseFlyer='StartProject.sh ~/charles/RD/Morse/Internal/'

### Code - BizOps/TechOps
alias context-services-bizops='StartProject.sh ~/charles/code/techops/ITBackOfficeFLOStack'

#alias tmux='TERMINFO=/usr/share/terminfo/x/xterm-16color TERM=xterm-16color tmux -2'

#####################################################
#Personal host variables
#####################################################
#Eventually we'll move to ldap/no shared accounts/forced sudo. Blech. Corporate tyranny!
#For now, the easy way. HAH!
#CON_USER="charlesnw"
#INBAND_USER="charlesnw"
PERSONAL_OOB_USER="root"
PERSONAL_INBAND_USER="root"
#####################################################

#Functions to deploy on ultix and/or charles-prodlin...
#alias 2600hz='ssh $INBAND_USER@conference.corp.thefnf.net'
#alias yacy='ssh $INBAND_USER@yacyfnf.corp.thefnf.net'
#alias confine='ssh $INBAND_USER@confine.dev.thefnf.net'

#FNF hosts
alias tsys-fnf-freedomstack='ssh charles@tsys-fnf-freedomstack' #FreedomStack dev vm
alias hearth-at='ssh charles@hearth-at.thefnf.net'
alias hearth-an='ssh charles@hearth-an.thefnf.net'
alias hearth-bds='ssh charles@hearth-bds.thefnf.net'
alias hearth-uds='ssh charles@hearth-uds.thefnf.net'

#####################################################
#PFV HOSTS
#####################################################
#Bare metal systems (in band access) - Production
#Upstairs, production
alias ausprod-core-ap01='telnet ausprod-core-ap01.turnsys.net'
alias ausprod-core-sw01='telnet ausprod-core-sw01.turnsys.net'

#alias netbox='ssh -i $PATH_TO_KEY $LABUSER@netbox.dev.thefnf.net' #gns3 with ios/juniper/comware/extreme os/tinycore/openflow (ALL THE NETWORK THINGS)
#alias cudasys='ssh -i $PATH_TO_KEY $LABUSER@cudasys.dev.thefnf.net' #cuda awesomeness

export GIT_SSH_COMMAND='ssh -i ~/Nextcloud/secrets/ssh/ReachableCEOPrivateSSHKey -o IdentitiesOnly=yes'

###########################################################
#Inband access (ssh/telnet) #
###########################################################
#alias rr-con-sw4='ssh $RRLABUSER:7003@ausprod.consrv.turnsys.net'
#alias rr-con-r7='ssh $RRLABUSER:7021@ausprod-consrv.turnsys.net'
#TBD alias rr-con-r8='ssh $RRLABUSER:7021@ausprod-consrv.turnsys.net'
#TBD alias rr-con-r10='ssh $RRLABUSER:7021@ausprod-consrv.turnsys.net'
###########################################################

alias hb='habitctl'

#From https://www.ackama.com/what-we-think/the-best-way-to-store-your-dotfiles-a-bare-git-repository-explained/
alias dfile='/usr/bin/git --git-dir=$HOME/.cfg/.git/ --work-tree=$HOME'
#Aliases cannot take positional arguments, so dadd is a small function instead
dadd() { dfile add "$1" ; dfile commit -m "added $1" ; }
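Setup note: the dfile/dadd helpers assume a dotfiles repository checked out at $HOME/.cfg with its work tree pointed at $HOME (the pattern from the Ackama article linked above). A hedged one-time setup sketch; the remote URL is a hypothetical placeholder:

git clone <your-dotfiles-remote> "$HOME/.cfg"
/usr/bin/git --git-dir="$HOME/.cfg/.git/" --work-tree="$HOME" config status.showUntrackedFiles no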
backup-ez.sh (new file)
@@ -0,0 +1,5 @@
#!/bin/bash

export BORG_PASSPHRASE=$(magicstuffhere-tbd)

borg create -v --stats /media/charles/CPWBKUP/Charles-Backup/BorgBkups/::$(date +%m-%d-%Y) /home/charles
backup-share.sh (new file)
@@ -0,0 +1,5 @@
#!/bin/bash

export BORG_PASSPHRASE='YourS3crt'

borg create -v --stats <target dir with trailing />::$(date +%m-%d-%Y) <src dir to backup>
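Usage note: a filled-in example with illustrative paths; borg also needs the repository initialised once before the first backup:

export BORG_PASSPHRASE='YourS3crt'
borg init --encryption=repokey /mnt/backup/borg-repo/       # one-time repository creation
borg create -v --stats /mnt/backup/borg-repo/::$(date +%m-%d-%Y) /srv/share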
checkDomainIPAddress.sh (new file)
@@ -0,0 +1,9 @@
#!/bin/bash

for lineitem in $(grep -v '^#' ~/notes/MasterDomList.md);
do
    NAME="$lineitem"
    IP="$(dig +short "$lineitem")"
    echo "$NAME: $IP"
done
clean-docker.sh (new file)
@@ -0,0 +1,5 @@
#!/bin/bash

#Force-remove every container, then every image. Destructive: meant for throwaway dev hosts.
for did in $(docker ps -a |grep -v IMAGE|awk '{print $1}'); do docker rm -f $did;done
for did in $(docker image ls |grep -v IMAGE|awk '{print $3}'); do docker image rm -f $did;done
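Alternative: roughly the same cleanup is available through Docker's built-in prune commands (unlike the loops above, these only remove stopped containers):

docker container prune -f
docker image prune -a -f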
createLxc.sh (new file)
@@ -0,0 +1,156 @@
#!/bin/bash
#A script to create LXC virtual machines

#Takes two mandatory arguments
#Hostname
#IP address

#Takes two optional arguments
#Template to provision from
#Path to create instance


#Usage message
usage()
{
echo "$0 needs to be invoked with two arguments:

Argument 1: Hostname
Argument 2: IP Address

It can also take two optional arguments:

Path to a template you wish to provision from
Path to a directory to store a virtual machine in"
exit 0
}

#Error handling code
error_out()
{
echo "A critical error has occurred. Please see above line for portion that failed."
exit 1
}

bail_out()
{
echo "Exiting at user request."
exit 0
}

preflight()
{
#Ensure script is running as lxcmgmt user
if [ "$(whoami)" != 'lxcmgmt' ]; then
echo "You must be the lxcmgmt user to run $0"
exit 1;
fi


#Check for hostname argument
echo "Ensuring hostname is properly set..."
if [ -z "$1" ]; then
error_out
else
VMHOSTNAME="$1"
fi

#Check for IP
echo "Ensuring ip is properly set..."
if [ -z "$2" ]; then
error_out
else
VMIP="$2"
fi

#Check for template specification, otherwise set to default
if [ -n "$3" ]; then
VMTEMPLATE="$3"
else
VMTEMPLATE="/lxc/templates/ariesvm.tar.gz"
fi

#Check for path specification, otherwise set to default
if [ -n "$4" ]; then
VMPATH="$4"
else
VMPATH="/lxc/instances/$VMHOSTNAME"
fi

echo "VM will be created with the following parameters."
echo "Hostname: $VMHOSTNAME"
echo "IPv4 Address: $VMIP"
echo "Template: $VMTEMPLATE"
echo "Path: $VMPATH"
echo "Do you wish to proceed? (Y/N)"
read -r proceed

if [ "$proceed" = "Y" ]; then
createvm "$VMHOSTNAME" "$VMIP" "$VMTEMPLATE" "$VMPATH"
elif [ "$proceed" = "N" ]; then
bail_out
else
echo "Please specify Y or N"
error_out
fi
}

createvm()
{
#Provision a vm
#If we are here, preflight check passed, user confirmed parameters and we are good to go

#SOME variables...
CONFIGTEMPLATES="/lxc/templates"
#Derive a per-VM value from the last octet of the IP (used below as lxc.network.hwaddr)
VMMAC=$(echo $VMIP | awk -F . '{print $4}')

#First we create a directory for the instance
echo "Creating storage location for $VMHOSTNAME..."
mkdir "$VMPATH"
mkdir "$VMPATH/rootfs"

#Second we uncompress the VM template
echo "Uncompressing template..."
tar xfz "$VMTEMPLATE" -C "$VMPATH/rootfs"

#Dynamically create fstab and config file in /lxc/instances/vminstance:
echo "Creating configuration files..."

#Create fstab:
echo "Creating fstab..."
cat > $VMPATH/$VMHOSTNAME.fstab <<FSTAB
proc /lxc/instances/$VMHOSTNAME/rootfs/proc proc nodev,noexec,nosuid 0 0
sysfs /lxc/instances/$VMHOSTNAME/rootfs/sys sysfs defaults 0 0
/dev /lxc/instances/$VMHOSTNAME/rootfs/dev none bind 0 0
FSTAB

echo "Creating config file..."
cat > $VMPATH/$VMHOSTNAME.config <<CONFIG
lxc.utsname = $VMHOSTNAME
lxc.mount = $VMPATH/$VMHOSTNAME.fstab
lxc.rootfs = $VMPATH/rootfs
lxc.network.hwaddr = $VMMAC
lxc.network.ipv4 = $VMIP
lxc.tty = 6
lxc.network.type = veth
lxc.network.flags = up
lxc.network.link = br0
lxc.network.name = eth0
CONFIG

#Start VM:
echo "Starting your virtual machine $VMHOSTNAME..."

#Verify VM is running:
echo "Verifying successful boot of $VMHOSTNAME..."

exit 0
}

if [ "$1" = "--help" ]; then
usage
fi

preflight "$1" "$2" "$3" "$4"

exit 0
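Example invocations (hostname, IP and paths are illustrative; run as the lxcmgmt user; template and instance path default as described above):

./createLxc.sh web01 10.251.30.42
./createLxc.sh web02 10.251.30.43 /lxc/templates/ariesvm.tar.gz /lxc/instances/web02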
custom_ssh_handler.sh (new file)
@@ -0,0 +1,10 @@
#Expects SSH_KEY to be set in the environment to the private key to authenticate with.
function custom_ssh()
{
CUSTOMSSH_USER=$1
CUSTOMSSH_HOST=$2
CUSTOMSSH_COMMAND=$3

SSH_TARGET="$CUSTOMSSH_USER@$CUSTOMSSH_HOST"
ssh -q -t -o StrictHostKeyChecking=no -i "$SSH_KEY" "$SSH_TARGET" "$CUSTOMSSH_COMMAND"
}
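Example use (host, user and key path are illustrative; SSH_KEY must be set before calling):

export SSH_KEY=~/.ssh/id_ed25519
custom_ssh charles build01.example.com "uptime"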
distro (new file)
@@ -0,0 +1,114 @@
#!/usr/bin/env bash
# Detects which OS and if it is Linux then it will detect which Linux Distribution.

OS=`uname -s`
REV=`uname -r`
MACH=`uname -m`

if [ "${OS}" = "SunOS" ] ; then
    OS=Solaris
    ARCH=`uname -p`
    OSSTR="${OS} ${REV}(${ARCH} `uname -v`)"

elif [ "${OS}" = "AIX" ] ; then
    OSSTR="${OS} `oslevel` (`oslevel -r`)"

elif [ "${OS}" = "Linux" ] ; then
    KERNEL=`uname -r`

    if [ -f /etc/fedora-release ]; then
        DIST=$(cat /etc/fedora-release | awk '{print $1}')
        REV=`cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//`

    elif [ -f /etc/redhat-release ] ; then
        DIST=$(cat /etc/redhat-release | awk '{print $1}')
        if [ "${DIST}" = "CentOS" ]; then
            DIST="CentOS"
        elif [ "${DIST}" = "Mandriva" ]; then
            DIST="Mandriva"
            PSEUDONAME=`cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//`
            REV=`cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//`
        elif [ -f /etc/oracle-release ]; then
            DIST="Oracle"
        else
            DIST="RedHat"
        fi

        PSEUDONAME=`cat /etc/redhat-release | sed s/.*\(// | sed s/\)//`
        REV=`cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//`

    elif [ -f /etc/mandrake-release ] ; then
        DIST='Mandrake'
        PSEUDONAME=`cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//`
        REV=`cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//`

    elif [ -f /etc/devuan_version ] ; then
        DIST="Devuan `cat /etc/devuan_version`"
        REV=""

    elif [ -f /etc/debian_version ] ; then
        DIST="Debian `cat /etc/debian_version`"
        REV=""
        ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'`
        if [ "${ID}" = "Raspbian" ] ; then
            DIST="Raspbian `cat /etc/debian_version`"
        fi

    elif [ -f /etc/gentoo-release ] ; then
        DIST="Gentoo"
        REV=$(tr -d '[[:alpha:]]' </etc/gentoo-release | tr -d " ")

    elif [ -f /etc/arch-release ] ; then
        DIST="Arch Linux"
        REV="" # Omit version since Arch Linux uses rolling releases
        IGNORE_LSB=1 # /etc/lsb-release would overwrite $REV with "rolling"

    elif [ -f /etc/os-release ] ; then
        DIST=$(grep '^NAME=' /etc/os-release | cut -d= -f2- | tr -d '"')
        REV=$(grep '^VERSION_ID=' /etc/os-release | cut -d= -f2- | tr -d '"')

    elif [ -f /etc/openwrt_version ] ; then
        DIST="OpenWrt"
        REV=$(cat /etc/openwrt_version)

    elif [ -f /etc/pld-release ] ; then
        DIST=$(cat /etc/pld-release)
        REV=""

    elif [ -f /etc/SuSE-release ] ; then
        DIST=$(echo SLES $(grep VERSION /etc/SuSE-release | cut -d = -f 2 | tr -d " "))
        REV=$(echo SP$(grep PATCHLEVEL /etc/SuSE-release | cut -d = -f 2 | tr -d " "))
    fi

    if [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then
        LSB_DIST=$(lsb_release -si)
        LSB_REV=$(lsb_release -sr)
        if [ "$LSB_DIST" != "" ] ; then
            DIST=$LSB_DIST
        fi
        if [ "$LSB_REV" != "" ] ; then
            REV=$LSB_REV
        fi
    fi

    if [ "`uname -a | awk '{print $(NF)}'`" = "DD-WRT" ] ; then
        DIST="dd-wrt"
    fi

    if [ -n "${REV}" ]
    then
        OSSTR="${DIST} ${REV}"
    else
        OSSTR="${DIST}"
    fi

elif [ "${OS}" = "Darwin" ] ; then
    if [ -f /usr/bin/sw_vers ] ; then
        OSSTR=`/usr/bin/sw_vers|grep -v Build|sed 's/^.*:.//'| tr "\n" ' '`
    fi

elif [ "${OS}" = "FreeBSD" ] ; then
    OSSTR=`/usr/bin/uname -mior`
fi

echo ${OSSTR}
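How other scripts in this portfolio consume the output (see new-server-bootstrap.sh and obtain_centos_mac.sh):

DISTRO_TYPE="$(distro | awk '{print $1}' | tr '[:upper:]' '[:lower:]')"
DISTRO_VERSION="$(distro | awk '{print $2}' | awk -F '.' '{print $1}')"
echo "$DISTRO_TYPE $DISTRO_VERSION"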
dnsCheck.sh (new file)
@@ -0,0 +1,7 @@
#!/bin/bash

for vmName in $(cat proxVmList); do
    IP="$(dig +short "$vmName.turnsys.net")"
    echo "VmName: $vmName IP: $IP"
done
getCurrentResolution.sh (new file)
@@ -0,0 +1,9 @@
#!/bin/bash
# From https://www.unix.com/unix-for-dummies-questions-and-answers/126641-how-get-current-x-screen-resolution.html

LINE=`xrandr -q | grep Screen`
echo LINE = ${LINE}
WIDTH=`echo ${LINE} | awk '{ print $8 }'`
echo WIDTH = ${WIDTH}
HEIGHT=`echo ${LINE} | awk '{ print $10 }' | awk -F"," '{ print $1 }'`
echo HEIGHT = ${HEIGHT}
getFreePFVMgmtIp.sh (new file)
@@ -0,0 +1,2 @@
#!/bin/bash
#Ping-sweep the PFV management subnet and list the hosts that answer (addresses not listed are candidates for assignment).
nmap -sn 10.251.30.0/24 |grep scan
get_helm.sh (new file)
@@ -0,0 +1,326 @@
#!/usr/bin/env bash

# Copyright The Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The install script is based off of the MIT-licensed script from glide,
# the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get

: ${BINARY_NAME:="helm"}
: ${USE_SUDO:="true"}
: ${DEBUG:="false"}
: ${VERIFY_CHECKSUM:="true"}
: ${VERIFY_SIGNATURES:="false"}
: ${HELM_INSTALL_DIR:="/usr/local/bin"}
: ${GPG_PUBRING:="pubring.kbx"}

HAS_CURL="$(type "curl" &> /dev/null && echo true || echo false)"
HAS_WGET="$(type "wget" &> /dev/null && echo true || echo false)"
HAS_OPENSSL="$(type "openssl" &> /dev/null && echo true || echo false)"
HAS_GPG="$(type "gpg" &> /dev/null && echo true || echo false)"

# initArch discovers the architecture for this system.
initArch() {
  ARCH=$(uname -m)
  case $ARCH in
    armv5*) ARCH="armv5";;
    armv6*) ARCH="armv6";;
    armv7*) ARCH="arm";;
    aarch64) ARCH="arm64";;
    x86) ARCH="386";;
    x86_64) ARCH="amd64";;
    i686) ARCH="386";;
    i386) ARCH="386";;
  esac
}

# initOS discovers the operating system for this system.
initOS() {
  OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')

  case "$OS" in
    # Minimalist GNU for Windows
    mingw*) OS='windows';;
  esac
}

# runs the given command as root (detects if we are root already)
runAsRoot() {
  if [ $EUID -ne 0 -a "$USE_SUDO" = "true" ]; then
    sudo "${@}"
  else
    "${@}"
  fi
}

# verifySupported checks that the os/arch combination is supported for
# binary builds, as well whether or not necessary tools are present.
verifySupported() {
  local supported="darwin-amd64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nlinux-s390x\nwindows-amd64"
  if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
    echo "No prebuilt binary for ${OS}-${ARCH}."
    echo "To build from source, go to https://github.com/helm/helm"
    exit 1
  fi

  if [ "${HAS_CURL}" != "true" ] && [ "${HAS_WGET}" != "true" ]; then
    echo "Either curl or wget is required"
    exit 1
  fi

  if [ "${VERIFY_CHECKSUM}" == "true" ] && [ "${HAS_OPENSSL}" != "true" ]; then
    echo "In order to verify checksum, openssl must first be installed."
    echo "Please install openssl or set VERIFY_CHECKSUM=false in your environment."
    exit 1
  fi

  if [ "${VERIFY_SIGNATURES}" == "true" ]; then
    if [ "${HAS_GPG}" != "true" ]; then
      echo "In order to verify signatures, gpg must first be installed."
      echo "Please install gpg or set VERIFY_SIGNATURES=false in your environment."
      exit 1
    fi
    if [ "${OS}" != "linux" ]; then
      echo "Signature verification is currently only supported on Linux."
      echo "Please set VERIFY_SIGNATURES=false or verify the signatures manually."
      exit 1
    fi
  fi
}

# checkDesiredVersion checks if the desired version is available.
checkDesiredVersion() {
  if [ "x$DESIRED_VERSION" == "x" ]; then
    # Get tag from release URL
    local latest_release_url="https://github.com/helm/helm/releases"
    if [ "${HAS_CURL}" == "true" ]; then
      TAG=$(curl -Ls $latest_release_url | grep 'href="/helm/helm/releases/tag/v3.[0-9]*.[0-9]*\"' | grep -v no-underline | head -n 1 | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
    elif [ "${HAS_WGET}" == "true" ]; then
      TAG=$(wget $latest_release_url -O - 2>&1 | grep 'href="/helm/helm/releases/tag/v3.[0-9]*.[0-9]*\"' | grep -v no-underline | head -n 1 | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
    fi
  else
    TAG=$DESIRED_VERSION
  fi
}

# checkHelmInstalledVersion checks which version of helm is installed and
# if it needs to be changed.
checkHelmInstalledVersion() {
  if [[ -f "${HELM_INSTALL_DIR}/${BINARY_NAME}" ]]; then
    local version=$("${HELM_INSTALL_DIR}/${BINARY_NAME}" version --template="{{ .Version }}")
    if [[ "$version" == "$TAG" ]]; then
      echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
      return 0
    else
      echo "Helm ${TAG} is available. Changing from version ${version}."
      return 1
    fi
  else
    return 1
  fi
}

# downloadFile downloads the latest binary package and also the checksum
# for that binary.
downloadFile() {
  HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
  DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST"
  CHECKSUM_URL="$DOWNLOAD_URL.sha256"
  HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
  HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
  HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
  echo "Downloading $DOWNLOAD_URL"
  if [ "${HAS_CURL}" == "true" ]; then
    curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
    curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
  elif [ "${HAS_WGET}" == "true" ]; then
    wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
    wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
  fi
}

# verifyFile verifies the SHA256 checksum of the binary package
# and the GPG signatures for both the package and checksum file
# (depending on settings in environment).
verifyFile() {
  if [ "${VERIFY_CHECKSUM}" == "true" ]; then
    verifyChecksum
  fi
  if [ "${VERIFY_SIGNATURES}" == "true" ]; then
    verifySignatures
  fi
}

# installFile installs the Helm binary.
installFile() {
  HELM_TMP="$HELM_TMP_ROOT/$BINARY_NAME"
  mkdir -p "$HELM_TMP"
  tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
  HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/helm"
  echo "Preparing to install $BINARY_NAME into ${HELM_INSTALL_DIR}"
  runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$BINARY_NAME"
  echo "$BINARY_NAME installed into $HELM_INSTALL_DIR/$BINARY_NAME"
}

# verifyChecksum verifies the SHA256 checksum of the binary package.
verifyChecksum() {
  printf "Verifying checksum... "
  local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
  local expected_sum=$(cat ${HELM_SUM_FILE})
  if [ "$sum" != "$expected_sum" ]; then
    echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting."
    exit 1
  fi
  echo "Done."
}

# verifySignatures obtains the latest KEYS file from GitHub master branch
# as well as the signature .asc files from the specific GitHub release,
# then verifies that the release artifacts were signed by a maintainer's key.
verifySignatures() {
  printf "Verifying signatures... "
  local keys_filename="KEYS"
  local github_keys_url="https://raw.githubusercontent.com/helm/helm/master/${keys_filename}"
  if [ "${HAS_CURL}" == "true" ]; then
    curl -SsL "${github_keys_url}" -o "${HELM_TMP_ROOT}/${keys_filename}"
  elif [ "${HAS_WGET}" == "true" ]; then
    wget -q -O "${HELM_TMP_ROOT}/${keys_filename}" "${github_keys_url}"
  fi
  local gpg_keyring="${HELM_TMP_ROOT}/keyring.gpg"
  local gpg_homedir="${HELM_TMP_ROOT}/gnupg"
  mkdir -p -m 0700 "${gpg_homedir}"
  local gpg_stderr_device="/dev/null"
  if [ "${DEBUG}" == "true" ]; then
    gpg_stderr_device="/dev/stderr"
  fi
  gpg --batch --quiet --homedir="${gpg_homedir}" --import "${HELM_TMP_ROOT}/${keys_filename}" 2> "${gpg_stderr_device}"
  gpg --batch --no-default-keyring --keyring "${gpg_homedir}/${GPG_PUBRING}" --export > "${gpg_keyring}"
  local github_release_url="https://github.com/helm/helm/releases/download/${TAG}"
  if [ "${HAS_CURL}" == "true" ]; then
    curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
    curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
  elif [ "${HAS_WGET}" == "true" ]; then
    wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
    wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
  fi
  local error_text="If you think this might be a potential security issue,"
  error_text="${error_text}\nplease see here: https://github.com/helm/community/blob/master/SECURITY.md"
  local num_goodlines_sha=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
  if [[ ${num_goodlines_sha} -lt 2 ]]; then
    echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256!"
    echo -e "${error_text}"
    exit 1
  fi
  local num_goodlines_tar=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
  if [[ ${num_goodlines_tar} -lt 2 ]]; then
    echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz!"
    echo -e "${error_text}"
    exit 1
  fi
  echo "Done."
}

# fail_trap is executed if an error occurs.
fail_trap() {
  result=$?
  if [ "$result" != "0" ]; then
    if [[ -n "$INPUT_ARGUMENTS" ]]; then
      echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS"
      help
    else
      echo "Failed to install $BINARY_NAME"
    fi
    echo -e "\tFor support, go to https://github.com/helm/helm."
  fi
  cleanup
  exit $result
}

# testVersion tests the installed client to make sure it is working.
testVersion() {
  set +e
  HELM="$(command -v $BINARY_NAME)"
  if [ "$?" = "1" ]; then
    echo "$BINARY_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
    exit 1
  fi
  set -e
}

# help provides possible cli installation arguments
help () {
  echo "Accepted cli arguments are:"
  echo -e "\t[--help|-h ] ->> prints this help"
  echo -e "\t[--version|-v <desired_version>] . When not defined it fetches the latest release from GitHub"
  echo -e "\te.g. --version v3.0.0 or -v canary"
  echo -e "\t[--no-sudo] ->> install without sudo"
}

# cleanup temporary files to avoid https://github.com/helm/helm/issues/2977
cleanup() {
  if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
    rm -rf "$HELM_TMP_ROOT"
  fi
}

# Execution

#Stop execution on any error
trap "fail_trap" EXIT
set -e

# Set debug if desired
if [ "${DEBUG}" == "true" ]; then
  set -x
fi

# Parsing input arguments (if any)
export INPUT_ARGUMENTS="${@}"
set -u
while [[ $# -gt 0 ]]; do
  case $1 in
    '--version'|-v)
      shift
      if [[ $# -ne 0 ]]; then
        export DESIRED_VERSION="${1}"
      else
        echo -e "Please provide the desired version. e.g. --version v3.0.0 or -v canary"
        exit 0
      fi
      ;;
    '--no-sudo')
      USE_SUDO="false"
      ;;
    '--help'|-h)
      help
      exit 0
      ;;
    *) exit 1
      ;;
  esac
  shift
done
set +u

initArch
initOS
verifySupported
checkDesiredVersion
if ! checkHelmInstalledVersion; then
  downloadFile
  verifyFile
  installFile
fi
testVersion
cleanup
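Typical invocations (the version shown is illustrative; see help() above for the accepted flags):

./get_helm.sh                        # install the latest v3 release
./get_helm.sh --version v3.9.0       # pin a specific version
DESIRED_VERSION=v3.9.0 ./get_helm.sh --no-sudo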
gitMirror.sh (new file)
@@ -0,0 +1,19 @@
#!/bin/bash

set -euo pipefail

#A script to set up git mirroring

#Works with aliases
# lpom='git add -A :/ ; git commit -va'
# gpom='git push all master'
# tesla='lpom;gpom'

PRIMARY_TARGET="$1"
SECONDARY_TARGET="$2"

git remote add all "$PRIMARY_TARGET"
git remote set-url --add --push all "$PRIMARY_TARGET"
git remote set-url --add --push all "$SECONDARY_TARGET"
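Example (remote URLs illustrative); afterwards the `git push all master` variant noted in the header comment pushes to both remotes:

./gitMirror.sh git@github.com:example/repo.git git@gitea.example.com:example/repo.git
git push all master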
k8s-context.sh (new file)
@@ -0,0 +1,19 @@
# Set the default kube context if present

DEFAULT_KUBE_CONTEXTS="$HOME/.kube/config"
if test -f "${DEFAULT_KUBE_CONTEXTS}"
then
  export KUBECONFIG="$DEFAULT_KUBE_CONTEXTS"
fi

# Additional contexts should be in ~/.kube/custom-contexts/
CUSTOM_KUBE_CONTEXTS="$HOME/.kube/custom-contexts"
mkdir -p "${CUSTOM_KUBE_CONTEXTS}"

OIFS="$IFS"
IFS=$'\n'
for contextFile in `find "${CUSTOM_KUBE_CONTEXTS}" -type f -name "*.yml"`
do
  export KUBECONFIG="$contextFile:$KUBECONFIG"
done
IFS="$OIFS"
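Usage note: this fragment is meant to be sourced from the shell rc (the path below is an assumption; adjust to wherever the file lives). Once sourced, every context under ~/.kube/custom-contexts is visible at the same time:

source ~/path/to/k8s-context.sh    # assumed location of this fragment
kubectl config get-contexts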
kbCheck.sh (new file)
@@ -0,0 +1,18 @@
#!/bin/bash

IFS=$'\n\t'

kbNodeList=(
"db1"
"db2"
"db3"
)

for kbNode in "${kbNodeList[@]}"; do
    COMMAND="$(ssh "$kbNode" uptime)"
    echo "Load on $kbNode:" "$COMMAND"
done

kubectl get nodes -o wide

kubectl get pods -A -o wide
librenms-bulkAdd.sh (new file)
@@ -0,0 +1,13 @@
#!/bin/bash

hostsToAdd=(
pfv-vmsrv-06.turnsys.net
)

IFS=$'\n\t'

for nodeToAdd in "${hostsToAdd[@]}"; do
    ./delhost.php "$nodeToAdd"
    ./addhost.php "$nodeToAdd" kn3lmgmt ; ./discovery.php -h "$nodeToAdd" kn3lmgmt ; ./poller.php -h "$nodeToAdd" kn3lmgmt &
done
lookup_table.sh (new file)
@@ -0,0 +1,13 @@
function lookup_table_<thing>()
{
#Description: Lookup key value pairs in a text file
#Arguments:
#<thing>

#Returns/outputs:
#<value>

export <key>=$(grep $<keyname> <path to file> | awk -F ',' '{print $2}')

}
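A concrete sketch of the template above, with the placeholders turned into parameters (the function name and the file format - a two-column key,value CSV - are assumptions):

function lookup_table()
{
    #Arguments: 1) key to look up  2) path to a key,value CSV file
    local key="$1"
    local file="$2"
    grep "^${key}," "$file" | awk -F ',' '{print $2}'
}

# Example (illustrative file):
# lookup_table db1 /etc/hosts-lookup.csv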
makePdf.sh (new file)
@@ -0,0 +1,8 @@
#!/bin/bash

pandoc \
< "$1" \
--from=markdown \
--number-sections \
--toc \
--output="$1.pdf"
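Example (filename illustrative):

./makePdf.sh README.md    # writes README.md.pdf with numbered sections and a TOC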
mkHomeDir.sh (new file)
@@ -0,0 +1,12 @@
#!/bin/bash

#Paths to link into the home directory (list left empty, to be filled in)
itemsToLink=(

)

for itemToLink in "${itemsToLink[@]}";
do
    echo "making link for $itemToLink..."
    #Assumption: link each item into the current directory under its own name
    ln -s "$itemToLink"
done
mutt2task.sh (new file)
@@ -0,0 +1,13 @@
#!/bin/bash
while IFS= read -r line; do
    if echo "$line" | grep -q "^From:" ; then
        # Only take the first line starting with "From: "
        if [ -z "$F" ] ; then
            F=`echo "$line" | grep 'From:' | awk -F: '{print $2}'`
        fi
    fi
    if echo "$line" | grep -q "^Subject:" ; then
        S=`echo "$line" | grep 'Subject:' | awk -F: '{print $2}'`
    fi
done
task add +email due:today "E-mail $S (from $F)"
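Usage note: the script reads a raw message on stdin, so it is normally wired to a mutt macro that pipes the current message into it. A hedged sketch for ~/.muttrc (keybinding and script path are assumptions):

macro index,pager T "<pipe-message>~/bin/mutt2task.sh<enter>" "add taskwarrior task from mail"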
new-server-bootstrap.sh (new file)
@@ -0,0 +1,93 @@
#!/bin/bash

############################################################################################
#Boilerplate notes
# This serves as well-tested boilerplate entrypoint control logic that can handle execution
# across multiple distributions and versions (centos/ubuntu), presuming you have the distro
# script installed.
############################################################################################


############################################################################################
#Step 1: determine our mgmt interface, ip address, environment subnet, domain name
############################################################################################

#99% of the time eth0 is the mgmt int and has a default route. But not always. Hence the need for this code:
export DEFAULT_ROUTE=$(netstat -rn |grep 0.0.0.0|awk '{print $NF}' |head -n1 )

#Vince - added because MGMT_INT is referred to in the MGMT_IP line below
export MGMT_INT=$(netstat -rn |grep 0.0.0.0|awk '{print $NF}' |head -n1 )

export MGMT_IP=$(ifconfig $MGMT_INT|grep 'inet addr'|awk -F ':' '{print $2}'|awk '{print $1}')
export IP=$(echo $MGMT_IP|awk -F '.' '{print $2}')
export DOMAIN_NAME=$(hostname -d)

############################################################################################
#Step 2: Fix up the /etc/hosts file, this is the root of much evil
############################################################################################
#Static /etc/hosts bits

#Dynamic /etc/hosts bits
#added -s to hostname to account for FQDN in ks file

export FULLHOST=$(hostname -f)
export SHORTHOST=$(hostname -s)

cat > /etc/hosts <<HOSTFILEDYNAMIC
127.0.1.1 $FULLHOST $SHORTHOST
$MGMT_IP $FULLHOST $SHORTHOST
HOSTFILEDYNAMIC

cat >> /etc/hosts << HOSTFILESTATIC
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
HOSTFILESTATIC

############################################################################################
#Step 3: determine distro
############################################################################################
DISTRO_TYPE="$(distro |awk '{print $1}'|tr '[:upper:]' '[:lower:]')"
DISTRO_VERSION=$(distro |awk '{print $2}'|awk -F '.' '{print $1}')


############################################################################################
#Step 4: Register system with librenms
############################################################################################
CURL_STRING="{\"hostname\":\"$(hostname -f)\",\"version\":\"v2c\",\"community\":\"$SNMP_COMMUNITY\"}"

curl \
--insecure \
-X POST \
-d "$CURL_STRING" \
-H "X-Auth-Token: $TOKEN" \
$LIBRENMS_ENDPOINT/api/v0/devices

############################################################################################
#Step 5: Call a rundeck job
############################################################################################
curl \
--insecure \
-X POST \
-H "X-Rundeck-Auth-Token: $RUNDECK_TOKEN" \
$RDECK_BASE_URL/job/$JOB_ID/run

############################################################################################
#Step 6: Do stuff based on distribution type and version
############################################################################################

if [ "$DISTRO_TYPE" == "centos" ] && [ "$DISTRO_VERSION" == 6 ] ;
then
c6stuff
fi

if [ "$DISTRO_TYPE" == "centos" ] && [ "$DISTRO_VERSION" == 7 ] ;
then
c7stuff
fi

if [ "$DISTRO_TYPE" == "ubuntu" ] && [ "$DISTRO_VERSION" == 14 ] ;
then
ub14stuff
fi
newHomeSetup.sh (new file)
@@ -0,0 +1,69 @@
#!/bin/bash

# A script to set up Charles' home directory on a new system

# Pre-requisite: in whatever user directory, create a symbolic link called charles pointing to wherever the charles data drive is:
# > ls -l ~/charles
#lrwxrwxrwx 1 pi pi 55 May 29 07:49 /home/pi/charles -> /media/pi/7f738693-22c7-482f-a75f-2af788ffb8921/charles

cd ~

#this directory needs to be created
mkdir smb

#Symlinks follow

#rcFiles

ln -s charles/dotfiles/rcfiles/bash_history .bash_history
ln -s charles/dotfiles/rcfiles/bash_logout .bash_logout
ln -s charles/dotfiles/rcfiles/bashrc .bashrc
ln -s charles/dotfiles/rcfiles/davmail.log davmail.log
ln -s charles/dotfiles/rcfiles/davmail.properties .davmail.properties
ln -s charles/dotfiles/rcfiles/dmrc .dmrc
ln -s charles/dotfiles/rcfiles/zshenv .zshenv
ln -s charles/dotfiles/rcfiles/zsh_history .zsh_history
ln -s charles/dotfiles-git/rcfiles/zshrc .zshrc
ln -s charles/dotfiles-git/rcfiles/gitconfig .gitconfig
ln -s charles/dotfiles/rcfiles/offlineimaprc .offlineimaprc
ln -s charles/dotfiles-git/rcfiles/p10k.zsh .p10k.zsh
ln -s charles/dotfiles/rcfiles/profile .profile
ln -s charles/dotfiles/rcfiles/ucsmb .ucsmb
ln -s charles/dotfiles/rcdirs/viminfo .viminfo
ln -s charles/dotfiles/rcdirs/vimrc .vimrc

#rcDirectories
ln -s charles/dotfiles/rcdirs/arduino arduino
ln -s charles/dotfiles/rcdirs/bundle .bundle
ln -s charles/dotfiles/rcdirs/cargo .cargo
ln -s charles/dotfiles/rcdirs/cache .cache
ln -s charles/dotfiles/rcdirs/config .config
ln -s charles/dotfiles/rcdirs/gem .gem
ln -s charles/dotfiles/rcdirs/gnupg .gnupg
ln -s charles/dotfiles/rcdirs/iceworks .iceworks
ln -s charles/dotfiles/rcdirs/java .java
ln -s charles/dotfiles/rcdirs/kde .kde
ln -s charles/dotfiles/rcdirs/kube/ .kube
ln -s charles/dotfiles/rcdirs/local .local
ln -s charles/dotfiles/rcdirs/mozilla .mozilla
ln -s charles/dotfiles/rcdirs/msf4 .msf4
ln -s charles/dotfiles/rcdirs/mume .mume
ln -s charles/dotfiles/rcdirs/npm .npm
ln -s charles/dotfiles/rcdirs/offlineimap .offlineimap
ln -s charles/dotfiles/rcdirs/oh-my-zsh oh-my-zsh
ln -s charles/dotfiles/rcdirs/pki .pki
ln -s charles/dotfiles/rcdirs/pp_backup .pp_backup
ln -s charles/dotfiles/rcdirs/rustup .rustup
ln -s charles/dotfiles/rcdirs/sane .sane
ln -s charles/dotfiles-git/rcdirs/ssh/ .ssh
ln -s charles/dotfiles/rcdirs/thunderbird .thunderbird
ln -s charles/dotfiles/rcdirs/vim .vim
ln -s charles/dotfiles/rcdirs/vs-kubernetes .vs-kubernetes
ln -s charles/dotfiles/rcdirs/vscode .vscode


#nonrcDirectories
ln -s charles/Downloads Downloads
ln -s charles/go go
ln -s charles/sketchbook sketchbook
newSrv.sh (new file)
@@ -0,0 +1,54 @@
#!/bin/bash

#curl -s http://dl.turnsys.net/newSrv.sh|/bin/bash

apt-get -y --purge remove nano
apt-get -y install ntp ntpdate
systemctl stop ntp
ntpdate 10.251.37.5
apt-get update
apt-get -y full-upgrade
apt-get -y install glances htop dstat snmpd screen lldpd lsb-release libpcre2-dev libevent-dev


rm -rf /usr/local/librenms-agent

curl -s http://dl.turnsys.net/librenms-agent/distro > /usr/local/bin/distro
chmod +x /usr/local/bin/distro

curl -s http://dl.turnsys.net/librenms.tar.gz > /usr/local/librenms.tar.gz
cd /usr/local ; tar xfz librenms.tar.gz

systemctl stop snmpd ; curl -s http://dl.turnsys.net/snmpd.conf > /etc/snmp/snmpd.conf

sed -i "s|-Lsd|-LS6d|" /lib/systemd/system/snmpd.service
systemctl daemon-reload
systemctl restart snmpd

/etc/init.d/rsyslog stop

cat <<EOF> /etc/rsyslog.conf
# /etc/rsyslog.conf configuration file for rsyslog
#
# For more information install rsyslog-doc and see
# /usr/share/doc/rsyslog-doc/html/configuration/index.html


#################
#### MODULES ####
#################

module(load="imuxsock") # provides support for local system logging
module(load="imklog") # provides kernel logging support
#module(load="immark") # provides --MARK-- message capability

*.* @10.251.30.1:514
EOF

/etc/init.d/rsyslog start
logger "hi hi from $(hostname)"


bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait
next_apointment.py (new file)
@@ -0,0 +1,47 @@
#!/usr/bin/env python
from datetime import datetime, timedelta
from dateutil import parser
import re
from sys import exit
import caldav

# time offset
time_offset = 2
# user
cal_user = 'danielh'
# password
cal_passwd = 'foobar'
# define your caldav URL here
caldav_url = \
    "http://{0}:{1}@office.hauck.it/remote.php/caldav/calendars/danielh/personal"\
    .format(cal_user, cal_passwd)

# connect to your caldav instance
def connect(url):
    client = caldav.DAVClient(url)
    principal = client.principal()
    return principal.calendars()[0]

# get your next appointment for today

def parse_eventdata(event):
    parsed_event = {}
    for item in event.data.split("\n"):
        if re.match(r"^DTSTART\;", item):
            dto = parser.parse(item.split(";")[1].split(":")[1])\
                + timedelta(hours=time_offset)
        if re.match(r"^SUMMARY\:", item):
            title = item.split(":")[1]
    return {
        "title": title,
        "start": dto.strftime("%H:%M"),
    }

try:
    calendar = connect(caldav_url)
    latest_event = calendar.date_search(datetime.utcnow(), datetime.now().date() + timedelta(days=1))[-1]
except IndexError:
    print("Nothing to do")
    exit(0)

print('{start} {title}'.format(**parse_eventdata(latest_event)))
obtain_centos_mac.sh (new file)
@@ -0,0 +1,17 @@
function obtain_centos_mac()
{

DISTRO_TYPE="$(distro |awk '{print $1}'|tr '[:upper:]' '[:lower:]')"
DISTRO_VERSION=$(distro |awk '{print $2}'|awk -F '.' '{print $1}')

if [ "$DISTRO_TYPE" == "centos" ] && [ "$DISTRO_VERSION" == 6 ] ;
then
/sbin/ifconfig eth0|grep HWadd| awk '{print $NF}'|tr '[:upper:]' '[:lower:]'|sed 's/\:/-'/g
fi

if [ "$DISTRO_TYPE" == "centos" ] && [ "$DISTRO_VERSION" == 7 ] ;
then
/sbin/ifconfig eth0|grep ether| awk '{print $2}'|tr '[:upper:]' '[:lower:]'|sed 's/\:/-'/g
fi

}
omsa.sh (new file)
@@ -0,0 +1,34 @@
#!/bin/bash

#curl -s http://dl.turnsys.net/omsa.sh|/bin/bash

gpg --keyserver hkp://pool.sks-keyservers.net:80 --recv-key 1285491434D8786F
gpg -a --export 1285491434D8786F | apt-key add -
echo "deb http://linux.dell.com/repo/community/openmanage/930/bionic bionic main" > /etc/apt/sources.list.d/linux.dell.com.sources.list
wget http://archive.ubuntu.com/ubuntu/pool/universe/o/openwsman/libwsman-curl-client-transport1_2.6.5-0ubuntu3_amd64.deb
wget http://archive.ubuntu.com/ubuntu/pool/universe/o/openwsman/libwsman-client4_2.6.5-0ubuntu3_amd64.deb
wget http://archive.ubuntu.com/ubuntu/pool/universe/o/openwsman/libwsman1_2.6.5-0ubuntu3_amd64.deb
wget http://archive.ubuntu.com/ubuntu/pool/universe/o/openwsman/libwsman-server1_2.6.5-0ubuntu3_amd64.deb
wget http://archive.ubuntu.com/ubuntu/pool/universe/s/sblim-sfcc/libcimcclient0_2.2.8-0ubuntu2_amd64.deb
wget http://archive.ubuntu.com/ubuntu/pool/universe/o/openwsman/openwsman_2.6.5-0ubuntu3_amd64.deb
wget http://archive.ubuntu.com/ubuntu/pool/multiverse/c/cim-schema/cim-schema_2.48.0-0ubuntu1_all.deb
wget http://archive.ubuntu.com/ubuntu/pool/universe/s/sblim-sfc-common/libsfcutil0_1.0.1-0ubuntu4_amd64.deb
wget http://archive.ubuntu.com/ubuntu/pool/multiverse/s/sblim-sfcb/sfcb_1.4.9-0ubuntu5_amd64.deb
wget http://archive.ubuntu.com/ubuntu/pool/universe/s/sblim-cmpi-devel/libcmpicppimpl0_2.0.3-0ubuntu2_amd64.deb
dpkg -i libwsman-curl-client-transport1_2.6.5-0ubuntu3_amd64.deb
dpkg -i libwsman-client4_2.6.5-0ubuntu3_amd64.deb
dpkg -i libwsman1_2.6.5-0ubuntu3_amd64.deb
dpkg -i libwsman-server1_2.6.5-0ubuntu3_amd64.deb
dpkg -i libcimcclient0_2.2.8-0ubuntu2_amd64.deb
dpkg -i openwsman_2.6.5-0ubuntu3_amd64.deb
dpkg -i cim-schema_2.48.0-0ubuntu1_all.deb
dpkg -i libsfcutil0_1.0.1-0ubuntu4_amd64.deb
dpkg -i sfcb_1.4.9-0ubuntu5_amd64.deb
dpkg -i libcmpicppimpl0_2.0.3-0ubuntu2_amd64.deb

apt update
apt -y install srvadmin-all
touch /opt/dell/srvadmin/lib64/openmanage/IGNORE_GENERATION

#logout,login, then run
# srvadmin-services.sh enable && srvadmin-services.sh start
phpipam_api_key.sh (new file)
@@ -0,0 +1,21 @@
function api_key_phpiahm()
{
#Description: obtain api key from phpipam for future operations
#Arguments: none
#Outputs: api key

curl \
--silent \
-X POST \
--user $UN:$PW \
-H "Content-Type: application/xml" \
$PHPIPAM_BASE_URL/user/ > /tmp/phpipam/$CURR_EX_VAR-token.xml

export API_TOKEN=$(while read_dom; do
if [[ $ENTITY = "token" ]]; then
echo $CONTENT
fi
done < /tmp/phpipam/$CURR_EX_VAR-token.xml)
rm -f /tmp/phpipam/$CURR_EX_VAR-token.xml
}
phpipam_hostname_detail.sh (new file)
@@ -0,0 +1,52 @@
function phpipam_hostname_detail()
{
#Description: lookup network details from a hostname
#Arguments: hostname
#output: IP address, netmask, gw

#Lookup TS hostname/IP in phpipam
IP_XML=$(curl \
--silent \
-X GET \
--user $UN:$PW \
-H "Content-Type: application/xml" \
-H "token:${API_TOKEN}" \
$PHPIPAM_BASE_URL/api/$APP_ID/addresses/search_hostname/$1/ > /tmp/phpipam/$CURR_EX_VAR-ip.xml
)

PC_IP=$(while read_dom; do
if [[ $ENTITY = "ip" ]]; then
echo $CONTENT
fi
done < /tmp/phpipam/$CURR_EX_VAR-ip.xml)

SUBNET_ID=$(while read_dom; do
if [[ $ENTITY = "subnetId" ]]; then
echo $CONTENT
fi
done < /tmp/phpipam/$CURR_EX_VAR-ip.xml)


#Use subnet id to determine netmask and gateway
curl \
--silent \
-X GET \
--user $UN:$PW \
-H "Content-Type: application/xml" \
-H "token:${API_TOKEN}" \
$PHPIPAM_BASE_URL/api/$APP_ID/subnets/$SUBNET_ID/ > /tmp/phpipam/$CURR_EX_VAR-subnet.xml

export PC_NETMASK=$(while read_dom; do
if [[ $ENTITY = "Subnet_netmask" ]]; then
echo $CONTENT
fi
done < /tmp/phpipam/$CURR_EX_VAR-subnet.xml)

export PC_GATEWAY=$(while read_dom; do
if [[ $ENTITY = "ip_addr" ]]; then
echo $CONTENT
fi
done < /tmp/phpipam/$CURR_EX_VAR-subnet.xml)

}
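Environment the two phpipam helpers above expect (variable names taken from the functions; values are illustrative). They also rely on a read_dom XML helper being loaded; compare read_xml_dom.sh later in this commit, which defines the equivalent function under a slightly different name:

export UN="apiuser" PW="secret"
export PHPIPAM_BASE_URL="https://phpipam.example.com"
export APP_ID="myapp"
export CURR_EX_VAR="$$"
mkdir -p /tmp/phpipam

api_key_phpiahm                        # sets API_TOKEN
phpipam_hostname_detail pfv-vmsrv-03   # sets PC_NETMASK and PC_GATEWAY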
prox.sh (new file)
@@ -0,0 +1,11 @@
#!/bin/bash

rm -f /etc/apt/sources.list.d/*
echo "deb http://download.proxmox.com/debian/pve buster pve-no-subscription" > /etc/apt/sources.list.d/pve-install-repo.list
wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
chmod +r /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg # optional, if you have a non-default umask
apt update && apt -y full-upgrade
apt-get -y install ifupdown2 ipmitool

curl -s http://dl.turnsys.net/newSrv.sh|/bin/bash
prox7.sh (new file)
@@ -0,0 +1,13 @@
#!/bin/bash

rm -f /etc/apt/sources.list.d/*
echo "deb http://download.proxmox.com/debian/pve bullseye pve-no-subscription" > /etc/apt/sources.list.d/pve-install-repo.list
wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
chmod +r /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg # optional, if you have a non-default umask
apt update && apt -y full-upgrade
apt-get -y install ifupdown2 ipmitool ethtool net-tools lshw

#curl -s http://dl.turnsys.net/newSrv.sh|/bin/bash
read_xml_dom.sh (new file)
@@ -0,0 +1,17 @@
function read_xml_dom ()
{

#Description: Helper function for reading xml from stdin in bash

#Arguments: none

#Returns: nothing


local IFS=\>

read -d \< ENTITY CONTENT

}
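Example of the helper in use, mirroring how the phpipam scripts iterate over a response (the file path is illustrative; note those scripts call the helper as read_dom):

while read_xml_dom; do
    if [[ $ENTITY = "token" ]]; then
        echo "$CONTENT"
    fi
done < /tmp/phpipam/response.xml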
rpc_ssh.sh (new file)
@@ -0,0 +1,33 @@
function rpc_ssh()
{
if ! args=("$(getopt -l "rmthost:,rmthostport:,rmtlogin:,pushvars:,pushfuncs:,rmtmain:" -o "h:p:u:v:f:m:A" -- "$@")")
then
exit 1
fi

sshvars=( -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ~jigmaker/jigmaker/keys/TS_root.key )
eval set -- "${args[@]}"
while [ -n "$1" ]
do
case $1 in
-h|--rmthost) rmthost=$2; shift; shift;;
-p|--rmtport) sshvars=( "${sshvars[@]}" -p $2 ); shift; shift;;
-u|--rmtlogin) rmtlogin=$2; shift; shift;;
-v|--pushvars) pushvars=$2; shift; shift;;
-f|--pushfuncs) pushfuncs=$2; shift; shift;;
-m|--rmtmain) rmtmain=$2; shift; shift;;
-A) sshvars=( "${sshvars[@]}" -A ); shift;;
-i) sshvars=( "${sshvars[@]}" -i $2 ); shift; shift;;
--) shift; break;;
esac
done
rmtargs=( "$@" )

ssh ${sshvars[@]} ${rmtlogin}@${rmthost} "
$(declare -p rmtargs 2>/dev/null)
$([ -n "$pushvars" ] && declare -p $pushvars 2>/dev/null)
$(declare -f $pushfuncs 2>/dev/null)
$rmtmain \"\${rmtargs[@]}\"
#$rmtmain {rmtargs[@]}
"
}
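Example use (host, login and values are illustrative; the key path baked into sshvars above is what actually authenticates): push a variable and a function to the remote side and run it there:

greet() { echo "hello from $(hostname), build id $BUILD_ID"; }
BUILD_ID=42
rpc_ssh -h build01.example.com -u root -v BUILD_ID -f greet -m greet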
searchLdap.sh (new file)
@@ -0,0 +1,13 @@
#!/bin/bash
#A script to search LDAP

#LDAP_SERVER=$(set |grep LOGONSERVER|awk -F '=' '{print $2}'|sed -e 's/\'//g')
#echo $LDAP_SERVER

LDAP_SERVER="ldap.hp.com"
LDAPSEARCH_OPTIONS="-x -LLL"


#ldapsearch $LDAPSEARCH_OPTIONS -b "o=hp.com" -s sub -H ldaps://$LDAP_SERVER "(uid=wyble@hp.com)" cn mail displayName samaccountna
#ldapsearch $LDAPSEARCH_OPTIONS -h $LDAP_SERVER -b "o=hp.com" uid=chris.radosh@hp.com directReports
ldapsearch -LLL -x -W -H ldaps://g3w0044.americas.hpqcorp.net:3269 -b "dc=cpqcorp,dc=net" -D wyblehp.com mail=wyble@hp.com uid
setForPxe.sh (new file)
@@ -0,0 +1,21 @@
#!/bin/bash

echo "setting bmc for pxe boot..."

export ISHP="$(dmidecode -t System|grep Manufacturer|grep HP -c)"
export ISDELL="$(dmidecode -t System|grep Manufacturer|grep Dell -c)"

#Set BMC to PXE

if [ $ISHP -eq 1 ]; then
hpbootcfg -P
fi

if [ $ISDELL -eq 1 ]; then
ipmitool chassis bootparam set bootflag force_pxe
fi

#Reboot the system
echo "re-booting..."
/sbin/reboot
test-subosys-access.sh (new file)
@@ -0,0 +1,25 @@
#!/bin/bash

IFS=$'\n\t'

suboNodeList=(
#"subopi1" #MorsePod m6
#"subopi2" #seeduino lora gateway hat
#"subopi3" # no hat
#"subopi4" #dragino hat
#"subopi5" #pi sense hat
#"subopi6" #pi sense hat
#"subobench" #benchtop computer
"subodev" #develop workloads
"suboqa" #test workloads
"suboprod" #run actual workloads here
"buildbox" #build all the things
)

for suboNode in "${suboNodeList[@]}"; do
    COMMAND="$(ssh "$suboNode" md5sum .ssh/authorized_keys)"
    echo "Testing ssh authorized_keys on $suboNode" "$COMMAND"
    #COMMAND="$(ssh $suboNode uptime)"
    #echo "Testing load on $suboNode" $COMMAND
done
up2date.sh (new file)
@@ -0,0 +1,7 @@
apt-get -y --purge autoremove
apt-get update
apt-get -y upgrade
apt-get -y dist-upgrade
apt-get -y full-upgrade
apt-get -y --purge autoremove
apt-get clean
zshrc-include-cnw.sh (new file)
@@ -0,0 +1,73 @@
#Customized zshrc
# CNW
# Last updated 03/01/2021
# merging my own bits and some stuff from oh-my-zsh, powerline etc

# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#

source ~/Nextcloud/bin/shell-frags/alias.sh

###############
#My path is where I walk, not where you walk
###############

PATHDIRS=(
/usr/local/go/bin
~/Nextcloud/bin
~/Nextcloud/bin/apps/habitctl/target/release
~/dotfiles-git/bin
~/dotfiles-git/3rdparty/git-scripts
)

for dir in $PATHDIRS; do
if [ -d $dir ]; then
path+=$dir
fi
done
###############
# Creature comforts
###############

#vi 24x7 yo, this isn't a holiday inn (last night, it is now)
#
bindkey -v
set -o vi

if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='vim'
fi


HISTSIZE=5000 #How many lines of history to keep in memory
HIST_STAMPS="mm/dd/yyyy"
HISTFILE=~/.zsh_history #Where to save history to disk
SAVEHIST=5000000 #Number of history entries to save to disk
HISTDUP=erase #Erase duplicates in the history file
setopt appendhistory #Append history to the history file (no overwriting)
setopt incappendhistory #Immediately append to the history file, not just when a term is killed

ENABLE_CORRECTION="true"
COMPLETION_WAITING_DOTS="true"
DISABLE_UNTRACKED_FILES_DIRTY="true"
export LANG=en_US.UTF-8

#####################################################
#Personal host variables
#####################################################
#Eventually we'll move to ldap/no shared accounts/forced sudo. Blech. Corporate tyranny!
#For now, the easy way. HAH!
CON_USER="charlesnw"
INBAND_USER="charlesnw"
PERSONAL_OOB_USER="root"
PERSONAL_INBAND_USER="root"
#####################################################

#DO NOT Share history across terminals
unsetopt sharehistory
setopt no_share_history