$ echo {hack,build} |
hack build
http://cn.linux.vbird.org/linux_basic/0320bash_2.php#variable_other 我们将这部份作个总结说明一下:
|
$ image="library/nginx:1.19" # 比如要获取镜像的 tag 常用的是 echo 然后 awk/cut 的方式 $ echo ${image} | awk -F ':' '{print $2}' 方式 # 可以直接使用 bash 内置的变量替换功能,截取特定字符串 $ image_name=${image%%:*} $ image_tag=${image##*:} $ image_repo=${image%%/*} |
变量配置方式 | str 没有配置 | str 为空字符串 | str 已配置非为空字符串 |
---|---|---|---|
var=${str-expr} | var=expr | var= | var=$str |
var=${str:-expr} | var=expr | var=expr | var=$str |
var=${str+expr} | var= | var=expr | var=expr |
var=${str:+expr} | var= | var= | var=expr |
var=${str=expr} | str=expr var=expr | str 不变 var= | str 不变 var=$str |
var=${str:=expr} | str=expr var=expr | str=expr var=expr | str 不变 var=$str |
var=${str?expr} | expr 输出至 stderr | var= | var=$str |
var=${str:?expr} | expr 输出至 stderr | expr 输出至 stderr | var=$str |
# Check whether ${sub_string} occurs inside ${var}.

# Method 1: glob pattern match — a *pattern* on the right of == inside
# [[ ]] performs a substring match (the original comment said "**", but
# the construct is the two surrounding '*' wildcards).
if [[ "${var}" == *"${sub_string}"* ]]; then
    printf '%s\n' "sub_string is in var."
fi

# Method 2: bash built-in =~ regex operator.
# BUGFIX: the haystack belongs on the LEFT and the needle on the RIGHT;
# the original had the operands reversed, which tests whether var occurs
# inside sub_string. Quoting the right-hand side makes it a literal match.
if [[ "${var}" =~ "${sub_string}" ]]; then
    printf '%s\n' "sub_string is in var."
fi
#!/bin/bash
# Show `top` output restricted to processes matching a keyword.
# Usage: sh script.sh <keyword> [iterations]
#   $1 - process-name keyword (required)
#   $2 - number of top batch iterations (default: 3)
if [ -z "${1}" ]; then
  # "$1" is quoted so the test stays well-formed for empty/space-containing args.
  echo -e "\033[1;31m$(date '+%Y-%m-%d %H:%M:%S.%N' | cut -b 1-23) 进程名关键字不可为空:\n语法:sh $0 java \033[0m"
else
  echo -e "\033[1;34m$(date '+%Y-%m-%d %H:%M:%S.%N' | cut -b 1-23) 正在根据关键字分析进程:${1}\033[0m"
  # Build "-p PID" arguments from ps output — excluding the grep itself and
  # this script — then run top in batch mode against just those PIDs.
  # The $( ) result is intentionally unquoted: it must word-split into
  # separate "-p" "PID" arguments.
  top -bn "${2:-3}" $(ps -ef | grep -Ev "grep|$0" | grep -w "${1}" | awk '{print " -p " $2}')
fi
#!/bin/bash
# Generate a random password (substring-extraction version).
# Example: bash passgen.sh -l 18

# Character pool (the "password library"); add more characters as needed.
key="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Pool length via ${#var} string-length expansion.
# (The original comment wrongly said "$#", which is the argument count.)
num=${#key}
# Start with an empty password; passgen appends to this global.
pass=''

# passgen [length] - append `length` (default 8) random characters picked
# from the pool to the global `pass`, then print the colored result.
function passgen() {
  for ((i = 1; i <= ${1:-8}; i++)); do
    # RANDOM % num keeps the index inside the pool bounds.
    index=$((RANDOM % num))
    pass=$pass${key:$index:1}
  done
  echo -e "\033[1;34m生成\033[1;33m${1:-8}位\033[0m\033[1;32m随机密码:\033[0m\033[0m\033[1;31m${pass}\033[0m"
}

# main - option parsing: -l <len> generates a password of that length,
# -u <user> additionally sets it on a local account (root only).
function main() {
  while getopts "l:u:" arg; do  # trailing ':' means the option takes an argument
    case $arg in
      l)
        Long=${OPTARG}
        passgen "${Long}"
        ;;
      u)
        # Only root can change another user's password.
        if [ "${UID}" -ne '0' ]; then
          echo -e "\033[1;31m仅root用户能修改密码,请以root用户运行\033[0m"
          exit
        fi
        echo -e "\033[1;34m正在为用户${OPTARG}生成配置随机密码\033[0m"
        username=${OPTARG}
        echo "$pass" | passwd --stdin "${username}" >/dev/null
        if [ ${?} -eq '0' ]; then
          echo -e "\033[1;32m用户名:\033[0m\033[1;33m${username}\033[0m,\033[1;34m密码:\033[0m\033[1;31m${pass}\033[0m"
        fi
        ;;
      ?)  # unrecognized option
        echo -e "\033[1;31m无法识别\033[0m"
        exit
        ;;
    esac
  done
}

# With no arguments, print a default 8-character password (menu path).
if [ -z "$1" ]; then
  passgen
  echo -e "\033[1;32m随机生成\033[1;34m${1:-8}位\033[1;31m复杂密码:\033[1;33m${pass}\033[0m"
fi
main "${@}"
#!/bin/bash
#********************************************************
#author(作者):
#version(版本): 2022-11-30
#date(时间): 2022年11月30日 星期三 10时42分58秒
#FileName(文件名): adu.sh
#description(描述): report GB-sized entries under a directory with `du`,
#                   excluding every other mounted filesystem.
#
# Usage: sh adu.sh [dir] [depth]
#   e.g. analyse GB-level objects under /home with depth 10:
#        sh adu.sh /home 10
#********************************************************

# Target path (default: /).
dir=${1:-/}
# Maximum directory depth for du (default: 5).
depth=${2:-5}

# V3.0
# Build one "--exclude=<mountpoint>" option per mounted filesystem other
# than the target, so du does not descend into other mounts.
# NR>1 skips the df header line; $6 is the mount point column.
exclude_dir=$(df | grep -wv ${dir} | awk 'NR>1{print "--exclude="$6""}')

echo -e "\033[1;34m$(date '+%Y-%m-%d %H:%M:%S.%N' | cut -b 1-23) 正在分析计算目录${dir}大小,深度为${depth}\033[0m"
echo -e "\033[1;31m时间可能较长,请耐心等待…… \033[0;33m"
# ${exclude_dir} is intentionally unquoted: it must word-split into
# separate --exclude options. Keep only entries whose size ends in "G".
du -h --max-depth=${depth} ${exclude_dir} ${dir} 2>/dev/null | awk '$1~/G$/{print $0}'
echo -e "\033[1;32m$(date '+%Y-%m-%d %H:%M:%S.%N' | cut -b 1-23) 分析计算结束 \033[0m"
功能:每5分钟去分析一下某个网站的html,并统计出5分钟之内超过60次的ip,将其生成一个txt文件
#!/bin/bash
# Every 5 minutes, scan an access log, count requests per .html page, and
# for pages with more than THRESHOLD hits append the requesting IPs and
# their counts to OUTPUT_FILE.
LOG_FILE="/path/to/access.log"
OUTPUT_FILE="/path/to/output.txt"
THRESHOLD=60
INTERVAL=5  # minutes

while true; do
  # BUGFIX: re-initialize with =() each cycle. A bare `declare -A` at top
  # level keeps the previous iteration's counts, so every report would
  # accumulate totals since script start instead of per-interval counts.
  declare -A page_counters=()

  # Select the log entries from the last 5 minutes.
  # NOTE(review): the sed range only works if entries with exactly these
  # timestamps exist in the log — confirm against the real log format.
  start_time=$(date -u --date="-5 minutes" "+[%d/%b/%Y:%H:%M:%S %z]")
  end_time=$(date -u "+[%d/%b/%Y:%H:%M:%S %z]")
  log_data=$(sed -n "/$start_time/,/$end_time/p" "$LOG_FILE")

  # Count visits per .html page (request path is field 7).
  while read -r line; do
    page=$(echo "$line" | awk '{print $7}')
    if [[ $page =~ \.html$ ]]; then
      ((page_counters[$page]++))
    fi
  done <<< "$log_data"

  # Per-IP counters, also reset each cycle.
  declare -A ip_counters=()

  # For each page above the threshold, count visits per client IP (field 1).
  for page in "${!page_counters[@]}"; do
    if [[ ${page_counters[$page]} -gt $THRESHOLD ]]; then
      page_data=$(echo "$log_data" | grep "$page")
      while read -r line; do
        ip=$(echo "$line" | awk '{print $1}')
        ((ip_counters[$ip]++))
      done <<< "$page_data"
    fi
  done

  # Append this cycle's IP counts to the report file.
  echo "IP counts for $(date -u)" >> "$OUTPUT_FILE"
  for ip in "${!ip_counters[@]}"; do
    count=${ip_counters[$ip]}
    echo "$ip: $count" >> "$OUTPUT_FILE"
  done

  # Sleep until the next interval.
  sleep $((INTERVAL * 60))
done
$ curl -fsSL https://get.docker.com -o get-docker.sh $ bash get-docker.sh --mirror Aliyun |
另外可通过传入 DRY_RUN 的参数来输出实际会执行的内容,这个输出的内容可以用来配置 docker-ce 的源,而不安装 docker。
$ DRY_RUN=1 sh ./get-docker.sh --mirror Aliyun > install.sh # Executing docker install script, commit: 7cae5f8b0decc17d6571f9f52eb840fbc13b2737 apt-get update -qq >/dev/null DEBIAN_FRONTEND=noninteractive apt-get install -y -qq apt-transport-https ca-certificates curl >/dev/null curl -fsSL "https://mirrors.aliyun.com/docker-ce/linux/debian/gpg" | apt-key add -qq - >/dev/null echo "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list apt-get update -qq >/dev/null apt-get install -y -qq --no-install-recommends docker-ce >/dev/null DEBIAN_FRONTEND=noninteractive apt-get install -y -qq docker-ce-rootless-extras >/dev/null |
$ curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash |
$ curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose $ chmod +x /usr/local/bin/docker-compose |
$ sed -i "/kube-node/a ${ip}" test |
在不使用 yq 或者 jq 的情况下,需要输出 downloads 列表中的所有内容,即 download: 和 download_defaults: 之间的内容
dashboard_image_repo: "{{ docker_image_repo }}/kubernetesui/dashboard-{{ image_arch }}" dashboard_image_tag: "v2.2.0" dashboard_metrics_scraper_repo: "{{ docker_image_repo }}/kubernetesui/metrics-scraper" dashboard_metrics_scraper_tag: "v1.0.6" downloads: dashboard: enabled: "{{ dashboard_enabled }}" container: true repo: "{{ dashboard_image_repo }}" tag: "{{ dashboard_image_tag }}" sha256: "{{ dashboard_digest_checksum|default(None) }}" groups: - kube_control_plane dashboard_metrics_scrapper: enabled: "{{ dashboard_enabled }}" container: true repo: "{{ dashboard_metrics_scraper_repo }}" tag: "{{ dashboard_metrics_scraper_tag }}" sha256: "{{ dashboard_digest_checksum|default(None) }}" groups: - kube_control_plane download_defaults: container: false file: false repo: None tag: None enabled: false dest: None version: None url: None |
可使用 sed 的方式进行匹配输出 sed -n "/$VAR1/,/$VAR2/p"(注意:必须使用双引号,单引号内的 $VAR1、$VAR2 不会被 shell 展开)
$ sed -n '/^downloads:/,/download_defaults:/p' |
接着上一个问题,通过 sed -n "s/repo: //p;s/tag: //p" 匹配出镜像的 repo 和 tag,但一个完整的镜像的格式是 repo:tag,因此需要将 repo 和 tag 行进行合并。
repo: "{{ dashboard_image_repo }}" tag: "{{ dashboard_image_tag }}" repo: "{{ dashboard_metrics_scraper_repo }}" tag: "{{ dashboard_metrics_scraper_tag }}" |
可使用 sed 'N;s#\n# #g' 进行奇偶行合并
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \ | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' | sed 's/{{/${/g;s/}}/}/g' \ | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/^/echo /g' >> ${TEMP_DIR}/generate.sh |
$ sed -i ':a;N;$!ba;s/\n/ /g' |
$ lsof -i | grep sshd | wc -l |
# grep 通过 -c 参数即可统计匹配行,不需要使用 wc 来统计
$ lsof -i | grep -c sshd |
$ egrep --only-matching -E '([[:digit:]]{1,3}\.){3}[[:digit:]]{1,3}' |
$ cat access.log | awk '{d[$1]++} END {for (i in d) print d[i],i}' | sort -nr | head |
# d 是一个字典,以$1 第一列作为key,value每次累加
# END 指处理完所有行,再执行后面的逻辑
# for(i in d)遍历d并打印key和value
$ ss -nat | awk 'NR>1 {d[$1]++} END {for (i in d) print d[i],i}' |
# NR>1 去除第一行
# {d[$1]++} 对第一列元素进行累加计数
# {for (i in d) print d[i],i} 打印出数组的元素和出现的次数
和 FROM scratch 搭配起来使用,就可以将构建产物 build 到本地
$ DOCKER_BUILDKIT=1 docker build -o type=local,dest=$PWD -f Dockerfile . |
比如使用 Dockerfile 构建 skopeo 静态链接文件
FROM nixos/nix:2.3.11 as builder WORKDIR /build COPY . . RUN nix build -f nix FROM scratch COPY --from=builder /build/result/bin/skopeo /skopeo |
DOCKER_BUILDKIT=1 docker build -o type=local,dest=$PWD . |
$ printf 'FROM scratch\nCOPY . .\n' | docker build -t files:tag -f - .  # bash 的 echo 默认不解释 \n,需用 printf(或 echo -e) |
构建好之后就把它 push 到一个 registry 中,另一个人同样通过 docker build 将该文件下载到本地
echo 'FROM files:tag' | DOCKER_BUILDKIT=1 docker build -o type=local,dest=$PWD -f - /dev/null |
$ kubectl get pods -A -o=custom-columns='IMAGE:spec.containers[*].image' | tr ',' '\n' | sort -u |
$ kubectl get events --all-namespaces -o wide --sort-by=.metadata.creationTimestamp |
$ kubectl get pod -n kube-system | awk '{print $1}' | xargs -L1 -I {} bash -c "kubectl -n kube-system logs {} > {}.log" |
$ kubectl get pod -n kube-system | grep -E "kube-apiserver|kube-controller|kube-proxy|kube-scheduler|coredns" | awk '{print $1}' | xargs -L1 -I {} sh -c "kubectl -n kube-system logs {} > {}.log" |
$ kubectl get nodes -o jsonpath='{ $.items[*].status.addresses[?(@.type=="InternalIP")].address }' |
$ kubectl get pods -o jsonpath='{ $.items[*].status.podIP }' |
$ kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' |
$ kubectl get svc --no-headers --all-namespaces -o jsonpath='{$.items[*].spec.clusterIP}' |
# cpu kubectl top pods --all-namespaces | sort --reverse --key 3 --numeric # memory kubectl top pods --all-namespaces | sort --reverse --key 4 --numeric |
# docker registry for offline resources - src: docker.io/library/registry dest: library/registry # helm chartmuseum for offline resources - src: ghcr.io/helm/chartmuseum dest: library/chartmuseum |
$ yq eval '.[]|select(.dest=="library/chartmuseum") | .src' images_origin.yaml |
version: '3.1' services: nginx: container_name: nginx image: nginx:1.20-alpine volumes: - ./resources/nginx:/usr/share/nginx ports: - 443:443 - 5000:5000 - 8080:8080 |
nginx_http_port="${NGINX_HTTP_PORT}:8080" yq eval --inplace '.services.nginx.ports[0] = strenv(nginx_http_port)' ${COMPOSE_YAML_FILE} registry_https_port="${REGISTRY_HTTPS_PORT}:443" yq eval --inplace '.services.nginx.ports[1] = strenv(registry_https_port)' ${COMPOSE_YAML_FILE} registry_push_port="${REGISTRY_PUSH_PORT}:5000" yq eval --inplace '.services.nginx.ports[2] = strenv(registry_push_port)' ${COMPOSE_YAML_FILE} |
# For every "cpm" resource, walk spec.modules[].addons[] until the addon
# named ${CHART_NAME} is found, then build PATCH_DATA: the same spec with
# that addon's version set to ${VERSION}.
# Inputs (set by caller): KUBECONFIG_ARG, RCTL_TMP_PATH, CHART_NAME, VERSION.
for pmd_name in $(kubectl ${KUBECONFIG_ARG} get cpm --no-headers | cut -d ' ' -f1); do
  CPMD_NAME="${pmd_name}"
  JSON="${RCTL_TMP_PATH}/${pmd_name}.json"
  # Dump the resource spec once to a temp file; all further queries run
  # locally with jq instead of hitting the API server repeatedly.
  kubectl ${KUBECONFIG_ARG} get cpm ${pmd_name} -o jsonpath='{.spec}' > ${JSON}
  # Last valid module index (length - 1).
  ((moudles_num=$(jq '.modules|length' ${JSON})-1))
  for i in $(seq 0 ${moudles_num}); do
    # Last valid addon index within module i.
    ((addons_num=$(jq ".modules[${i}].addons|length" ${JSON})-1))
    for j in $(seq 0 ${addons_num}); do
      addon_name=$(jq -r ".modules[${i}].addons[${j}].name" ${JSON})
      if [ "${CHART_NAME}" = "${addon_name}" ]; then
        # Compact (-c) patched spec for use as a patch payload; break 3
        # exits all three nested loops once the match is found.
        PATCH_DATA=$(jq -c ".modules[${i}].addons[${j}].version = \"${VERSION}\"" ${JSON})
        break 3
      fi
    done
  done
done
if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n ${coredns_version};else echo -n ${coredns_version/v/} fi |
$ openssl x509 -noout -text -in ca.cert |
$ stat -c '%s' file |
$ ip r get 1 | awk 'NR==1 {print $NF}' $ ip r get 1 | sed "s/uid.*//g" | awk 'NR==1 {print $NF}' |
一些 shell 脚本中常用的函数
避免 tar 解压文件的时候污染终端,建议使用进度条的方式展示解压过程
# untar <archive> <dest-dir> - extract a tarball while showing a compact
# checkpoint progress indicator instead of spamming the terminal with the
# extracted file names.
untar() {
  local file_size block_size tar_info
  file_size=$(stat -c '%s' "$1")
  # Arithmetic expansion replaces the two `expr` calls of the original;
  # +1 guarantees a non-zero blocking factor for small archives.
  block_size=$(( file_size / 51200 + 1 ))
  tar_info="Untar $1 progress:"
  # --checkpoint=1 with this blocking factor fires roughly once per
  # percent of the archive; ttyout writes in-place on a terminal and is
  # a no-op when stdout is not a tty.
  tar --blocking-factor="$block_size" --checkpoint=1 \
      --checkpoint-action=ttyout="${tar_info} %u% \r" -xpf "$1" -C "$2"
}
# match_ip <ips...> - extract and expand IPv4 addresses from the arguments.
# Plain addresses ("10.0.0.1") pass through; suffix ranges ("10.0.0.1-3")
# expand to every host in the span (10.0.0.1 10.0.0.2 10.0.0.3). Prints the
# resulting addresses space-separated.
match_ip() {
    local INPUT_IPS=$*
    local IPS=""
    # `grep -Eo` replaces the deprecated `egrep --only-matching -E` spelling
    # (grep's -E was doubled there anyway); behavior is identical.
    if ! echo ${INPUT_IPS} | grep -Eo '([[:digit:]]{1,3}\.){3}[[:digit:]]{1,3}-[[:digit:]]{1,3}' > /dev/null; then
        # No range syntax present: just collect every dotted quad.
        IPS="$(echo ${INPUT_IPS} | grep -Eo '([[:digit:]]{1,3}\.){3}[[:digit:]]{1,3}' | tr '\n' ' ')"
    else
        # Split "a.b.c.x-y" into prefix "a.b.c" and span "x y", then
        # enumerate every suffix in the span with seq.
        ip_prefix="$(echo ${INPUT_IPS} | grep -Eo '([[:digit:]]{1,3}\.){3}[[:digit:]]{1,3}-[[:digit:]]{1,3}' | cut -d '.' -f1-3)"
        ip_suffix="$(echo ${INPUT_IPS} | grep -Eo '([[:digit:]]{1,3}\.){3}[[:digit:]]{1,3}-[[:digit:]]{1,3}' | cut -d '.' -f4 | tr '-' ' ')"
        for suffix in $(seq ${ip_suffix}); do IPS="${IPS} ${ip_prefix}.${suffix}"; done
    fi
    # Final pass normalizes the list to space-separated dotted quads.
    echo ${IPS} | grep -Eo '([[:digit:]]{1,3}\.){3}[[:digit:]]{1,3}' | tr '\n' ' '
}
Host * StrictHostKeyChecking no UserKnownHostsFile=/dev/null ForwardAgent yes ServerAliveInterval 10 ServerAliveCountMax 10000 TCPKeepAlive no ControlMaster auto ControlPath ~/.ssh/session/%h-%p-%r ControlPersist 12h Host nas Hostname 172.20.0.10 Port 22 User root IdentityFile ~/.ssh/local-node.pem Host 172.20.0.* Port 22 User root IdentityFile ~/.ssh/local-node.pem Host *github.com Hostname github.com User git IdentityFile ~/.ssh/github_muzi.pem |
日常工作中常常需要 ssh 登录到机房的一些虚拟机上,又因为不同的机器密码不同,遂使用该脚本 ssh 登录到节点上。
#!/bin/bash
# Try a list of known passwords to ssh into a lab host, then run a command.
# Usage: script.sh <ip> [command]
IP=${1}
CMD=${2}
USER=root
# ${ARGS} is intentionally unquoted below: it must word-split into options.
ARGS="-o StrictHostKeyChecking=no -o ControlMaster=auto -o ControlPersist=12h -o ConnectionAttempts=100"
PASSWORDS="admin123456 test123456 centos1234"

# Drop any stale host key so a changed-key error does not block the probes.
ssh-keygen -R "${IP}" > /dev/null 2>&1

# Probe each candidate password with a cheap `hostname` call.
PASS=""
for pass in ${PASSWORDS}; do
  if sshpass -p "${pass}" ssh ${ARGS} "${USER}@${IP}" "hostname"; then
    PASS=${pass}
    break
  fi
done

# BUGFIX: the original ran the final ssh even when no password matched,
# attempting a login with an empty password. Fail loudly instead.
if [ -z "${PASS}" ]; then
  echo "no known password worked for ${IP}" >&2
  exit 1
fi

sshpass -p "${PASS}" ssh ${ARGS} "${USER}@${IP}" ${CMD}
exit 0
# Wall-clock timers: SEC0 anchors the whole run, SEC1 the current section;
# running_time reports both spans formatted as HH:MM:SS.

# _utc_now - print the current UTC time as epoch seconds.
_utc_now() {
  date --utc +%s
}

# Anchor the global (whole-run) timer.
reset_global_timer() {
  export SEC0=$(_utc_now)
}

# Anchor the per-section timer.
reset_function_timer() {
  export SEC1=$(_utc_now)
}

# Print the time elapsed since the section anchor, then since the global
# anchor. Rendering trick: format the difference-in-seconds as a time of
# day relative to the epoch (-d @N).
running_time() {
  SEC2=$(_utc_now)
  DIFFSEC=$((SEC2 - SEC1))
  printf "\nSection Time: $(date +%H:%M:%S -ud @${DIFFSEC})\n"
  SEC2=$(_utc_now)
  DIFFSEC=$((SEC2 - SEC0))
  printf "Elapsed Time: $(date +%H:%M:%S -ud @${DIFFSEC})\n\n"
}

reset_global_timer
reset_function_timer
running_time
$ dpkg-query -W -f='${binary:Package}=${Version}\n' |
ARCHITECTURE=$(uname -m) host_architecture=$(dpkg --print-architecture) |
使用华为云 yum 源
$ wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.huaweicloud.com/repository/conf/CentOS-7-anon.repo # 安装华为云 EPEL 源 yum install epel-release -y sed -i "s/#baseurl/baseurl/g" /etc/yum.repos.d/epel.repo sed -i "s/metalink/#metalink/g" /etc/yum.repos.d/epel.repo sed -i "s@https\?://download.fedoraproject.org/pub@https://mirrors.huaweicloud.com@g" /etc/yum.repos.d/epel.repo |
$ sed -i 's/deb.debian.org/mirrors.huaweicloud.com/g' /etc/apt/sources.list $ sed -i 's|security.debian.org/debian-security|mirrors.huaweicloud.com/debian-security|g' /etc/apt/sources.list |
$ sed -i 's/archive.ubuntu.com/mirrors.huaweicloud.com/g' /etc/apt/sources.list Alpine $ echo "http://mirrors.huaweicloud.com/alpine/latest-stable/main/" > /etc/apk/repositories $ echo "http://mirrors.huaweicloud.com/alpine/latest-stable/community/" >> /etc/apk/repositories |
$ update-ca-trust force-enable $ cp domain.crt /etc/pki/ca-trust/source/anchors/domain.crt $ update-ca-trust |
$ cp domain.crt /usr/share/ca-certificates/domain.crt $ echo domain.crt >> /etc/ca-certificates.conf $ update-ca-certificates |
$ cp domain.crt /usr/local/share/ca-certificates/domain.crt $ update-ca-certificates |
#!/bin/sh
# Rewrite history so commits authored/committed with the generic
# github-actions address use the canonical bot identity instead.
# WARNING: filter-branch rewrites every matching commit (and tags, via
# --tag-name-filter cat); a force-push is required afterwards.
git filter-branch --env-filter '

OLD_EMAIL="github-actions@github.com"
CORRECT_NAME="github-actions"
CORRECT_EMAIL="41898282+github-actions[bot]@users.noreply.github.com"

if [ "$GIT_COMMITTER_EMAIL" = "$OLD_EMAIL" ]
then
    export GIT_COMMITTER_NAME="$CORRECT_NAME"
    export GIT_COMMITTER_EMAIL="$CORRECT_EMAIL"
fi
if [ "$GIT_AUTHOR_EMAIL" = "$OLD_EMAIL" ]
then
    export GIT_AUTHOR_NAME="$CORRECT_NAME"
    export GIT_AUTHOR_EMAIL="$CORRECT_EMAIL"
fi
' --tag-name-filter cat -- --branches --tags
$ git describe --tags --always |
$ git push --delete origin tag_name # 删除所有 $ git tag -l | xargs -L1 -I {} git push --delete origin {} |
#!/usr/bin/env bash
# Revert a set of vSphere VMs to a named snapshot and ensure they are
# powered on.
#   SP_NAME - snapshot name to revert to (default: init)
#   NODES   - space-separated VM names
: ${SP_NAME:="init"}
: ${NODES:="kube-control-01 kube-control-02 kube-control-03 kube-node-01"}

# ${NODES} is intentionally unquoted so it splits into one VM name per word.
for node in ${NODES}; do
  if govc snapshot.revert -vm "${node}" "${SP_NAME}"; then
    echo "${node} snapshot revert successfully"
  fi
  # Power the VM on unless vm.info already reports poweredOn.
  govc vm.info "${node}" | grep -q poweredOn || govc vm.power -on "${node}"
done