A reference shell script for timing dble split tests
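
The script below drives an end-to-end timing test of dble's split command: it recreates the backend shardingnode schemas through the dble manager port (9066), runs split on a benchmarksql mysqldump file, then times the concurrent import of the resulting per-shard files, either from the dble host (LOADIN="locally") or from each backend MySQL host (LOADIN="remote"). Every phase is logged to time_record.log in the working directory. A minimal run might look like this (the file name split_timing_test.sh is only an illustrative choice):

bash split_timing_test.sh
tail -f time_record.log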

#!/bin/bash
# set -euxo pipefail
# For a brand-new test environment, set TEST_ENV="new"
TEST_ENV=
# Set LOADIN="locally" to import the split mysqldump files from the dble host itself;
# set LOADIN="remote" to copy the split files to the backend MySQL hosts and import them there.
LOADIN="locally"
test_dir='/tmp/splittest/'
dble_conf_dir='/opt/dble/conf/'
ex_dump_file_name='benchmarksql_with_data'
dump_file_name='benchmarksql_with_data'
shardingNum=10
ex_shardingNum=10
MYSQL='/usr/bin/mysql -uxxxx -pxxxx -P3306'
MYSQL_HOST[0]='xx.xx.xx.101'
MYSQL_HOST[1]='xx.xx.xx.102'
MYSQL_HOST[2]='xx.xx.xx.105'
MYSQL_HOST[3]='xx.xx.xx.108'
DBLE_ROOT='/usr/bin/mysql -uroot -p111111 -P9066 -h127.0.0.1'
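
# Optional sanity check (an illustrative addition, not part of the original test
# flow): confirm each backend MySQL host and the dble manager port are reachable
# before starting a timed run. Uncomment to use:
# for host in "${MYSQL_HOST[@]}"; do
#     ${MYSQL} -h"${host}" -e "select 1" >/dev/null || echo "cannot reach ${host}"
# done
# ${DBLE_ROOT} -e "show @@version;" >/dev/null || echo "cannot reach dble manager"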

function create_backend_mysqls_from_dble_cmd () {
    # Generate the SQL files that drop the old shardingnode schemas and create the new ones
    drop_ex_shardingnodes="${test_dir}drop_ex_shardingnodes_dnx.sql"
    create_shardingnodes="${test_dir}create_shardingnodes_dnx.sql"
    rm -f ${drop_ex_shardingnodes}
    cat>"${drop_ex_shardingnodes}"<<EOFDROP
drop database @@shardingnode="an\$1-${ex_shardingNum}";
EOFDROP
    rm -f ${create_shardingnodes}
    cat>"${create_shardingnodes}"<<EOFCREATE
reload @@config_all;
create database @@shardingnode="an\$1-${shardingNum}";
EOFCREATE

    ${DBLE_ROOT} < ${drop_ex_shardingnodes}
    if [[ $? != 0 ]]; then
        echo "fail to drop backend schema via dble admin cmd line"
        exit 1
    fi
    # Swap configs: archive the current sharding.xml and promote sharding_${shardingNum}.xml (which must already exist) to sharding.xml
    mv ${dble_conf_dir}sharding.xml ${dble_conf_dir}sharding_${ex_shardingNum}.xml
    mv ${dble_conf_dir}sharding_${shardingNum}.xml ${dble_conf_dir}sharding.xml
    ${DBLE_ROOT} < ${create_shardingnodes}
    if [[ $? != 0 ]]; then
        echo "fail to create backend schema via dble admin cmd line"
        exit 1
    fi

    # Check that the backend MySQL schemas have been created
    ${MYSQL} -h${MYSQL_HOST[1]} -e "show databases;" | grep "dh_dn_"
    if [[ $? != 0 ]]; then
        echo "backend schemas dh_dn_* not found on ${MYSQL_HOST[1]}"
    fi
}
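
# With the defaults above (ex_shardingNum=10, shardingNum=10), the two generated
# SQL files expand to:
#   drop_ex_shardingnodes_dnx.sql:  drop database @@shardingnode="an$1-10";
#   create_shardingnodes_dnx.sql:   reload @@config_all;
#                                   create database @@shardingnode="an$1-10";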

function generate_rm_old_split_dump_files () {
    filename="${test_dir}rm_old_split_dump_files.sh"
    rm -f ${filename}
cat>"${filename}"<<EOF
#!/usr/bin/expect -f
set timeout -1
set host [lindex \$argv 0]
set file_name [lindex \$argv 1]
spawn ssh test@\$host "rm -f \$file_name.sql-an*;ls -l /tmp/splittest/"
expect "*continue connecting*"
send "yes\r"

expect "*password:"
send "mypasswordfortestuser\r"
expect eof
EOF
}
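
# Note: the expect wrapper above exists only because password authentication is
# used; with SSH key-based auth the same cleanup could be one plain call, e.g.
#   ssh test@${host} "rm -f ${test_dir}${ex_dump_file_name}.sql-an*"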

function rm_old_dump_files_on_each_shardingnodes () {
    rm -f ${test_dir}${ex_dump_file_name}.sql-an*
    # On the first run in a new environment, this file must be generated: ${test_dir}rm_old_split_dump_files.sh
    if [[ "${TEST_ENV}" == "new" ]]; then
        generate_rm_old_split_dump_files
    fi
    
    for host in "${MYSQL_HOST[@]}";do
        echo "${host}"   
        expect ${test_dir}rm_old_split_dump_files.sh "${host}" "${test_dir}${ex_dump_file_name}"
    done

}

# split_cmd.sql embeds dump_file_name; values for options such as -l could be parameterized here as well
function get_split_exec_cost_time () {
    filename="${test_dir}split_cmd.sql"
    rm -f ${filename}
    cat>"${filename}"<<EOF
split /opt/splitTest/${dump_file_name}.sql /tmp/splittest -sbenchmarksql;
EOF
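    # The -l option mentioned above could be appended like so (10000 is a purely
    # illustrative value; consult the dble split documentation for its meaning):
    # split /opt/splitTest/${dump_file_name}.sql /tmp/splittest -sbenchmarksql -l10000;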
    {
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> start exec split cmd on dble manager 9066"
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
        # echo "${DBLE_ROOT} < ${filename}"
        echo "split /opt/splitTest/${dump_file_name}.sql /tmp/splittest -sbenchmarksql; "
    }>> time_record.log

    timer_start=$(date +%s)
    ${DBLE_ROOT} < ${filename}
    if [[ $? != 0 ]]; then
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) !!!!!!!!!!! fail to exec !!!!!!!!!!" >> time_record.log
    fi
    timer_end=$(date +%s)
    duration=$(echo "$timer_end" "$timer_start" | awk '{print $1-$2}')s
    {
        echo "dble管理端执行split的耗时: ${duration} "
        echo "total time is ${duration} "
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> finish exec split cmd on dble manager 9066"
        echo " "
    }>> time_record.log
        
}
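
# Design note: the awk pipeline above works, but plain bash arithmetic computes
# the same duration without spawning processes: duration=$((timer_end - timer_start))s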

# From the dble host, connect to each backend MySQL instance remotely and run the imports concurrently
function loadAll_shardingDumpFiles_on_dble_server () {
    {
        echo " "
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> loadAll_shardingDumpFiles_on_dble_server  startTime"
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
    }>> time_record.log

    timer_s=$(date +%s)
    
    for ((i=1;i<="${shardingNum}";i++))
    do
    {
        if [ ! -f "${test_dir}${dump_file_name}.sql-an${i}.sql" ]; then
            mv ${test_dir}${dump_file_name}.sql-an"${i}"-*dump ${test_dir}${dump_file_name}.sql-an"${i}".sql
        fi   

        node=$(( i % 4 ))
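        # Round-robin the shards across the four hosts; with shardingNum=10 this
        # maps an1/an5/an9 -> HOST[0], an2/an6/an10 -> HOST[1],
        # an3/an7 -> HOST[2], an4/an8 -> HOST[3].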

        case ${node} in
            1) host=${MYSQL_HOST[0]};;
            2) host=${MYSQL_HOST[1]};;
            3) host=${MYSQL_HOST[2]};;
            0) host=${MYSQL_HOST[3]};;
        esac

        echo "${MYSQL} -h${host} dh_dn_${i} < ${test_dir}${dump_file_name}.sql-an${i}.sql" >>  time_record.log
        ${MYSQL} -h${host} dh_dn_"${i}" < ${test_dir}${dump_file_name}.sql-an"${i}".sql
        
    }&    
    done
    wait
    timer_e=$(date +%s)
    tduration=$(echo "$timer_e" "$timer_s" | awk '{print $1-$2}')s
    {
        echo "split后的dumpfile文件从dble本机并发导入,总时长(即最长耗时)为:   ${tduration}  "
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> loadAll_shardingDumpFiles_on_dble_server  finishTime"
        echo " "
    }>> time_record.log
}
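
# Optional (illustrative, not part of the original flow): spot-check one shard
# after the concurrent import completes, e.g.
#   ${MYSQL} -h"${MYSQL_HOST[0]}" dh_dn_1 -e "show tables;"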


# Not needed when timing the local (on-dble-host) import
function generate_send_split_dump_files () {
    filename="${test_dir}send_split_dump_files.sh"
    rm -f ${filename}
cat>"${filename}"<<EOF
#!/usr/bin/expect -f
set timeout -1
set file_num [lindex \$argv 0]
set host [lindex \$argv 1]
set file_name [lindex \$argv 2]
spawn scp \$file_name.sql-an\$file_num.sql test@\$host:/tmp/splittest/.
expect "*continue connecting*"
send "yes\r"

expect "*password:"
send "mypasswordfortestuser\r"
expect eof
EOF
}

# Not needed when timing the local (on-dble-host) import
function change_split_dumpfile_name_and_cp_them_to_their_local () {
    # On the first run in a new environment, this file must be generated: ${test_dir}send_split_dump_files.sh
    if [[ ("${TEST_ENV}" == "new") && ("${LOADIN}" == "remote") ]]; then
        generate_send_split_dump_files
    fi
    
    {
        echo " "
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> start change split filename and cp them to their local mysql server hosts"
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
    }>> time_record.log
    timer_st=$(date +%s)
    for ((i=1;i<="${shardingNum}";i++))
    do
    {
        if [ ! -f "${test_dir}${dump_file_name}.sql-an${i}.sql" ]; then
            mv ${test_dir}${dump_file_name}.sql-an"${i}"-*dump ${test_dir}${dump_file_name}.sql-an"${i}".sql
        fi   

        node=$(( i % 4 ))

        case ${node} in
            1) host=${MYSQL_HOST[0]};;
            2) host=${MYSQL_HOST[1]};;
            3) host=${MYSQL_HOST[2]};;
            0) host=${MYSQL_HOST[3]};;
        esac

        # scp ${test_dir}${dump_file_name}.sql-an"${i}".sql test@${host}:/tmp/splittest/.
        expect "${test_dir}"send_split_dump_files.sh "${i}" "${host}" "${test_dir}${dump_file_name}"
    }    
    done
    timer_ed=$(date +%s)
    cpduration=$(echo "$timer_ed" "$timer_st" | awk '{print $1-$2}')s
    {
        echo "split后的dumpfile文件cp到对应后端mysql本机,总时长(即最长耗时)为:   ${cpduration}  "
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> finish change split filename and cp them to their local mysql server hosts"
        echo " "
    }>> time_record.log
}

function exec_remote_test_time_on_each_hosts () {
    {
        echo " "
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> exec_remote_loadin_on_each_host  startTime"
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
    }>> time_record.log

    timer_s=$(date +%s)
    for host in "${MYSQL_HOST[@]}"
    do
    {
        echo "${host}" 
        expect "${test_dir}"exec_shell_on_remote_server.sh "${host}" 
    }&
    done
    wait 
    timer_e=$(date +%s)
    tduration=$(echo "$timer_e" "$timer_s" | awk '{print $1-$2}')s
    {
        echo "split后的dumpfile文件从后端mysql本机并发导入,总时长(即最长耗时)为:   ${tduration}  "
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
        echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> exec_remote_loadin_on_each_host  finishTime"
        echo " "
    }>> time_record.log

}

function clean_backend_schemas_and_old_dump_files () {
    rm_old_dump_files_on_each_shardingnodes
    create_backend_mysqls_from_dble_cmd
}

function test_of_split_dumpfile_on_mysql_respectively () {
    echo "NOW we are testing scp split_dumpfiles to mysql and loadin respectively,and shardingnode num is ${shardingNum}--------------------------------------------------------------">> time_record.log
    clean_backend_schemas_and_old_dump_files
    get_split_exec_cost_time
    change_split_dumpfile_name_and_cp_them_to_their_local
    exec_remote_test_time_on_each_hosts
    echo "NOW we have finished the testing of scp split_dumpfiles to mysql and loadin respectively,and shardingnode num is ${shardingNum}---------------------------------------------">> time_record.log
}

function test_of_split_dumpfile_on_dbleServer () {
    echo "NOW we are testing loadin on dble server locally,and shardingnode num is ${shardingNum}--------------------------------------------------------------">> time_record.log
    clean_backend_schemas_and_old_dump_files
    get_split_exec_cost_time
    loadAll_shardingDumpFiles_on_dble_server
    echo "NOW we have finished the testing of loadin on dble server locally,and shardingnode num is ${shardingNum}---------------------------------------------">> time_record.log
}

if [[ "${LOADIN}" == "locally" ]]; then
    test_of_split_dumpfile_on_dbleServer
elif [[ "${LOADIN}" == "remote" ]]; then
    test_of_split_dumpfile_on_mysql_respectively
else
    echo "The variable LOADIN (defined near the top of this script) must be set to 'locally' or 'remote'."
    exit 1
fi
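
For reference, a LOADIN="locally" run appends entries of the following shape to time_record.log (the timestamps and the 123s duration are illustrative; the message text comes directly from the echo statements above):

NOW we are testing loadin on dble server locally,and shardingnode num is 10--------------------------------------------------------------
[2024-01-01 10:00:00] <==========================> start exec split cmd on dble manager 9066
[2024-01-01 10:00:00] <=======> startTime
split /opt/splitTest/benchmarksql_with_data.sql /tmp/splittest -sbenchmarksql;
Time taken by the split command on the dble admin port: 123s
total time is 123s
[2024-01-01 10:02:03] <=======> endTime
[2024-01-01 10:02:03] <==========================> finish exec split cmd on dble manager 9066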
