portbuilder

Check-in [e14e6e7adf]
Login

Many hyperlinks are disabled.
Use anonymous login to enable hyperlinks.

Overview
Comment: Import first portbuild2 code
Timelines: family | ancestors | trunk
Files: files | file ages | folders
SHA1: e14e6e7adfbd51590c6d0e4d4d0fa4e8aee54064
User & Date: bapt 2012-07-07 14:12:37
Context
2012-07-07
14:12
Import first portbuild2 code Leaf check-in: e14e6e7adf user: bapt tags: trunk
2012-06-08
11:51
initial empty check-in check-in: cb113d9671 user: bapt tags: trunk
Changes
Hide Diffs Side-by-Side Diffs Ignore Whitespace Patch

Added cli.sh.

            1  +#!/bin/sh
            2  +echo "$@" | nc localhost 4444

Added nscripts/minitor.sh.

            1  +#!/bin/sh
            2  +
            3  +version=1
            4  +
            5  +myself=$0
            6  +mtime=$(stat -f "%m" $myself)
            7  +nbcpus=$(/sbin/sysctl -n hw.ncpu)
            8  +osreldate=$(/sbin/sysctl -n kern.osreldate)
            9  +arch=$(/sbin/sysctl -n hw.machine)
           10  +hostname=$(hostname)
           11  +jobdir="/usr2/portbuild/pids"
           12  +builddir="/usr3/pkgbuild/builds"
           13  +
           14  +[ -d ${jobdir} ] || mkdir -p ${jobdir}
           15  +[ -d ${builddir} ] || mkdir -p ${builddir}
           16  +logger -t monitor "Starting"
           17  +while :; do
           18  +        new=$(stat -f "%m" $myself)
           19  +        [ "$new" != "$mtime" ] && exec $myself
           20  +        newver=$(echo "code version" | nc localhost 4444)
           21  +        [ "$newver" != "$version" ] && echo "code get ${hostname}" | nc localhost 4444
           22  +#       jot ${nbcpus} | while read id; do
           23  +#               daemon -f -p ${jobdir}/${id}.pid \
           24  +#               /usr2/build/scripts/worker.sh ${builddir}
           25  +#       done
           26  +        sleep 30
           27  +done
           28  +

Added nscripts/worker.sh.

            1  +#!/bin/sh
            2  +
            3  +myself=$0
            4  +mtime=$(stat -f "%m" $myself)
            5  +myarch=$(/sbin/sysctl -n hw.machine)
            6  +hostname=$(hostname)
            7  +builddir=$1
            8  +# 30 min
            9  +timeout=1800
           10  +
           11  +run_job() {
           12  +        logger -t worker "running job $1"
           13  +        logger -t worker "job $1 done"
           14  +}
           15  +
           16  +setup_build() {
           17  +        logger -t worker "setting up $1"
           18  +        mkdir -p ${builddir}/$1
           19  +        echo "build setup $hostname $1" | nc localhost 4444
           20  +        local timer=0
           21  +        while ! test -f /usr/pkgbuild/builds/$1/done; do
           22  +                sleep 5
           23  +                timer=$((timer + 5))
           24  +                if [ $timer -gt $timerout ]; then
           25  +                        logger -t worker "aborting $1 setup"
           26  +                        echo "build abort $hostname $1" | nc localhost 4444
           27  +                        rm -rf ${builddir}/$1/
           28  +                        return
           29  +                fi
           30  +        done
           31  +        logger -t worker "setup $1 successful"
           32  +}
           33  +
           34  +logger -t worker "Starting"
           35  +while :; do
           36  +        local activity=0
           37  +        new=$(stat -f "%m" $myself)
           38  +        [ "$new" != "$mtime" ] && exec $myself
           39  +        # cleanup old builds no longer available upstream
           40  +        for b in ${builddir}/*; do
           41  +                [ -f ${b}/keep ] && continue
           42  +                touch ${b}/keep
           43  +                res=$(echo "build exists ${b%%*/}" | nc localhost 4444)
           44  +                if [ "$res" = "no" ]; then
           45  +                        logger -t worker "Destroying old build"
           46  +                        activity=1
           47  +                        rm -rf $b
           48  +                fi
           49  +                [ -f ${b}/keep ] && rm -f ${b}/keep
           50  +        done
           51  +        [ ${activity} -eq 1 ] && continue
           52  +        # check if there are new builds to setup
           53  +        echo "build list ${myarch}" | while read a; do
           54  +                if [ ! -d ${builddir}/builds/$a ]; then
           55  +                        activity=1
           56  +                        setup_build $a
           57  +                        break;
           58  +                fi
           59  +        done
           60  +        [ ${activity} -eq 1 ] && continue
           61  +        echo "job list ${hostname} ${myarch}" | while read j; do
           62  +                res=$(echo "job take ${hostname} $j" | nc localhost 4444)
           63  +                if [ "$res" = "OK" ]; then
           64  +                        activity=1
           65  +                        run_job $j
           66  +                        break;
           67  +                fi
           68  +        done
           69  +        [ ${activity} -eq 1 ] && continue
           70  +        sleep 10
           71  +done
           72  +

Added portbuild.conf.

            1  +mountbase=/a/build/
            2  +pool=a
            3  +codebase=/home/bapt/pb/com/scripts/
            4  +nodebase=/home/bapt/pb/com/nodescripts/
            5  +portsfs=a/snap/ports-head
            6  +srcfsbase=a/snap/

Added qmanager/phmgr.sh.

            1  +#!/bin/sh
            2  +
            3  +# no command should get longer than 3 hours
            4  +timeout=10800
            5  +
            6  +/usr/sbin/daemon -u ports-amd64 -f -c /usr/local/bin/socat -ly -lp "com" \
            7  +	-t ${timeout} -d -d \
            8  +	-L ~ports-amd64/com.lock \
            9  +	-d TCP-LISTEN:4444,fork,reuseaddr,retry \
           10  +	EXEC:/home/bapt/pb/com/scripts/dispatch.sh,stderr

Added scripts/common.shlib.

#!/bin/sh
# common.shlib -- shared helpers for the portbuild dispatcher scripts.
# Expects /usr/local/etc/portbuild.conf to be sourced first (provides
# ${mountbase}, ${codebase}, ${nodebase}, ${portsfs}, ${srcfsbase}).

# ZFS user-property namespace used to tag portbuild datasets
zfsns="org.freebsd.portbuild"
tunneldir=${mountbase}/tunnels

[ -d ${tunneldir} ] || mkdir -p ${tunneldir}

# Bootstrap the sqlite init file on first use: sets a busy timeout and
# creates the schema (idempotent thanks to IF NOT EXISTS); loaded by sql()
# via "sqlite3 -init".
if [ ! -f ${mountbase}/sql ]; then
        echo ".timeout 100
CREATE TABLE IF NOT EXISTS nodes (name TEXT NOT NULL UNIQUE, osversion INTEGER, arch TEXT, online INTEGER DEFAULT 0,  ncpu INTEGER);
CREATE TABLE IF NOT EXISTS builds (id TEXT NOT NULL UNIQUE, state TEXT NOT NULL DEFAULT 'creating', arch TEXT NOT NULL, starttime INTEGER DEFAULT 0);
" > ${mountbase}/sql
fi
           14  +
           15  +# Error functions
           16  +
           17  +err() {
           18  +        echo "$@"
           19  +        exit 0
           20  +}
           21  +
           22  +enosuch() { err "No such $1 $2"; }
           23  +eexists() { err "$1 $2 already exists"; }
           24  +
           25  +eargs() {
           26  +        case $# in
           27  +        0) err "No arguments expected" ;;
           28  +        1) err "1 argument expected: $1" ;;
           29  +        *) err "$# arguments expected: $@" ;;
           30  +        esac
           31  +}
           32  +
# helpers

# sql <statement...> -- run SQL against the shared database, loading
# ${mountbase}/sql first (sets .timeout and creates the schema if missing).
sql() {
        [ $# -eq 0 ] && eargs sql_code
        sqlite3 -init ${mountbase}/sql ${mountbase}/db.sqlite "$@"
}

# ssh options for unattended connections: quiet, no host-key prompts,
# public-key auth only, keepalives so dead tunnels are detected.
sshopts="-q -o StrictHostKeyChecking=no -o HashKnownHosts=no -o PreferredAuthentications=publickey -o ServerAliveInterval=60 -o ServerAliveCountMax=3"
           41  +
# All the build environment code

# env_get_fs <name> -- print the ZFS dataset whose ${zfsns}:name property
# matches <name>.
# NOTE(review): $1 is spliced unquoted into the awk regex; an env name
# containing regex metacharacters or spaces would misbehave -- confirm
# names are restricted upstream.
env_get_fs() {
        zfs list -t filesystem -H -o ${zfsns}:type,${zfsns}:name,name | \
                awk '/^env[[:space:]]'$1'/ { print $3 }'
}
           47  +
           48  +fs_exists() {
           49  +        [ $# -ne 1 ] && eargs fs
           50  +        zfs list -Ht filesystem -o name $1 >/dev/null 2>&1 && return 0
           51  +        return 1
           52  +}
           53  +
           54  +env_exists() {
           55  +        local name=$1
           56  +        zfs list -t filesystem -H \
           57  +                -o ${zfsns}:type,${zfsns}:name | \
           58  +                egrep -q "^env[[:space:]]$name$" && return 0
           59  +        return 1
           60  +} 
           61  +
           62  +env_list() {
           63  +        [ $# -eq 0 ] || eargs
           64  +        printf "%-20s %-s\n" "NAME" "MAIL"
           65  +        zfs list -t filesystem -H \
           66  +                -o ${zfsns}:type,${zfsns}:name,${zfsns}:mail | \
           67  +                awk '/^env/ { printf("%-20s %s\n", $2, $3) }'
           68  +}
           69  +
# env_create <name> <mail> -- register a new build environment as a ZFS
# dataset tagged with its name and contact mail, mounted under
# ${mountbase}/<name>.
env_create() {
        [ $# -lt 2 ] && eargs name mail
        local name="$1"
        local mail="$2"
        shift 2
        env_exists ${name} && eexists Env $name
        # NOTE(review): the pool path "a/build" is hard-coded here although
        # portbuild.conf defines ${pool} -- confirm before generalizing.
        sudo zfs create -p \
                -o ${zfsns}:type="env" \
                -o ${zfsns}:name="$name" \
                -o ${zfsns}:mail="$mail" \
                -o mountpoint=${mountbase}/${name} a/build/${name} || err "Not able to create FS"
        echo "build created"
}
           83  +        
           84  +env_destroy() {
           85  +        [ $# -ne 1 ] && eargs name
           86  +        local name=$1
           87  +        env_exists ${name} || enosuch env ${name}
           88  +        local base=$(env_get_fs ${name})
           89  +        [ -z ${base} ] && err "Could not get zfs base"
           90  +        sudo zfs destroy -R ${base} || err "Enable to destroy build"
           91  +        echo "build destroyed sucessfully"
           92  +}
           93  + 
# All the run environment code

# run_get_fs <runname> -- print the ZFS dataset whose ${zfsns}:name
# property matches <runname>.
# NOTE(review): same unquoted regex interpolation caveat as env_get_fs.
run_get_fs() {
        zfs list -t filesystem -H -o ${zfsns}:type,${zfsns}:name,name | \
                awk '/^run[[:space:]]'$1'/ { print $3 }'
}
          100  +
          101  +run_exists() {
          102  +        local name=$1
          103  +        zfs list -t filesystem -H \
          104  +                -o ${zfsns}:type,${zfsns}:id | \
          105  +                egrep -q "^run[[:space:]]$name$" && return 0
          106  +        return 1
          107  +} 
          108  +
# run_list [envname] -- list runs.  With an env name only that env's runs
# are shown (runname, id, status, mountpoint); without, all runs are shown
# with their env prepended.
# NOTE(review): ${envname} is spliced unquoted into the awk regex --
# metacharacters in an env name would misbehave.
run_list() {
        local envname=$1
        if [ -n "${envname}" ]; then
                printf "%-20s %-15s %7s %s\n" "RUNNAME" "ID" "STATUS" "PATH"
                # columns: $1=type $2=env $3=name $4=id $5=status $6=mountpoint
                zfs list -t filesystem -H \
                        -o ${zfsns}:type,${zfsns}:env,${zfsns}:name,${zfsns}:id,${zfsns}:status,mountpoint  | \
                        awk '/^run[[:space:]]'$envname'/ { printf("%-20s %-15s %-7s %s\n", $3, $4, $5, $6) }'
        else
                printf "%-20s %-20s %-15s %-7s %s\n" "ENVNAME" "RUNNAME" "ID" "STATUS" "PATH"
                zfs list -t filesystem -H \
                        -o ${zfsns}:type,${zfsns}:env,${zfsns}:name,${zfsns}:id,${zfsns}:status,mountpoint | \
                        awk '/^run/ { printf("%-20s %-20s %-15s %-7s %s\n", $2, $3, $4, $5, $6) }'
        fi
}
          123  +
          124  +run_create() {
          125  +        [ $# -ne 3 ] && eargs env_name src_version arch
          126  +        local envname=$1
          127  +        local version=$2
          128  +        local arch=$3
          129  +        local id=$(date +%Y%m%d%H%M%S)
          130  +        env_exists ${envname} || enosuch env ${envname}
          131  +        local srcfs=$(echo ${srcfsbase}/src-${version}/src | sed -e "s,//,/,g")
          132  +        fs_exists ${srcfs} || enosuch version ${version}
          133  +        sudo zfs create -p \
          134  +                -o ${zfsns}:type="run" \
          135  +                -o ${zfsns}:name="${version}-${arch}" \
          136  +                -o ${zfsns}:id="${id}" \
          137  +                -o ${zfsns}:env="${envname}" \
          138  +                -o ${zfsns}:status="new" \
          139  +                -o ${zfsns}:arch="${arch}" \
          140  +                -o ${zfsns}:version="${version}" \
          141  +                a/build/${envname}/${version}-${arch}-${id} || err "Not able to create FS"
          142  +        sudo zfs snapshot ${srcfs}@${id}
          143  +        sudo zfs clone  \
          144  +                -o ${zfsns}:type="src" \
          145  +                ${srcfs}@${id} \
          146  +                a/build/${envname}/${version}-${arch}-${id}/src
          147  +        sudo zfs snapshot ${portsfs}@${id}
          148  +        sudo zfs clone \
          149  +                -o ${zfsns}:type="ports" \
          150  +                ${portsfs}@${id} \
          151  +                a/build/${envname}/${version}-${arch}-${id}/ports                                                                                                        
          152  +        echo "run portsupdate ${envname} ${version} ${arch} to update the ports tree snapshot"
          153  +        echo "run srcupdate ${envname} ${vervion} ${arch} to update the srcs snapshot"
          154  +        echo "run makeworld ${envname} ${version} ${arch} to create a new world"
          155  +        echo "run makebindist ${envname} ${version} ${arch} to create a new world"
          156  +        echo "run prepare ${envname} ${version} ${arch} to create the src and ports archives"
          157  +        echo "run start ${envname} ${version} ${arch} to start building packages"
          158  +}
          159  +                
          160  +run_makeworld() {
          161  +        [ $# -ne 2 ] && eargs envname runname
          162  +        local envname=$1
          163  +        local runname=$2
          164  +        shift 2
          165  +        eval `zfs list -rHd1 -t filesystem \
          166  +                -o ${zfsns}:type,${zfsns}:env,${zfsns}:name,mountpoint,name -s ${zfsns}:id \
          167  +                a/build/${envname} | \
          168  +                awk -v env=${envname} -v run=${runname} '($1 == "run" && $2 == env && $3 == run) { mnt=$4; fs=$5 } END { print "mnt="mnt"\nfs="fs }'`
          169  +        [ -z ${mnt} ] && err "Enable to determine mountpoint for ${runname} in env ${envname}"
          170  +        [ -z ${fs} ] && err "Enable to determine filesystem for ${runname} in env ${envname}"
          171  +        [ -d ${mnt}/src ] || err "No source tree installed"
          172  +        arch=$(zfs get -H -o value ${zfsns}:arch ${fs})
          173  +        sudo ${codebase}/makeworld.sh ${mnt} ${arch} || err "Failed to make world"
          174  +        echo "World successfully build and installed you have a last chance to modify it"
          175  +}
          176  +        
# run_packall <envname> <runname> -- create the three distribution
# tarballs (world/ports/src) for a run, in parallel background jobs.
run_packall() {
        [ $# -ne 2 ] && eargs envname runname
        local envname=$1
        local runname=$2
        shift 2
        # resolve the run's mountpoint from the dataset properties
        mnt=`zfs list -rHd1 -t filesystem \
                -o ${zfsns}:type,${zfsns}:env,${zfsns}:name,mountpoint,name -s ${zfsns}:id \
                a/build/${envname} | \
                awk -v env=${envname} -v run=${runname} '($1 == "run" && $2 == env && $3 == run) { mnt=$4; fs=$5 } END { print mnt }'`
        # NOTE(review): the four checks below share one copy-pasted error
        # message; only the first is really about the mountpoint.
        [ -d ${mnt} ] || err "Enable to determine mountpoint for ${runname} in env ${envname}"
        [ -d ${mnt}/world ] || err "Enable to determine mountpoint for ${runname} in env ${envname}"
        [ -d ${mnt}/src ] || err "Enable to determine mountpoint for ${runname} in env ${envname}"
        [ -d ${mnt}/ports ] || err "Enable to determine mountpoint for ${runname} in env ${envname}"
        rm -f ${mnt}/bindist.tbz ${mnt}/ports.tbz ${mnt}/src.tbz
        # NOTE(review): archives are written as hidden .*.tbz but nothing
        # here renames them to the bindist.tbz/ports.tbz/src.tbz names
        # removed above -- TODO confirm where the rename is meant to happen.
        # NOTE(review): the third tar archives all of ${mnt}, including the
        # other two archives being written concurrently -- confirm intent.
        sudo tar cfCj ${mnt}/.bindist.tbz ${mnt}/world . &
        tar cfCj ${mnt}/.ports.tbz ${mnt} ports &
        tar cfCj ${mnt}/.src.tbz ${mnt} &
        # a single wait already reaps all three jobs; the extra two are no-ops
        wait
        wait
        wait
}
          198  +                
          199  +run_destroy() {
          200  +        [ $# -ne 1 ] && eargs name
          201  +        local name=$1
          202  +        run_exists ${name} || enosuch run ${name}
          203  +        local base=$(run_get_fs ${name})
          204  +        [ -z ${base} ] && err "Could not get zfs base"
          205  +        sudo zfs destroy -R ${base} || err "Enable to destroy run"
          206  +        echo "run destroyed sucessfully"
          207  +}
          208  +        
# online -- print one line per registered node: name, arch, osversion and
# the "online" column (0 when offline, otherwise the datetime written by
# runssh.sh, hence the LAST SEEN header).
online() {
        printf "%-40s %-7s %-7s %s\n" "NAME" "ARCH" "VERSION" "LAST SEEN"
        sql "select name, arch, osversion, online from nodes" | \
                awk -F\| '{ printf("%-40s %-7s %-7s %s\n", $1, $2, $3, $4) }'
}
          214  +                
          215  +# Code for ssh tunnels
          216  +tunnel_running() {
          217  +        [ $# -ne 1 ] && eargs hostname
          218  +        if [ -f ${tunneldir}/${host}.pid ]; then
          219  +                pgrep -q -F ${tunneldir}/${host}.pid && return 0
          220  +        fi
          221  +        return 1
          222  +}
          223  +        
# tunnel_start <hostname> -- launch the tunnel keeper (runssh.sh) for the
# host in the background via FreeBSD daemon(8), recording its pid.
tunnel_start() {
        [ $# -ne 1 ] && eargs hostname
        local host=$1
        tunnel_running ${host} && err "${host} tunnel already running"
        echo "Starting tunnel ${host}"
        # -f: detach from terminal, -p: pidfile, -c: chdir to / first
        daemon -f -p ${tunneldir}/${host}.pid -c ${codebase}/runssh.sh ${host}
#       ${codebase}/runssh.sh ${1} ${tunneldir}
}
          232  +                
# tunnel_stop <hostname> -- stop the host's tunnel keeper (SIGINT so its
# die() trap fires) and mark the node offline in the database.
tunnel_stop() {
        [ $# -ne 1 ] && eargs hostname
        local host=$1
        tunnel_running ${host} || err "${host} tunnel is not running"
        echo "Stopping tunnel ${host}"
        pkill -SIGINT -F ${tunneldir}/${host}.pid
        sql "update nodes set online=0 where name='${host}'"
}
          241  +            
          242  +tunnel_list() {
          243  +        [ $# -ne 0 ] && eargs
          244  +        [ ! -f ${tunneldir}/hosts ] && return
          245  +        while read host; do
          246  +                local state="off"
          247  +                echo -n "tunnel $host: "
          248  +                tunnel_running ${host} && echo "running" || echo "not running"
          249  +        done < ${tunneldir}/hosts
          250  +}
          251  +        
          252  +tunnel_add() {
          253  +        [ $# -ne 1 ] && eargs hostname  
          254  +        local name=$1
          255  +        if [ -f ${tunneldir}/hosts ]; then                                                                                                                               
          256  +                egrep -q "^${name}$" ${tunneldir}/hosts && err "${name} already added"
          257  +        fi
          258  +        echo "${name}" >> ${tunneldir}/hosts
          259  +}
          260  +        
          261  +tunnel_startall() {
          262  +        [ $# -ne 0 ] && eargs
          263  +        [ ! -f ${tunneldir}/hosts ] && return                                                                                                                            
          264  +        while read host; do
          265  +                tunnel_running ${host} || tunnel_start ${host}
          266  +        done < ${tunneldir}/hosts
          267  +}
          268  +
          269  +tunnel_stopall() {
          270  +        [ $# -ne 0 ] && eargs
          271  +        [ ! -f ${tunneldir}/hosts ] && return
          272  +        while read host; do
          273  +                tunnel_running ${host} && tunnel_stop ${host}
          274  +        done < ${tunneldir}/hosts
          275  +}
          276  +        
          277  +tunnel_del() {
          278  +        [ $# -ne 1 ] && eargs hostname
          279  +        [ ! -f ${tunneldir}/hosts ] && return
          280  +        local name=$1
          281  +        egrep -q "^${name}$" ${tunneldir}/hosts || enosuch hostname ${name}
          282  +        tunnel_running ${name} && tunnel_stop ${name}
          283  +        sed -i '' -e "/^${name}$/d" ${tunneldir}/hosts
          284  +}
          285  +code_version() {
          286  +        [ $# -ne 0 ] && eargs
          287  +        sed -n -e "s/^version=\(.*\)/\1/p" ${nodebase}/monitor.sh
          288  +}
          289  +
# code_get <hostname> -- push the node scripts to a build node (invoked
# when a node sends "code get <hostname>" through the tunnel).
code_get() {
        [ $# -ne 1 ] && eargs hostname
        local name=$1
        # NOTE(review): the target /usr2/build/scripts does not match the
        # /usr2/portbuild/nscripts path that runssh.sh syncs and execs
        # from -- confirm which location the node monitor really uses.
        rsync -rtl -e ssh ${nodebase}/ ${name}:/usr2/build/scripts
}
          295  +

Added scripts/dispatch.sh.

#!/bin/sh
# dispatch.sh -- executed by socat (see qmanager/phmgr.sh) once per TCP
# connection on port 4444; reads a single request line and dispatches it.

# first line of the request, e.g. "build list amd64"
read data

# split the request into words (deliberately unquoted)
set -- ${data}

. /usr/local/etc/portbuild.conf
. ${codebase}/common.shlib

cmd=$1
shift
# NOTE(review): ${cmd} and the remaining words come straight off the
# network and are used unvalidated to build a filesystem path and a shell
# command name below -- a hostile client could inject "../" components or
# arbitrary function names.  This listener must only ever be reachable via
# the authenticated reverse ssh tunnels; confirm that assumption holds.
test -x /home/bapt/pb/com/scripts/$cmd.sh && {
        /home/bapt/pb/com/scripts/$cmd.sh $@
        exit 0
}

case ${cmd} in
        ping)
                ping $@
                ;;
        online)
                online
                ;;
        *)
                # two-word commands map to common.shlib functions,
                # e.g. "env list" -> env_list
                subcmd=$1
                shift
                ${cmd}_${subcmd} $@ || err "No such command"
                ;;
esac

Added scripts/makeworld.sh.

            1  +#!/bin/sh
            2  +
            3  +if [ `id -u` -ne 0 ]; then
            4  +        echo "needs to be run as root"
            5  +        exit 1
            6  +fi
            7  +if [ $# -ne 2 ]; then
            8  +        echo "needs to arguments: <path> <arch>"
            9  +        exit 1
           10  +fi
           11  +mnt=$1
           12  +arch=$2
           13  +shift 2
           14  +if [ ! -d ${mnt} ]; then
           15  +        echo "path not found ${mnt}"
           16  +        exit 1
           17  +fi
           18  +if [ ! -d ${mnt}/src ]; then
           19  +        echo "no sources found"
           20  +        exit 1
           21  +fi
           22  +export TARGET_ARCH=${arch}
           23  +# Workaround needed for zfs - 20090321 erwin (XXXbapt: unusure this is still needed)
           24  +export NO_FSCHG=1
           25  +__MAKE_CONF=/dev/null
           26  +[ -f ${mnt}/make.conf ] && __MAKE_CONF=${mnt}/make.conf
           27  +export __MAKE_CONF
           28  +SRCCONF=/dev/null
           29  +[ -f ${mnt}/src.conf ] && SRCCONF=${mnt}/make.conf
           30  +cd ${mnt}/src && make -j8 buildworld || exit 1
           31  +DESTDIR=${mnt}/world
           32  +[ -d ${mnt}/world ] && rm -rf ${mnt}/world
           33  +mkdir -p ${DESTDIR}
           34  +export NEWSPARC_TIMETYPE=__int64_t
           35  +make installworld DESTDIR=${DESTDIR} || exit 1
           36  +
           37  +make DESTDIR=${DESTDIR} distrib-dirs && \
           38  +        make DESTDIR=${DESTDIR} distribution || exit 1
           39  +

Added scripts/runssh.sh.

            1  +#!/bin/sh
            2  +
            3  +. /usr/local/etc/portbuild.conf
            4  +. ${codebase}/common.shlib
            5  +
            6  +norestart=0
            7  +sshpid="-1"
            8  +
            9  +die() {
           10  +        trap SIGCHLD
           11  +        logger -t tunnelssh "Stopping ${name}"
           12  +        # disabling trap
           13  +        trap SIGINT
           14  +        trap SIGTERM
           15  +        trap SIGKILL
           16  +        trap EXIT
           17  +        [ $sshpid != -1 ] && kill -15 $sshpid
           18  +        exit 0
           19  +}
           20  +
           21  +startssh() {
           22  +        logger -t tunnelssh "Starting tunnel ${name}"
           23  +        sql "insert or ignore into nodes (name) values ('$name')"
           24  +        ssh ${sshopts} ${name} "${updatecmd}" | \
           25  +                sed -e 's/^M//g' | while read osversion ncpu arch; do
           26  +                        sql "update nodes set ncpu=${ncpu}, osversion=${osversion}, arch=trim('arch'), online=datetime('now') where name='${name}'"
           27  +                done
           28  +        ssh ${sshopts} ${name} "mkdir -p /usr2/portbuild/pids"
           29  +        ssh ${sshopts} ${name} "pgrep -q -F /usr2/portbuild/pids/monitor.pid >/dev/null 2>&1" || \
           30  +                rsync -rtl -e ssh ${nodebase}/ ${name}:/usr2/portbuild/nscripts
           31  +        ssh ${sshopts} ${name} "/usr/sbin/daemon -f -c -p /usr2/portbuild/pids/monitor.pid /usr2/portbuild/nscripts/monitor.sh"
           32  +        trap sshdead SIGCHLD
           33  +        ssh ${sshopts} -N -R 4444:localhost:4444 ${name} &
           34  +        sshpid=$!
           35  +}
           36  +
           37  +sshdead() {
           38  +        trap SIGCHLD
           39  +        sql "update nodes set online=0 where name='${name}'"
           40  +        logger -t tunnelssh "tunnel ${name} dead"
           41  +        sleep 5
           42  +        trap sshdead SIGCHLD
           43  +        startssh
           44  +}
           45  +
           46  +trap die SIGINT SIGTERM SIGKILL EXIT
           47  +
           48  +name=$1
           49  +startssh
           50  +while :; do
           51  +        wait
           52  +done