Compare commits

...

23 Commits

SHA1        Author         Date                        Message
e9d62701f2  Erica Portnoy  2020-04-09 12:06:25 -07:00  use connections as context managers to ensure they're closed
b8ab15accd  Erica Portnoy  2020-04-09 11:58:25 -07:00  work around fabric #2007
0d4aa05f54  Erica Portnoy  2020-04-09 11:57:20 -07:00  catch BaseException instead of Exception
42530c686c  Erica Portnoy  2020-04-09 11:56:55 -07:00  move the comment about hardcoding the gopath
dc861ebf6f  Erica Portnoy  2020-04-09 11:55:45 -07:00  Update letstest readme
86fa1163d3  Erica Portnoy  2020-04-03 16:55:21 -07:00  hardcode the gopath due to broken env management in fabric2
dc7743ca0f  Erica Portnoy  2020-04-03 16:07:22 -07:00  fix killboulder implementation so I can test creating a new boulder server
81ea9ba1b1  Erica Portnoy  2020-04-03 15:46:23 -07:00  remove deploy_script unused kwargs
e5e0097345  Erica Portnoy  2020-04-03 15:43:47 -07:00  update version used in travis
28da0aa20f  Erica Portnoy  2020-04-03 15:42:56 -07:00  add new venv to gitignore
86af07d2c0  Erica Portnoy  2020-04-03 15:41:23 -07:00  update requirements and README
f2a619b1a3  Erica Portnoy  2020-04-03 15:30:53 -07:00  remove final fabric v1 references including local
6f822b6740  Erica Portnoy  2020-04-03 14:59:56 -07:00  get sudo commands working
486dd5f72c  Erica Portnoy  2020-04-03 13:51:45 -07:00  get run working with prefix
8242466968  Erica Portnoy  2020-04-02 16:41:47 -07:00  move more cxns to v2
3758e8c226  Erica Portnoy  2020-04-02 16:30:01 -07:00  convert some remote calls to v2
2ebfe591e4  Erica Portnoy  2020-04-02 15:52:12 -07:00  remove remaining execute call
bea83b29d0  Erica Portnoy  2020-04-02 15:46:39 -07:00  remove the execute calls that are obviously doing nothing
1bd4909d79  Erica Portnoy  2020-04-02 15:34:40 -07:00  remove fabric cd
302b02de4f  Erica Portnoy  2020-04-02 15:31:26 -07:00  correct lcd removal
8348ca572d  Erica Portnoy  2020-04-02 14:09:04 -07:00  remove fabric lcd usage
2f8ae1dae8  Erica Portnoy  2020-04-01 17:37:03 -07:00  use six for urllib instead
3e5cfae06b  Erica Portnoy  2020-03-31 15:12:09 -07:00  stop using urllib2 in test farm tests
5 changed files with 132 additions and 124 deletions
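
Taken together, the commits above port the letstest farm scripts from Fabric 1's module-level API (fabric.api.env, run, sudo, execute) to Fabric 2's explicit Config and Connection objects. A minimal sketch of that shift, using placeholder host and key values rather than anything from this branch:

```python
# Minimal sketch of the Fabric 1 -> Fabric 2 pattern applied in this branch.
# The host string and key path below are placeholders, not test-farm values.
from fabric import Config, Connection

config = Config(overrides={
    "connect_kwargs": {"key_filename": ["/path/to/key.pem"]},
    "run": {"echo": True, "pty": True},
})

# Fabric 1 configured a module-global `env` and called run()/sudo()/put() as
# free functions; Fabric 2 scopes all of that to a Connection, used here as a
# context manager so the SSH session is closed when the block exits.
with Connection("ubuntu@203.0.113.10", config=config) as cxn:
    cxn.run("uname -a")               # replaces fabric.api.run
    cxn.sudo("docker-compose ps")     # replaces fabric.api.sudo
    cxn.put("le.tar.gz", remote="")   # replaces fabric.operations.put
    cxn.local("echo runs on the test driver, not the remote host")
```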

.gitignore

@@ -35,6 +35,7 @@ tags
 tests/letstest/letest-*/
 tests/letstest/*.pem
 tests/letstest/venv/
+tests/letstest/venv3/
 .venv

.travis.yml

@@ -90,24 +90,24 @@ matrix:
       before_install:
       addons:
       <<: *extended-test-suite
-    - python: "2.7"
+    - python: "3.7"
       env:
         - TOXENV=travis-test-farm-apache2
         - secure: "f+j/Lj9s1lcuKo5sEFrlRd1kIAMnIJI4z0MTI7QF8jl9Fkmbx7KECGzw31TNgzrOSzxSapHbcueFYvNCLKST+kE/8ogMZBbwqXfEDuKpyF6BY3uYoJn+wPVE5pIb8Hhe08xPte8TTDSMIyHI3EyTfcAKrIreauoArePvh/cRvSw="
       <<: *extended-test-suite
-    - python: "2.7"
+    - python: "3.7"
       env:
         - TOXENV=travis-test-farm-leauto-upgrades
         - secure: "f+j/Lj9s1lcuKo5sEFrlRd1kIAMnIJI4z0MTI7QF8jl9Fkmbx7KECGzw31TNgzrOSzxSapHbcueFYvNCLKST+kE/8ogMZBbwqXfEDuKpyF6BY3uYoJn+wPVE5pIb8Hhe08xPte8TTDSMIyHI3EyTfcAKrIreauoArePvh/cRvSw="
       git:
         depth: false # This is needed to have the history to checkout old versions of certbot-auto.
       <<: *extended-test-suite
-    - python: "2.7"
+    - python: "3.7"
       env:
         - TOXENV=travis-test-farm-certonly-standalone
         - secure: "f+j/Lj9s1lcuKo5sEFrlRd1kIAMnIJI4z0MTI7QF8jl9Fkmbx7KECGzw31TNgzrOSzxSapHbcueFYvNCLKST+kE/8ogMZBbwqXfEDuKpyF6BY3uYoJn+wPVE5pIb8Hhe08xPte8TTDSMIyHI3EyTfcAKrIreauoArePvh/cRvSw="
       <<: *extended-test-suite
-    - python: "2.7"
+    - python: "3.7"
       env:
         - TOXENV=travis-test-farm-sdists
         - secure: "f+j/Lj9s1lcuKo5sEFrlRd1kIAMnIJI4z0MTI7QF8jl9Fkmbx7KECGzw31TNgzrOSzxSapHbcueFYvNCLKST+kE/8ogMZBbwqXfEDuKpyF6BY3uYoJn+wPVE5pIb8Hhe08xPte8TTDSMIyHI3EyTfcAKrIreauoArePvh/cRvSw="

tests/letstest/README.md

@@ -15,12 +15,12 @@ Simple AWS testfarm scripts for certbot client testing
 are needed, they need to be requested via online webform.
 
 ## Installation and configuration
 
-These tests require Python 2.7, awscli, boto3, PyYAML, and fabric<2.0. If you
-have Python 2.7 and virtualenv installed, you can use requirements.txt to
+These tests require Python 3, awscli, boto3, PyYAML, and fabric 2.0+. If you
+have Python 3 and virtualenv installed, you can use requirements.txt to
 create a virtual environment with a known set of dependencies by running:
 ```
-virtualenv --python $(command -v python2.7 || command -v python2 || command -v python) venv
-. ./venv/bin/activate
+python3 -m venv venv3
+. ./venv3/bin/activate
 pip install --requirement requirements.txt
 ```
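
The same setup can be driven from the standard library if preferred; a hedged equivalent of the README's shell steps, where the venv3 name matches the README and the rest is illustrative (POSIX bin/ layout assumed):

```python
# Create the venv3 environment and install the pinned requirements into it.
import subprocess
import venv

venv.EnvBuilder(with_pip=True).create("venv3")
# "venv3/bin/pip" assumes a POSIX virtualenv layout.
subprocess.check_call(["venv3/bin/pip", "install", "--requirement", "requirements.txt"])
```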

tests/letstest/multitester.py

@@ -40,23 +40,16 @@ import socket
 import sys
 import time
 import traceback
-import urllib2
 
 import boto3
 from botocore.exceptions import ClientError
+from six.moves.urllib import error as urllib_error
+from six.moves.urllib import request as urllib_request
 import yaml
-import fabric
-from fabric.api import cd
-from fabric.api import env
-from fabric.api import execute
-from fabric.api import lcd
-from fabric.api import local
-from fabric.api import run
-from fabric.api import sudo
-from fabric.context_managers import shell_env
-from fabric.operations import get
-from fabric.operations import put
+from fabric import Config
+from fabric import Connection
 
 # Command line parser
 #-------------------------------------------------------------------------------
@@ -203,11 +196,11 @@ def block_until_http_ready(urlstring, wait_time=10, timeout=240):
         try:
             sys.stdout.write('.')
             sys.stdout.flush()
-            req = urllib2.Request(urlstring)
-            response = urllib2.urlopen(req)
+            req = urllib_request.Request(urlstring)
+            response = urllib_request.urlopen(req)
             #if response.code == 200:
             server_ready = True
-        except urllib2.URLError:
+        except urllib_error.URLError:
             pass
         time.sleep(wait_time)
         t_elapsed += wait_time
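
The hunk above swaps urllib2 for the six.moves aliases so the readiness poll imports cleanly on both Python 2 and 3. A standalone sketch of the same polling idea, with an arbitrary URL and timings rather than the test farm's (six is assumed installed, as the script already imports it):

```python
# Self-contained sketch of polling a URL with the six.moves imports used above.
import time

from six.moves.urllib import error as urllib_error
from six.moves.urllib import request as urllib_request


def wait_for_http(url, wait_time=10, timeout=240):
    """Return True once `url` answers, False if `timeout` seconds pass first."""
    elapsed = 0
    while elapsed < timeout:
        try:
            urllib_request.urlopen(urllib_request.Request(url))
            return True
        except urllib_error.URLError:
            time.sleep(wait_time)
            elapsed += wait_time
    return False


if __name__ == "__main__":
    print(wait_for_http("http://localhost:4000", wait_time=1, timeout=5))
```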
@@ -244,76 +237,85 @@ def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20
 # Fabric Routines
 #-------------------------------------------------------------------------------
-def local_git_clone(repo_url):
+def local_git_clone(local_cxn, repo_url):
     "clones master of repo_url"
-    with lcd(LOGDIR):
-        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
-        local('git clone %s letsencrypt'% repo_url)
-        local('tar czf le.tar.gz letsencrypt')
+    local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % LOGDIR)
+    local_cxn.local('cd %s && git clone %s letsencrypt'% (LOGDIR, repo_url))
+    local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt'% LOGDIR)
 
-def local_git_branch(repo_url, branch_name):
+def local_git_branch(local_cxn, repo_url, branch_name):
     "clones branch <branch_name> of repo_url"
-    with lcd(LOGDIR):
-        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
-        local('git clone %s letsencrypt --branch %s --single-branch'%(repo_url, branch_name))
-        local('tar czf le.tar.gz letsencrypt')
+    local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % LOGDIR)
+    local_cxn.local('cd %s && git clone %s letsencrypt --branch %s --single-branch'%
+        (LOGDIR, repo_url, branch_name))
+    local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt' % LOGDIR)
 
-def local_git_PR(repo_url, PRnumstr, merge_master=True):
+def local_git_PR(local_cxn, repo_url, PRnumstr, merge_master=True):
     "clones specified pull request from repo_url and optionally merges into master"
-    with lcd(LOGDIR):
-        local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
-        local('git clone %s letsencrypt'% repo_url)
-        local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest'%PRnumstr)
-        local('cd letsencrypt && git checkout lePRtest')
-        if merge_master:
-            local('cd letsencrypt && git remote update origin')
-            local('cd letsencrypt && git merge origin/master -m "testmerge"')
-        local('tar czf le.tar.gz letsencrypt')
+    local_cxn.local('cd %s && if [ -d letsencrypt ]; then rm -rf letsencrypt; fi' % LOGDIR)
+    local_cxn.local('cd %s && git clone %s letsencrypt' % (LOGDIR, repo_url))
+    local_cxn.local('cd %s && cd letsencrypt && '
+        'git fetch origin pull/%s/head:lePRtest' % (LOGDIR, PRnumstr))
+    local_cxn.local('cd %s && cd letsencrypt && git checkout lePRtest' % LOGDIR)
+    if merge_master:
+        local_cxn.local('cd %s && cd letsencrypt && git remote update origin' % LOGDIR)
+        local_cxn.local('cd %s && cd letsencrypt && '
+            'git merge origin/master -m "testmerge"' % LOGDIR)
+    local_cxn.local('cd %s && tar czf le.tar.gz letsencrypt' % LOGDIR)
 
-def local_repo_to_remote():
+def local_repo_to_remote(cxn):
     "copies local tarball of repo to remote"
-    with lcd(LOGDIR):
-        put(local_path='le.tar.gz', remote_path='')
-        run('tar xzf le.tar.gz')
+    filename = 'le.tar.gz'
+    local_path = os.path.join(LOGDIR, filename)
+    cxn.put(local=local_path, remote='')
+    cxn.run('tar xzf %s' % filename)
 
-def local_repo_clean():
+def local_repo_clean(local_cxn):
     "delete tarball"
-    with lcd(LOGDIR):
-        local('rm le.tar.gz')
+    filename = 'le.tar.gz'
+    local_path = os.path.join(LOGDIR, filename)
+    local_cxn.local('rm %s' % local_path)
 
-def deploy_script(scriptpath, *args):
+def deploy_script(cxn, scriptpath, *args):
     "copies to remote and executes local script"
-    #with lcd('scripts'):
-    put(local_path=scriptpath, remote_path='', mirror_local_mode=True)
+    cxn.put(local=scriptpath, remote='', preserve_mode=True)
     scriptfile = os.path.split(scriptpath)[1]
     args_str = ' '.join(args)
-    run('./'+scriptfile+' '+args_str)
+    cxn.run('./'+scriptfile+' '+args_str)
 
-def run_boulder():
-    with cd('$GOPATH/src/github.com/letsencrypt/boulder'):
-        run('sudo docker-compose up -d')
+def run_boulder(cxn):
+    boulder_path = '$GOPATH/src/github.com/letsencrypt/boulder'
+    cxn.run('cd %s && sudo docker-compose up -d' % boulder_path)
 
-def config_and_launch_boulder(instance):
-    execute(deploy_script, 'scripts/boulder_config.sh')
-    execute(run_boulder)
+def config_and_launch_boulder(cxn, instance):
+    # yes, we're hardcoding the gopath. it's a predetermined AMI.
+    with cxn.prefix('export GOPATH=/home/ubuntu/gopath'):
+        deploy_script(cxn, 'scripts/boulder_config.sh')
+        run_boulder(cxn)
 
-def install_and_launch_certbot(instance, boulder_url, target):
-    execute(local_repo_to_remote)
-    with shell_env(BOULDER_URL=boulder_url,
-                   PUBLIC_IP=instance.public_ip_address,
-                   PRIVATE_IP=instance.private_ip_address,
-                   PUBLIC_HOSTNAME=instance.public_dns_name,
-                   PIP_EXTRA_INDEX_URL=cl_args.alt_pip,
-                   OS_TYPE=target['type']):
-        execute(deploy_script, cl_args.test_script)
+def install_and_launch_certbot(cxn, instance, boulder_url, target):
+    local_repo_to_remote(cxn)
+    # This needs to be like this, I promise. 1) The env argument to run doesn't work.
+    # See https://github.com/fabric/fabric/issues/1744. 2) prefix() sticks an && between
+    # the commands, so it needs to be exports rather than no &&s in between for the script subshell.
+    with cxn.prefix('export BOULDER_URL=%s && export PUBLIC_IP=%s && export PRIVATE_IP=%s && '
+                    'export PUBLIC_HOSTNAME=%s && export PIP_EXTRA_INDEX_URL=%s && '
+                    'export OS_TYPE=%s' %
+                    (boulder_url,
+                     instance.public_ip_address,
+                     instance.private_ip_address,
+                     instance.public_dns_name,
+                     cl_args.alt_pip,
+                     target['type'])):
+        deploy_script(cxn, cl_args.test_script)
 
-def grab_certbot_log():
+def grab_certbot_log(cxn):
     "grabs letsencrypt.log via cat into logged stdout"
-    sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \
-        cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi')
+    cxn.sudo('/bin/bash -l -i -c \'if [ -f "/var/log/letsencrypt/letsencrypt.log" ]; then ' +
+        'cat "/var/log/letsencrypt/letsencrypt.log"; else echo "[novarlog]"; fi\'')
     # fallback file if /var/log is unwriteable...? correct?
-    sudo('if [ -f ./certbot.log ]; then \
-        cat ./certbot.log; else echo "[nolocallog]"; fi')
+    cxn.sudo('/bin/bash -l -i -c \'if [ -f ./certbot.log ]; then ' +
+        'cat ./certbot.log; else echo "[nolocallog]"; fi\'')
 
 def create_client_instance(ec2_client, target, security_group_id, subnet_id):
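
Fabric 2 drops the fabric.api module, so there is no lcd()/cd()/shell_env() for these helpers to lean on; the rewritten versions splice `cd` into each command string and pass environment variables through Connection.prefix() with chained exports, as the fabric #1744 comment above explains. A small sketch of that prefix pattern, with placeholder host and values:

```python
# Sketch of the prefix()-based environment workaround noted in the comments
# above; the host and variable values here are placeholders.
from fabric import Connection

with Connection("ubuntu@203.0.113.10") as cxn:
    # prefix() joins its argument onto every later command with "&&", so the
    # exports are chained explicitly and land in the same subshell as the
    # command that needs them (the fabric/fabric#1744 workaround).
    with cxn.prefix("export BOULDER_URL=http://10.0.0.5:4000/directory && "
                    "export OS_TYPE=ubuntu"):
        cxn.run("./test_script.sh")  # runs as: export ... && export ... && ./test_script.sh

    # Directory changes are handled the same way, by building them into the command:
    cxn.run("cd /tmp && tar xzf le.tar.gz")
```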
@@ -341,7 +343,7 @@ def create_client_instance(ec2_client, target, security_group_id, subnet_id):
                            userdata=userdata)
 
-def test_client_process(inqueue, outqueue, boulder_url):
+def test_client_process(fab_config, inqueue, outqueue, boulder_url):
     cur_proc = mp.current_process()
     for inreq in iter(inqueue.get, SENTINEL):
         ii, instance_id, target = inreq
@@ -358,30 +360,31 @@ def test_client_process(inqueue, outqueue, boulder_url):
         print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
         instance = block_until_instance_ready(instance)
         print("server %s at %s"%(instance, instance.public_ip_address))
-        env.host_string = "%s@%s"%(target['user'], instance.public_ip_address)
-        print(env.host_string)
-        try:
-            install_and_launch_certbot(instance, boulder_url, target)
-            outqueue.put((ii, target, Status.PASS))
-            print("%s - %s SUCCESS"%(target['ami'], target['name']))
-        except:
-            outqueue.put((ii, target, Status.FAIL))
-            print("%s - %s FAIL"%(target['ami'], target['name']))
-            traceback.print_exc(file=sys.stdout)
-            pass
+        host_string = "%s@%s"%(target['user'], instance.public_ip_address)
+        print(host_string)
+        with Connection(host_string, config=fab_config) as cxn:
+            try:
+                install_and_launch_certbot(cxn, instance, boulder_url, target)
+                outqueue.put((ii, target, Status.PASS))
+                print("%s - %s SUCCESS"%(target['ami'], target['name']))
+            except:
+                outqueue.put((ii, target, Status.FAIL))
+                print("%s - %s FAIL"%(target['ami'], target['name']))
+                traceback.print_exc(file=sys.stdout)
+                pass
 
-        # append server certbot.log to each per-machine output log
-        print("\n\ncertbot.log\n" + "-"*80 + "\n")
-        try:
-            execute(grab_certbot_log)
-        except:
-            print("log fail\n")
-            traceback.print_exc(file=sys.stdout)
-            pass
+            # append server certbot.log to each per-machine output log
+            print("\n\ncertbot.log\n" + "-"*80 + "\n")
+            try:
+                grab_certbot_log(cxn)
+            except:
+                print("log fail\n")
+                traceback.print_exc(file=sys.stdout)
+                pass
 
-def cleanup(cl_args, instances, targetlist):
+def cleanup(cl_args, instances, targetlist, boulder_server):
     print('Logs in ', LOGDIR)
     # If lengths of instances and targetlist aren't equal, instances failed to
     # start before running tests so leaving instances running for debugging
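
In the rewritten worker above, each process receives the shared fab_config, builds its own Connection, and uses it as a context manager so the SSH session is closed even when a test fails. A simplified sketch of that shape, with placeholder host, key path, and command:

```python
# Simplified sketch of the per-worker connection handling used above; the
# host string, key path, and command are placeholders.
import traceback

from fabric import Config, Connection


def run_one_target(fab_config, host_string):
    # The Connection opens lazily on first use and is guaranteed to be closed
    # when the with-block exits, pass or fail.
    with Connection(host_string, config=fab_config) as cxn:
        try:
            cxn.run("./letsencrypt/tests/placeholder-test.sh")
            return "PASS"
        except Exception:
            traceback.print_exc()
            return "FAIL"


if __name__ == "__main__":
    cfg = Config(overrides={"connect_kwargs": {"key_filename": ["/path/to/key.pem"]}})
    print(run_one_target(cfg, "ubuntu@203.0.113.10"))
```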
@@ -402,19 +405,25 @@ def cleanup(cl_args, instances, targetlist):
 def main():
     # Fabric library controlled through global env parameters
-    env.key_filename = KEYFILE
-    env.shell = '/bin/bash -l -i -c'
-    env.connection_attempts = 5
-    env.timeout = 10
-    # replace default SystemExit thrown by fabric during trouble
-    class FabricException(Exception):
-        pass
-    env['abort_exception'] = FabricException
+    fab_config = Config(overrides={
+        "connect_kwargs": {
+            "key_filename": [KEYFILE], # https://github.com/fabric/fabric/issues/2007
+        },
+        "run": {
+            "echo": True,
+            "pty": True,
+        },
+        "timeouts": {
+            "connect": 10,
+        },
+    })
+    # no network connection, so don't worry about closing this one.
+    local_cxn = Connection('localhost', config=fab_config)
 
     # Set up local copy of git repo
     #-------------------------------------------------------------------------------
     print("Making local dir for test repo and logs: %s"%LOGDIR)
-    local('mkdir %s'%LOGDIR)
+    local_cxn.local('mkdir %s'%LOGDIR)
 
     # figure out what git object to test and locally create it in LOGDIR
     print("Making local git repo")
@@ -422,14 +431,14 @@ def main():
         if cl_args.pull_request != '~':
             print('Testing PR %s '%cl_args.pull_request,
                   "MERGING into master" if cl_args.merge_master else "")
-            execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
+            local_git_PR(local_cxn, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
         elif cl_args.branch != '~':
             print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
-            execute(local_git_branch, cl_args.repo, cl_args.branch)
+            local_git_branch(local_cxn, cl_args.repo, cl_args.branch)
         else:
             print('Testing master of %s'%cl_args.repo)
-            execute(local_git_clone, cl_args.repo)
-    except FabricException:
+            local_git_clone(local_cxn, cl_args.repo)
+    except BaseException:
         print("FAIL: trouble with git repo")
         traceback.print_exc()
         exit()
@@ -437,7 +446,7 @@ def main():
 
     # Set up EC2 instances
     #-------------------------------------------------------------------------------
-    configdata = yaml.load(open(cl_args.config_file, 'r'))
+    configdata = yaml.safe_load(open(cl_args.config_file, 'r'))
     targetlist = configdata['targets']
     print('Testing against these images: [%d total]'%len(targetlist))
     for target in targetlist:
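
The config file is now parsed with yaml.safe_load, which builds only plain Python objects rather than the arbitrary tagged constructors yaml.load allows. A sketch shaped like the targets list the script iterates over, with a placeholder file name:

```python
# Sketch of the safe_load change; the file name is a placeholder, the keys
# mirror the target fields the script reads (name, ami).
import yaml

with open("targets.yaml") as f:
    configdata = yaml.safe_load(f)  # plain dicts/lists/scalars only

for target in configdata["targets"]:
    print(target["name"], target["ami"])
```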
@@ -511,15 +520,16 @@ def main():
     print(" server %s"%boulder_server)
 
-    # env.host_string defines the ssh user and host for connection
-    env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
-    print("Boulder Server at (SSH):", env.host_string)
+    # host_string defines the ssh user and host for connection
+    host_string = "ubuntu@%s"%boulder_server.public_ip_address
+    print("Boulder Server at (SSH):", host_string)
     if not boulder_preexists:
         print("Configuring and Launching Boulder")
-        config_and_launch_boulder(boulder_server)
-        # blocking often unnecessary, but cheap EC2 VMs can get very slow
-        block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
-                               wait_time=10, timeout=500)
+        with Connection(host_string, config=fab_config) as boulder_cxn:
+            config_and_launch_boulder(boulder_cxn, boulder_server)
+            # blocking often unnecessary, but cheap EC2 VMs can get very slow
+            block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
+                                   wait_time=10, timeout=500)
 
     boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
     print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
@@ -545,7 +555,7 @@ def main():
 
     # initiate process execution
     for i in range(num_processes):
-        p = mp.Process(target=test_client_process, args=(inqueue, outqueue, boulder_url))
+        p = mp.Process(target=test_client_process, args=(fab_config, inqueue, outqueue, boulder_url))
         jobs.append(p)
         p.daemon = True # kills subprocesses if parent is killed
         p.start()
@@ -569,7 +579,7 @@ def main():
         outqueue.put(SENTINEL)
 
     # clean up
-    execute(local_repo_clean)
+    local_repo_clean(local_cxn)
 
     # print and save summary results
     results_file = open(LOGDIR+'/results', 'w')
@@ -594,10 +604,7 @@ def main():
         sys.exit(1)
     finally:
-        cleanup(cl_args, instances, targetlist)
-        # kill any connections
-        fabric.network.disconnect_all()
+        cleanup(cl_args, instances, targetlist, boulder_server)
 
 
 if __name__ == '__main__':

tests/letstest/requirements.txt

@@ -5,9 +5,9 @@ cffi==1.14.0
 cryptography==2.8
 docutils==0.15.2
 enum34==1.1.9
-Fabric==1.14.1
-futures==3.3.0
+Fabric==2.5.0
 ipaddress==1.0.23
+Invoke==1.4.1
 jmespath==0.9.5
 paramiko==2.7.1
 pycparser==2.19
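
The pin bump replaces Fabric 1.14.1 (and the Python-2-only futures backport) with Fabric 2.5.0 plus an explicit pin for Invoke, the task-execution library Fabric 2 builds on. A quick hedged check, assuming the pins above are installed, that an environment resolves to those versions:

```python
# Print the resolved versions to compare against the pins in requirements.txt.
import fabric
import invoke
import paramiko

print("Fabric  ", fabric.__version__)    # expected: 2.5.0
print("Invoke  ", invoke.__version__)    # expected: 1.4.1
print("paramiko", paramiko.__version__)  # expected: 2.7.1
```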