Compare commits

...

34 Commits

Author SHA1 Message Date
Brad Warren 1ea5bb5fb8 Merge branch 'louder' into test-letstest-all-changes2 2019-05-16 11:41:43 -07:00
Brad Warren ebeb7dd8ce flush output 2019-05-16 11:40:42 -07:00
Brad Warren c6537a3d6f Merge branch 'louder' into test-letstest-all-changes2 2019-05-16 10:31:28 -07:00
Brad Warren 0e9cadcafd Occasionally print output in test farm tests. 2019-05-16 10:24:23 -07:00
Brad Warren d203947757 Merge remote-tracking branch 'origin/travis-test-farm' into test-letstest-all-changes2 2019-05-15 20:18:37 -07:00
Brad Warren bd68c00ac7 Merge branch 'dont-suppress-failure' into test-letstest-all-changes2 2019-05-15 20:18:30 -07:00
Brad Warren 1d548c80a6 Merge remote-tracking branch 'origin/letstest-no-profile' into test-letstest-all-changes2 2019-05-15 20:16:36 -07:00
Brad Warren 9044f6c975 Merge branch 'fix-centos6-test-sdists' into test-letstest-all-changes2 2019-05-15 20:15:40 -07:00
Brad Warren 03110c0857 Merge branch 'apache2_targets' into test-letstest-all-changes2 2019-05-15 20:15:30 -07:00
Brad Warren d11fa0e2d7 Merge branch 'letstest-exit-status' into test-letstest-all-changes2 2019-05-15 20:15:24 -07:00
Brad Warren 7ea7638f29 Merge branch 'letstest-requirements' into test-letstest-all-changes2 2019-05-15 20:14:01 -07:00
Brad Warren 308157ed06 Merge branch 'no-sharing' into test-letstest-all-changes2 2019-05-15 20:13:53 -07:00
Brad Warren 8d898a7183 Incrementally build instances list. 2019-05-15 20:11:46 -07:00
Brad Warren 91b36e3b7f Fix cleanup on failure. 2019-05-15 18:45:10 -07:00
Brad Warren a4b7fafe4f tox cleanup 2019-05-15 18:28:24 -07:00
Brad Warren afd823c7ec Make script executable 2019-05-15 18:22:57 -07:00
Brad Warren 500ae4b85e Move test commands to external script. 2019-05-15 18:22:10 -07:00
Brad Warren 944b75d178 Add whitelist_externals 2019-05-15 18:09:00 -07:00
Brad Warren 8952e91fca Add test farm tests to .travis.yml. 2019-05-15 18:04:38 -07:00
Brad Warren a8d89611a1 Add testenv:travis-test-farm-tests 2019-05-15 18:02:40 -07:00
Brad Warren b9d71190a7 Allow magic profile name none. 2019-05-15 16:56:06 -07:00
Brad Warren a76020f6bc Add encrypted private key. 2019-05-15 15:44:25 -07:00
Brad Warren 846de3cc65 fix venv path 2019-05-15 15:16:15 -07:00
Brad Warren 10fa95cba8 Use Python 3 when appropriate. 2019-05-15 15:09:33 -07:00
Brad Warren 60fba5fcfe Update known good apache2 targets. 2019-05-15 15:03:10 -07:00
Brad Warren ff7fc8486a Exit with a nonzero status when tests fail. 2019-05-14 16:12:09 -07:00
Brad Warren bcfb070a9d update readme 2019-05-13 15:55:58 -07:00
Brad Warren 1ad6857b56 Add requirements.txt. 2019-05-13 15:55:33 -07:00
Brad Warren 3c041f1655 Create main function. 2019-05-09 16:43:40 -07:00
Brad Warren a7fd7cdd87 Pass in boulder_url. 2019-05-09 16:43:09 -07:00
Brad Warren 4f510706a1 Remove global boto3 state. 2019-05-09 16:41:02 -07:00
Brad Warren 7f75454675 Don't use EC2 global to block on instance start. 2019-05-09 16:33:30 -07:00
Brad Warren 3fbdef6a64 Set sentinel at top of script. 2019-05-09 16:31:26 -07:00
Brad Warren 2065775193 Set LOGDIR at top of script. 2019-05-09 16:30:56 -07:00
9 changed files with 350 additions and 268 deletions

.travis.yml

@@ -94,6 +94,11 @@ matrix:
<<: *not-on-master
# Extended test suite on cron jobs and pushes to tested branches other than master
- python: "2.7"
env:
- TOXENV=travis-test-farm-tests
- secure: "f+j/Lj9s1lcuKo5sEFrlRd1kIAMnIJI4z0MTI7QF8jl9Fkmbx7KECGzw31TNgzrOSzxSapHbcueFYvNCLKST+kE/8ogMZBbwqXfEDuKpyF6BY3uYoJn+wPVE5pIb8Hhe08xPte8TTDSMIyHI3EyTfcAKrIreauoArePvh/cRvSw="
<<: *extended-test-suite
- python: "3.7"
dist: xenial
env: TOXENV=py37 CERTBOT_NO_PIN=1

tests/letstest/README.md

@@ -14,15 +14,17 @@ Simple AWS testfarm scripts for certbot client testing
- AWS EC2 has a default limit of 20 t2/t1 instances; if more
are needed, they must be requested via an online webform.
## Usage
- To install the necessary dependencies on Ubuntu 16.04, run:
## Installation and configuration
These tests require Python 2.7, awscli, boto3, PyYAML, and fabric<2.0. If you
have Python 2.7 and virtualenv installed, you can use requirements.txt to
create a virtual environment with a known set of dependencies by running:
```
sudo apt install awscli python-yaml python-boto3 fabric
virtualenv --python $(command -v python2.7 || command -v python2 || command -v python) venv
. ./venv/bin/activate
pip install --requirement requirements.txt
```
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
You can then configure AWS credentials and create a key by running:
```
>aws configure --profile <profile name>
[interactive: enter secrets for IAM role]
@@ -30,9 +32,10 @@ sudo apt install awscli python-yaml python-boto3 fabric
```
Note: whatever you pick for `<key name>` will be shown to other users with AWS access.
When prompted for a default region name, enter: `us-east-1`
When prompted for a default region name, enter: `us-east-1`.
then:
## Usage
To run tests, activate the virtual environment you created above and run:
```
>python multitester.py targets.yaml /path/to/your/key.pem <profile name> scripts/<test to run>
```

tests/letstest/apache2_targets.yaml

@@ -1,13 +1,18 @@
targets:
#-----------------------------------------------------------------------------
# Apache 2.4
- ami: ami-26d5af4c
name: ubuntu15.10
#Ubuntu
- ami: ami-064bd2d44a1d6c097
name: ubuntu18.10
type: ubuntu
virt: hvm
user: ubuntu
- ami: ami-d92e6bb3
name: ubuntu15.04LTS
- ami: ami-012fd5eb46f56731f
name: ubuntu18.04LTS
type: ubuntu
virt: hvm
user: ubuntu
- ami: ami-09677e0a6b14905b0
name: ubuntu16.04LTS
type: ubuntu
virt: hvm
user: ubuntu
@@ -21,37 +26,29 @@ targets:
type: ubuntu
virt: pv
user: ubuntu
- ami: ami-116d857a
name: debian8.1
type: debian
#-----------------------------------------------------------------------------
# Debian
- ami: ami-003f19e0e687de1cd
name: debian9
type: ubuntu
virt: hvm
user: admin
- ami: ami-077bf3962f29d3fa4
name: debian8.1
type: ubuntu
virt: hvm
user: admin
userdata: |
#cloud-init
runcmd:
- [ apt-get, install, -y, curl ]
#-----------------------------------------------------------------------------
# Apache 2.2
# - ami: ami-0611546c
# name: ubuntu12.04LTS
# type: ubuntu
# virt: hvm
# user: ubuntu
# - ami: ami-e0efab88
# name: debian7.8.aws.1
# type: debian
# virt: hvm
# user: admin
# userdata: |
# #cloud-init
# runcmd:
# - [ apt-get, install, -y, curl ]
# - ami: ami-e6eeaa8e
# name: debian7.8.aws.1_32bit
# type: debian
# virt: pv
# user: admin
# userdata: |
# #cloud-init
# runcmd:
# - [ apt-get, install, -y, curl ]
# Fedora
- ami: ami-5c69df23
name: fedora28
type: centos
virt: hvm
user: fedora
#-----------------------------------------------------------------------------
# CentOS
- ami: ami-9887c6e7
name: centos7
type: centos
virt: hvm
user: centos
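
Each entry in this targets file is the unit of work for multitester.py: the AMI to boot, the SSH user, and the virtualization type that decides the instance class. A minimal sketch of how an entry is consumed, assuming PyYAML and a local copy of the file (using yaml.safe_load where multitester.py calls bare yaml.load):

```
# Sketch: load a targets file and derive per-target launch settings.
# The machine-type choice mirrors the hvm/pv logic in multitester.py below.
import yaml

with open('apache2_targets.yaml') as f:
    targetlist = yaml.safe_load(f)['targets']

for target in targetlist:
    # hvm targets get t2-class instances; pv targets (32-bit) get t1/c1-class.
    machine_type = 't2.micro' if target['virt'] == 'hvm' else 't1.micro'
    userdata = target.get('userdata', '')  # optional cloud-init payload
    print(target['ami'], target['name'], machine_type, target['user'])
```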

tests/letstest/multitester.py

@@ -94,18 +94,21 @@ cl_args = parser.parse_args()
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = cl_args.aws_profile
PROFILE = None if cl_args.aws_profile.lower() == 'none' else cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-072a9534772bec854' # premade shared boulder AMI 18.04LTS us-east-1
LOGDIR = "" #points to logging / working directory
# boto3/AWS api globals
AWS_SESSION = None
EC2 = None
LOGDIR = "letest-%d"%int(time.time()) #points to logging / working directory
SECURITY_GROUP_NAME = 'certbot-security-group'
SENTINEL = None #queue kill signal
SUBNET_NAME = 'certbot-subnet'
class Status(object):
"""Possible statuses of client tests."""
PASS = 'pass'
FAIL = 'fail'
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def should_use_subnet(subnet):
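
The PROFILE change above is what lets CI pass the literal profile name none: boto3.session.Session(profile_name=None) falls back to the default credential chain (environment variables such as AWS_ACCESS_KEY_ID, shared config files, instance roles) instead of requiring a named profile. A minimal sketch of the resolution; the region matches the AWS_DEFAULT_REGION set in tox.ini below:

```
# Sketch: resolve the profile argument the way the diff above does.
# The magic name 'none' (any case) selects boto3's default credential chain.
import boto3

def make_session(aws_profile):
    profile = None if aws_profile.lower() == 'none' else aws_profile
    return boto3.session.Session(profile_name=profile)

session = make_session('none')
ec2 = session.resource('ec2', region_name='us-east-1')
```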
@@ -139,7 +142,8 @@ def make_security_group(vpc):
mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
return mysg
def make_instance(instance_name,
def make_instance(ec2_client,
instance_name,
ami_id,
keyname,
security_group_id,
@@ -147,8 +151,8 @@ def make_instance(instance_name,
machine_type='t2.micro',
userdata=""): #userdata contains bash or cloud-init script
new_instance = EC2.create_instances(
BlockDeviceMappings=_get_block_device_mappings(ami_id),
new_instance = ec2_client.create_instances(
BlockDeviceMappings=_get_block_device_mappings(ec2_client, ami_id),
ImageId=ami_id,
SecurityGroupIds=[security_group_id],
SubnetId=subnet_id,
@@ -173,7 +177,7 @@ def make_instance(instance_name,
raise
return new_instance
def _get_block_device_mappings(ami_id):
def _get_block_device_mappings(ec2_client, ami_id):
"""Returns the list of block device mappings to ensure cleanup.
This list sets connected EBS volumes to be deleted when the EC2
@@ -186,7 +190,7 @@ def _get_block_device_mappings(ami_id):
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html
return [{'DeviceName': mapping['DeviceName'],
'Ebs': {'DeleteOnTermination': True}}
for mapping in EC2.Image(ami_id).block_device_mappings
for mapping in ec2_client.Image(ami_id).block_device_mappings
if not mapping.get('Ebs', {}).get('DeleteOnTermination', True)]
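
The comprehension above emits an override only for mappings whose EBS volume would not already be deleted on termination; mappings without an Ebs entry at all are skipped. A small worked example with fabricated data in the same shape as ec2.Image(...).block_device_mappings:

```
# Worked example of the filter in _get_block_device_mappings; values are made up.
image_mappings = [
    {'DeviceName': '/dev/sda1', 'Ebs': {'DeleteOnTermination': True}},  # already cleans up
    {'DeviceName': '/dev/sdb', 'Ebs': {'DeleteOnTermination': False}},  # would leak a volume
    {'DeviceName': '/dev/sdc'},                                         # no EBS entry at all
]

overrides = [{'DeviceName': m['DeviceName'], 'Ebs': {'DeleteOnTermination': True}}
             for m in image_mappings
             if not m.get('Ebs', {}).get('DeleteOnTermination', True)]

assert overrides == [{'DeviceName': '/dev/sdb', 'Ebs': {'DeleteOnTermination': True}}]
```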
@@ -225,20 +229,17 @@ def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
"Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
# the reinstantiation from id is necessary to force boto3
# to correctly update the 'state' variable during init
_id = booting_instance.id
_instance = EC2.Instance(id=_id)
_state = _instance.state['Name']
_ip = _instance.public_ip_address
while _state != 'running' or _ip is None:
state = booting_instance.state['Name']
ip = booting_instance.public_ip_address
while state != 'running' or ip is None:
time.sleep(wait_time)
_instance = EC2.Instance(id=_id)
_state = _instance.state['Name']
_ip = _instance.public_ip_address
block_until_ssh_open(_ip)
# The instance needs to be reloaded to update its local attributes.
booting_instance.reload()
state = booting_instance.state['Name']
ip = booting_instance.public_ip_address
block_until_ssh_open(ip)
time.sleep(extra_wait_time)
return _instance
return booting_instance
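
The rewritten loop keeps a single Instance object and calls reload() to refresh its cached state and public_ip_address, instead of constructing a fresh EC2.Instance from the id on every pass. A minimal sketch of the polling pattern; as an aside, boto3 also offers instance.wait_until_running(), though that waiter alone does not guarantee a public IP has been assigned:

```
# Sketch: poll a boto3 Instance until it is running and has a public IP.
# 'instance' is assumed to come from something like ec2.create_instances(...)[0].
import time

def wait_for_instance(instance, wait_time=5):
    while instance.state['Name'] != 'running' or instance.public_ip_address is None:
        time.sleep(wait_time)
        instance.reload()  # refresh cached attributes from the EC2 API
    return instance
```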
# Fabric Routines
@@ -314,53 +315,56 @@ def grab_certbot_log():
sudo('if [ -f ./certbot.log ]; then \
cat ./certbot.log; else echo "[nolocallog]"; fi')
def create_client_instances(targetlist, security_group_id, subnet_id):
"Create a fleet of client instances"
instances = []
print("Creating instances: ", end="")
for target in targetlist:
if target['virt'] == 'hvm':
machine_type = 't2.medium' if cl_args.fast else 't2.micro'
else:
# 32 bit systems
machine_type = 'c1.medium' if cl_args.fast else 't1.micro'
if 'userdata' in target.keys():
userdata = target['userdata']
else:
userdata = ''
name = 'le-%s'%target['name']
print(name, end=" ")
instances.append(make_instance(name,
target['ami'],
KEYNAME,
machine_type=machine_type,
security_group_id=security_group_id,
subnet_id=subnet_id,
userdata=userdata))
print()
return instances
def create_client_instance(ec2_client, target, security_group_id, subnet_id):
"""Create a single client instance for running tests."""
if target['virt'] == 'hvm':
machine_type = 't2.medium' if cl_args.fast else 't2.micro'
else:
# 32 bit systems
machine_type = 'c1.medium' if cl_args.fast else 't1.micro'
if 'userdata' in target.keys():
userdata = target['userdata']
else:
userdata = ''
name = 'le-%s'%target['name']
print(name, end=" ")
return make_instance(ec2_client,
name,
target['ami'],
KEYNAME,
machine_type=machine_type,
security_group_id=security_group_id,
subnet_id=subnet_id,
userdata=userdata)
def test_client_process(inqueue, outqueue):
def test_client_process(inqueue, outqueue, boulder_url):
cur_proc = mp.current_process()
for inreq in iter(inqueue.get, SENTINEL):
ii, target = inreq
ii, instance_id, target = inreq
# Each client process is given its own session due to the suggestion at
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing.
aws_session = boto3.session.Session(profile_name=PROFILE)
ec2_client = aws_session.resource('ec2')
instance = ec2_client.Instance(id=instance_id)
#save all stdout to log file
sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w')
print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
instances[ii] = block_until_instance_ready(instances[ii])
print("server %s at %s"%(instances[ii], instances[ii].public_ip_address))
env.host_string = "%s@%s"%(target['user'], instances[ii].public_ip_address)
instance = block_until_instance_ready(instance)
print("server %s at %s"%(instance, instance.public_ip_address))
env.host_string = "%s@%s"%(target['user'], instance.public_ip_address)
print(env.host_string)
try:
install_and_launch_certbot(instances[ii], boulder_url, target)
outqueue.put((ii, target, 'pass'))
install_and_launch_certbot(instance, boulder_url, target)
outqueue.put((ii, target, Status.PASS))
print("%s - %s SUCCESS"%(target['ami'], target['name']))
except:
outqueue.put((ii, target, 'fail'))
outqueue.put((ii, target, Status.FAIL))
print("%s - %s FAIL"%(target['ami'], target['name']))
traceback.print_exc(file=sys.stdout)
pass
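
Only the plain instance_id string crosses the work queue; each worker process rebuilds its own session, resource, and Instance from it. That follows the boto3 multiprocessing guidance linked in the comment above: sessions and resource objects are not safe to share across processes. A stripped-down sketch of the pattern, with the profile and queue payload as stand-ins for the real globals:

```
# Sketch: ship picklable ids through the queue, build AWS handles per process.
import boto3

def worker(inqueue, sentinel=None):
    session = boto3.session.Session(profile_name=None)  # one session per process
    ec2 = session.resource('ec2', region_name='us-east-1')
    for instance_id in iter(inqueue.get, sentinel):
        instance = ec2.Instance(id=instance_id)  # rehydrate from the plain id
        print(instance_id, instance)  # stand-in for running the real test
```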
@@ -377,7 +381,10 @@ def test_client_process(inqueue, outqueue):
def cleanup(cl_args, instances, targetlist):
print('Logs in ', LOGDIR)
if not cl_args.saveinstances:
# If lengths of instances and targetlist aren't equal, instances failed to
# start before running tests so leaving instances running for debugging
# isn't very useful. Let's clean up after ourselves instead.
if len(instances) != len(targetlist) or not cl_args.saveinstances:
print('Terminating EC2 Instances')
if cl_args.killboulder:
boulder_server.terminate()
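
The cleanup predicate terminates instances unless every requested instance actually started and the user passed --saveinstances; a partial fleet is always torn down because it is not useful for debugging. A tiny check of the predicate under its three interesting inputs:

```
# Worked check of the cleanup predicate above; the counts are illustrative.
def should_terminate(num_instances, num_targets, saveinstances):
    return num_instances != num_targets or not saveinstances

assert should_terminate(3, 5, saveinstances=True)      # partial start-up: clean up anyway
assert should_terminate(5, 5, saveinstances=False)     # normal run without --saveinstances
assert not should_terminate(5, 5, saveinstances=True)  # the only case that keeps instances
```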
@@ -391,187 +398,202 @@ def cleanup(cl_args, instances, targetlist):
"%s@%s"%(target['user'], instances[ii].public_ip_address))
def main():
# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10
# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
pass
env['abort_exception'] = FabricException
#-------------------------------------------------------------------------------
# SCRIPT BEGINS
#-------------------------------------------------------------------------------
# Set up local copy of git repo
#-------------------------------------------------------------------------------
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)
# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10
# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
pass
env['abort_exception'] = FabricException
# figure out what git object to test and locally create it in LOGDIR
print("Making local git repo")
try:
if cl_args.pull_request != '~':
print('Testing PR %s '%cl_args.pull_request,
"MERGING into master" if cl_args.merge_master else "")
execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
elif cl_args.branch != '~':
print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
execute(local_git_branch, cl_args.repo, cl_args.branch)
else:
print('Testing master of %s'%cl_args.repo)
execute(local_git_clone, cl_args.repo)
except FabricException:
print("FAIL: trouble with git repo")
traceback.print_exc()
exit()
# Set up local copy of git repo
#-------------------------------------------------------------------------------
LOGDIR = "letest-%d"%int(time.time())
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)
# figure out what git object to test and locally create it in LOGDIR
print("Making local git repo")
try:
if cl_args.pull_request != '~':
print('Testing PR %s '%cl_args.pull_request,
"MERGING into master" if cl_args.merge_master else "")
execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
elif cl_args.branch != '~':
print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
execute(local_git_branch, cl_args.repo, cl_args.branch)
# Set up EC2 instances
#-------------------------------------------------------------------------------
configdata = yaml.load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
print(target['ami'], target['name'])
print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
aws_session = boto3.session.Session(profile_name=PROFILE)
ec2_client = aws_session.resource('ec2')
print("Determining Subnet")
for subnet in ec2_client.subnets.all():
if should_use_subnet(subnet):
subnet_id = subnet.id
vpc_id = subnet.vpc.id
break
else:
print('Testing master of %s'%cl_args.repo)
execute(local_git_clone, cl_args.repo)
except FabricException:
print("FAIL: trouble with git repo")
traceback.print_exc()
exit()
print("No usable subnet exists!")
print("Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
print("that maps public IPv4 addresses to instances launched in the subnet.")
sys.exit(1)
print("Making Security Group")
vpc = ec2_client.Vpc(vpc_id)
sg_exists = False
for sg in vpc.security_groups.all():
if sg.group_name == SECURITY_GROUP_NAME:
security_group_id = sg.id
sg_exists = True
print(" %s already exists"%SECURITY_GROUP_NAME)
if not sg_exists:
security_group_id = make_security_group(vpc).id
time.sleep(30)
boulder_preexists = False
boulder_servers = ec2_client.instances.filter(Filters=[
{'Name': 'tag:Name', 'Values': ['le-boulderserver']},
{'Name': 'instance-state-name', 'Values': ['running']}])
boulder_server = next(iter(boulder_servers), None)
print("Requesting Instances...")
if boulder_server:
print("Found existing boulder server:", boulder_server)
boulder_preexists = True
else:
print("Can't find a boulder server, starting one...")
boulder_server = make_instance(ec2_client,
'le-boulderserver',
BOULDER_AMI,
KEYNAME,
machine_type='t2.micro',
#machine_type='t2.medium',
security_group_id=security_group_id,
subnet_id=subnet_id)
instances = []
try:
if not cl_args.boulderonly:
print("Creating instances: ", end="")
for target in targetlist:
instances.append(create_client_instance(ec2_client, target, security_group_id, subnet_id))
print()
# Configure and launch boulder server
#-------------------------------------------------------------------------------
print("Waiting on Boulder Server")
boulder_server = block_until_instance_ready(boulder_server)
print(" server %s"%boulder_server)
# Set up EC2 instances
#-------------------------------------------------------------------------------
configdata = yaml.load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
print(target['ami'], target['name'])
# env.host_string defines the ssh user and host for connection
env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
print("Boulder Server at (SSH):", env.host_string)
if not boulder_preexists:
print("Configuring and Launching Boulder")
config_and_launch_boulder(boulder_server)
# blocking often unnecessary, but cheap EC2 VMs can get very slow
block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
wait_time=10, timeout=500)
print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
AWS_SESSION = boto3.session.Session(profile_name=PROFILE)
EC2 = AWS_SESSION.resource('ec2')
boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
print("Boulder Server at (EC2 private ip): %s"%boulder_url)
print("Determining Subnet")
for subnet in EC2.subnets.all():
if should_use_subnet(subnet):
subnet_id = subnet.id
vpc_id = subnet.vpc.id
break
else:
print("No usable subnet exists!")
print("Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
print("that maps public IPv4 addresses to instances launched in the subnet.")
sys.exit(1)
if cl_args.boulderonly:
sys.exit(0)
print("Making Security Group")
vpc = EC2.Vpc(vpc_id)
sg_exists = False
for sg in vpc.security_groups.all():
if sg.group_name == SECURITY_GROUP_NAME:
security_group_id = sg.id
sg_exists = True
print(" %s already exists"%SECURITY_GROUP_NAME)
if not sg_exists:
security_group_id = make_security_group(vpc).id
time.sleep(30)
# Install and launch client scripts in parallel
#-------------------------------------------------------------------------------
print("Uploading and running test script in parallel: %s"%cl_args.test_script)
print("Output routed to log files in %s"%LOGDIR)
# (Advice: always use Manager.Queue, never regular multiprocessing.Queue
# the latter has implementation flaws that deadlock it in some circumstances)
manager = Manager()
outqueue = manager.Queue()
inqueue = manager.Queue()
boulder_preexists = False
boulder_servers = EC2.instances.filter(Filters=[
{'Name': 'tag:Name', 'Values': ['le-boulderserver']},
{'Name': 'instance-state-name', 'Values': ['running']}])
boulder_server = next(iter(boulder_servers), None)
print("Requesting Instances...")
if boulder_server:
print("Found existing boulder server:", boulder_server)
boulder_preexists = True
else:
print("Can't find a boulder server, starting one...")
boulder_server = make_instance('le-boulderserver',
BOULDER_AMI,
KEYNAME,
machine_type='t2.micro',
#machine_type='t2.medium',
security_group_id=security_group_id,
subnet_id=subnet_id)
try:
if not cl_args.boulderonly:
instances = create_client_instances(targetlist, security_group_id, subnet_id)
# Configure and launch boulder server
#-------------------------------------------------------------------------------
print("Waiting on Boulder Server")
boulder_server = block_until_instance_ready(boulder_server)
print(" server %s"%boulder_server)
# launch as many processes as clients to test
num_processes = len(targetlist)
jobs = [] #keep a reference to current procs
# env.host_string defines the ssh user and host for connection
env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
print("Boulder Server at (SSH):", env.host_string)
if not boulder_preexists:
print("Configuring and Launching Boulder")
config_and_launch_boulder(boulder_server)
# blocking often unnecessary, but cheap EC2 VMs can get very slow
block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
wait_time=10, timeout=500)
# initiate process execution
for i in range(num_processes):
p = mp.Process(target=test_client_process, args=(inqueue, outqueue, boulder_url))
jobs.append(p)
p.daemon = True # kills subprocesses if parent is killed
p.start()
boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
print("Boulder Server at (EC2 private ip): %s"%boulder_url)
# fill up work queue
for ii, target in enumerate(targetlist):
inqueue.put((ii, instances[ii].id, target))
if cl_args.boulderonly:
sys.exit(0)
# add SENTINELs to end client processes
for i in range(num_processes):
inqueue.put(SENTINEL)
# wait on termination of client processes
timeout = 5 * 60
for p in jobs:
p.join(timeout)
while p.is_alive():
print('Waiting on client processes...')
sys.stdout.flush()
p.join(timeout)
# add SENTINEL to output queue
outqueue.put(SENTINEL)
# Install and launch client scripts in parallel
#-------------------------------------------------------------------------------
print("Uploading and running test script in parallel: %s"%cl_args.test_script)
print("Output routed to log files in %s"%LOGDIR)
# (Advice: always use Manager.Queue, never regular multiprocessing.Queue
# the latter has implementation flaws that deadlock it in some circumstances)
manager = Manager()
outqueue = manager.Queue()
inqueue = manager.Queue()
SENTINEL = None #queue kill signal
# clean up
execute(local_repo_clean)
# launch as many processes as clients to test
num_processes = len(targetlist)
jobs = [] #keep a reference to current procs
# print and save summary results
results_file = open(LOGDIR+'/results', 'w')
outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
outputs.sort(key=lambda x: x[0])
failed = False
for outq in outputs:
ii, target, status = outq
if status == Status.FAIL:
failed = True
print('%d %s %s'%(ii, target['name'], status))
results_file.write('%d %s %s\n'%(ii, target['name'], status))
if len(outputs) != num_processes:
failed = True
failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
'Tests should be rerun.'
print(failure_message)
results_file.write(failure_message + '\n')
results_file.close()
if failed:
sys.exit(1)
finally:
cleanup(cl_args, instances, targetlist)
# kill any connections
fabric.network.disconnect_all()
# initiate process execution
for i in range(num_processes):
p = mp.Process(target=test_client_process, args=(inqueue, outqueue))
jobs.append(p)
p.daemon = True # kills subprocesses if parent is killed
p.start()
# fill up work queue
for ii, target in enumerate(targetlist):
inqueue.put((ii, target))
# add SENTINELs to end client processes
for i in range(num_processes):
inqueue.put(SENTINEL)
# wait on termination of client processes
for p in jobs:
p.join()
# add SENTINEL to output queue
outqueue.put(SENTINEL)
# clean up
execute(local_repo_clean)
# print and save summary results
results_file = open(LOGDIR+'/results', 'w')
outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
outputs.sort(key=lambda x: x[0])
for outq in outputs:
ii, target, status = outq
print('%d %s %s'%(ii, target['name'], status))
results_file.write('%d %s %s\n'%(ii, target['name'], status))
if len(outputs) != num_processes:
failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
'Tests should be rerun.'
print(failure_message)
results_file.write(failure_message + '\n')
results_file.close()
finally:
cleanup(cl_args, instances, targetlist)
# kill any connections
fabric.network.disconnect_all()
if __name__ == '__main__':
main()
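
The fan-out in main() rests on two details worth isolating: Manager.Queue rather than a bare multiprocessing.Queue (per the advice in the script), and one SENTINEL per worker on the input queue plus a final one on the output queue so both sides know when to stop. A self-contained sketch of that protocol, with trivial work standing in for the client tests:

```
# Sketch of the queue protocol used above: Manager.Queue plus a None sentinel.
import multiprocessing as mp
from multiprocessing import Manager

SENTINEL = None

def worker(inqueue, outqueue):
    for ii, name in iter(inqueue.get, SENTINEL):
        outqueue.put((ii, name, 'pass'))  # stand-in for a real test result

if __name__ == '__main__':
    manager = Manager()
    inqueue, outqueue = manager.Queue(), manager.Queue()
    targets = ['ubuntu18.04LTS', 'debian9', 'centos7']
    jobs = [mp.Process(target=worker, args=(inqueue, outqueue)) for _ in targets]
    for p in jobs:
        p.daemon = True  # kill workers if the parent dies
        p.start()
    for work_item in enumerate(targets):
        inqueue.put(work_item)
    for _ in jobs:
        inqueue.put(SENTINEL)  # one sentinel per worker ends its loop
    for p in jobs:
        p.join()
    outqueue.put(SENTINEL)  # lets the reader drain results the same way
    print(sorted(iter(outqueue.get, SENTINEL)))
```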

tests/letstest/requirements.txt

@@ -0,0 +1,25 @@
asn1crypto==0.24.0
awscli==1.16.157
bcrypt==3.1.6
boto3==1.9.146
botocore==1.12.147
cffi==1.12.3
colorama==0.3.9
cryptography==2.4.2
docutils==0.14
enum34==1.1.6
Fabric==1.14.1
futures==3.2.0
idna==2.8
ipaddress==1.0.22
jmespath==0.9.4
paramiko==2.4.2
pyasn1==0.4.5
pycparser==2.19
PyNaCl==1.3.0
python-dateutil==2.8.0
PyYAML==3.10
rsa==3.4.2
s3transfer==0.2.0
six==1.12.0
urllib3==1.24.3

tests/letstest/scripts/test_sdists.sh

@@ -1,15 +1,24 @@
#!/bin/sh -xe
cd letsencrypt
./certbot-auto --os-packages-only -n --debug
./certbot-auto --install-only -n --debug
PLUGINS="certbot-apache certbot-nginx"
PYTHON_MAJOR_VERSION=$(/opt/eff.org/certbot/venv/bin/python --version 2>&1 | cut -d" " -f 2 | cut -d. -f1)
TEMP_DIR=$(mktemp -d)
VERSION=$(letsencrypt-auto-source/version.py)
if [ "$PYTHON_MAJOR_VERSION" = "3" ]; then
VENV_PATH="venv3"
VENV_SCRIPT="tools/venv3.py"
else
VENV_SCRIPT="tools/venv.py"
VENV_PATH="venv"
fi
# setup venv
tools/venv.py --requirement letsencrypt-auto-source/pieces/dependency-requirements.txt
. ./venv/bin/activate
"$VENV_SCRIPT" --requirement letsencrypt-auto-source/pieces/dependency-requirements.txt
. "$VENV_PATH/bin/activate"
# pytest is needed to run tests on some of our packages so we install a pinned version here.
tools/pip_install.py pytest

tests/letstest/travis-test-farm.pem.enc
Binary file not shown.

tests/letstest/travis.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/bash -eux
#
# Runs test farm tests in Travis.
cd $(dirname "$0")
openssl aes-256-cbc -K $encrypted_9a387195a62e_key -iv $encrypted_9a387195a62e_iv -in travis-test-farm.pem.enc -out travis-test-farm.pem -d
python multitester.py apache2_targets.yaml ./travis-test-farm.pem none scripts/test_apache2.sh
for script in test_leauto_upgrades.sh test_letsencrypt_auto_certonly_standalone.sh test_sdists.sh; do
# Sleep after each test to give AWS time to terminate instances.
sleep 30s
python multitester.py targets.yaml ./travis-test-farm.pem none "scripts/$script"
done

tox.ini

@@ -274,3 +274,11 @@ commands =
--acme-server={env:ACME_SERVER:pebble}
passenv = DOCKER_*
setenv = {[testenv:py27-oldest]setenv}
[testenv:travis-test-farm-tests]
commands = {toxinidir}/tests/letstest/travis.sh
deps = -rtests/letstest/requirements.txt
passenv =
AWS_*
encrypted_*
setenv = AWS_DEFAULT_REGION=us-east-1