Compare commits
31 commits: 1.17.x ... test-letst
| SHA1 |
|---|
| d74f423b09 |
| c92ef367a4 |
| 796d321e44 |
| 9aa04fb59a |
| 0e26d1d8f4 |
| 78bab48e7c |
| 5497d59f5b |
| b760d198b5 |
| 94cd3d93e4 |
| b5edb3a2df |
| ba70e1748d |
| fa5fd25eda |
| 611fb266ff |
| 6e466c9740 |
| da34e9efc3 |
| 8d898a7183 |
| 91b36e3b7f |
| b9d71190a7 |
| a76020f6bc |
| 846de3cc65 |
| 10fa95cba8 |
| 60fba5fcfe |
| ff7fc8486a |
| bcfb070a9d |
| 1ad6857b56 |
| 3c041f1655 |
| a7fd7cdd87 |
| 4f510706a1 |
| 7f75454675 |
| 3fbdef6a64 |
| 2065775193 |
.travis.yml (206 changed lines)
@@ -30,202 +30,15 @@ extended-test-suite: &extended-test-suite
```yaml
matrix:
  include:
    # Main test suite
    - python: "2.7"
      env: ACME_SERVER=pebble TOXENV=integration
      sudo: required
      services: docker
      <<: *not-on-master

    # This job is always executed, including on master
    - python: "2.7"
      env: TOXENV=py27-cover FYI="py27 tests + code coverage"

    - sudo: required
      env: TOXENV=nginx_compat
      services: docker
      before_install:
      addons:
      <<: *not-on-master
    - python: "2.7"
      env: TOXENV=lint
      <<: *not-on-master
    - python: "3.4"
      env: TOXENV=mypy
      <<: *not-on-master
    - python: "3.5"
      env: TOXENV=mypy
      <<: *not-on-master
    - python: "2.7"
      env: TOXENV='py27-{acme,apache,certbot,dns,nginx,postfix}-oldest'
      sudo: required
      services: docker
      <<: *not-on-master
    - python: "3.4"
      env: TOXENV=py34
      sudo: required
      services: docker
      <<: *not-on-master
    - python: "3.7"
      dist: xenial
      env: TOXENV=py37
      sudo: required
      services: docker
      <<: *not-on-master
    - sudo: required
      env: TOXENV=apache_compat
      services: docker
      before_install:
      addons:
      <<: *not-on-master
    - sudo: required
      env: TOXENV=le_auto_trusty
      services: docker
      before_install:
      addons:
      <<: *not-on-master
    - python: "2.7"
      env: TOXENV=apacheconftest-with-pebble
      sudo: required
      services: docker
      <<: *not-on-master
    - python: "2.7"
      env: TOXENV=nginxroundtrip
      <<: *not-on-master

    # Extended test suite on cron jobs and pushes to tested branches other than master
    - python: "3.7"
      dist: xenial
      env: TOXENV=py37 CERTBOT_NO_PIN=1
      env:
        - TOXENV=travis-test-farm-tests-part1
        - secure: "f+j/Lj9s1lcuKo5sEFrlRd1kIAMnIJI4z0MTI7QF8jl9Fkmbx7KECGzw31TNgzrOSzxSapHbcueFYvNCLKST+kE/8ogMZBbwqXfEDuKpyF6BY3uYoJn+wPVE5pIb8Hhe08xPte8TTDSMIyHI3EyTfcAKrIreauoArePvh/cRvSw="
      <<: *extended-test-suite
    - python: "2.7"
      env: ACME_SERVER=boulder-v1 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "2.7"
      env: ACME_SERVER=boulder-v2 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "2.7"
      env: TOXENV=py27-certbot-oldest
      <<: *extended-test-suite
    - python: "2.7"
      env: TOXENV=py27-nginx-oldest
      <<: *extended-test-suite
    - python: "2.7"
      env: ACME_SERVER=boulder-v1 TOXENV=integration-certbot-oldest
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "2.7"
      env: ACME_SERVER=boulder-v2 TOXENV=integration-certbot-oldest
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "2.7"
      env: ACME_SERVER=boulder-v1 TOXENV=integration-nginx-oldest
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "2.7"
      env: ACME_SERVER=boulder-v2 TOXENV=integration-nginx-oldest
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "3.4"
      env: TOXENV=py34
      <<: *extended-test-suite
    - python: "3.5"
      env: TOXENV=py35
      <<: *extended-test-suite
    - python: "3.6"
      env: TOXENV=py36
      <<: *extended-test-suite
    - python: "3.7"
      dist: xenial
      env: TOXENV=py37
      <<: *extended-test-suite
    - python: "3.4"
      env: ACME_SERVER=boulder-v1 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "3.4"
      env: ACME_SERVER=boulder-v2 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "3.5"
      env: ACME_SERVER=boulder-v1 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "3.5"
      env: ACME_SERVER=boulder-v2 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "3.6"
      env: ACME_SERVER=boulder-v1 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "3.6"
      env: ACME_SERVER=boulder-v2 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "3.7"
      dist: xenial
      env: ACME_SERVER=boulder-v1 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - python: "3.7"
      dist: xenial
      env: ACME_SERVER=boulder-v2 TOXENV=integration
      sudo: required
      services: docker
      <<: *extended-test-suite
    - sudo: required
      env: TOXENV=le_auto_xenial
      services: docker
      <<: *extended-test-suite
    - sudo: required
      env: TOXENV=le_auto_jessie
      services: docker
      <<: *extended-test-suite
    - sudo: required
      env: TOXENV=le_auto_centos6
      services: docker
      <<: *extended-test-suite
    - sudo: required
      env: TOXENV=docker_dev
      services: docker
      addons:
        apt:
          packages: # don't install nginx and apache
            - libaugeas0
      <<: *extended-test-suite
    - language: generic
      env: TOXENV=py27
      os: osx
      addons:
        homebrew:
          packages:
            - augeas
            - python2
      <<: *extended-test-suite
    - language: generic
      env: TOXENV=py3
      os: osx
      addons:
        homebrew:
          packages:
            - augeas
            - python3
      env:
        - TOXENV=travis-test-farm-tests-part2
        - secure: "f+j/Lj9s1lcuKo5sEFrlRd1kIAMnIJI4z0MTI7QF8jl9Fkmbx7KECGzw31TNgzrOSzxSapHbcueFYvNCLKST+kE/8ogMZBbwqXfEDuKpyF6BY3uYoJn+wPVE5pIb8Hhe08xPte8TTDSMIyHI3EyTfcAKrIreauoArePvh/cRvSw="
      <<: *extended-test-suite

# container-based infrastructure
```
@@ -252,10 +65,3 @@ after_success: '[ "$TOXENV" == "py27-cover" ] && codecov -F linux'
```yaml
notifications:
  email: false
  irc:
    channels:
      - secure: "SGWZl3ownKx9xKVV2VnGt7DqkTmutJ89oJV9tjKhSs84kLijU6EYdPnllqISpfHMTxXflNZuxtGo0wTDYHXBuZL47w1O32W6nzuXdra5zC+i4sYQwYULUsyfOv9gJX8zWAULiK0Z3r0oho45U+FR5ZN6TPCidi8/eGU+EEPwaAw="
    on_cancel: never
    on_success: never
    on_failure: always
    use_notice: true
```
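The `secure:` values above are Travis-encrypted environment variables, and the AWS key the new farm-test jobs use is committed as `travis-test-farm.pem.enc` (decrypted by the tox.ini sections later in this diff). A rough sketch of how such a file is typically produced with the Travis CLI; the `encrypted_9a387195a62e_*` variable names come from a hash the CLI picks, and the exact commands here are assumptions, not part of this change:

```sh
# Hypothetical recreation of travis-test-farm.pem.enc (not part of this diff).
gem install travis
travis login --org
# encrypt-file stores encrypted_<hash>_key/iv in the repo's Travis settings and
# prints the matching "openssl aes-256-cbc -K ... -iv ... -d" decrypt command;
# in this change that command lives in the tox.ini farm-test sections below.
travis encrypt-file travis-test-farm.pem
```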
tests/letstest/README.md

@@ -14,15 +14,17 @@ Simple AWS testfarm scripts for certbot client testing

````
- AWS EC2 has a default limit of 20 t2/t1 instances, if more
  are needed, they need to be requested via online webform.

## Usage
- To install the necessary dependencies on Ubuntu 16.04, run:
## Installation and configuration
These tests require Python 2.7, awscli, boto3, PyYAML, and fabric<2.0. If you
have Python 2.7 and virtualenv installed, you can use requirements.txt to
create a virtual environment with a known set of dependencies by running:
```
sudo apt install awscli python-yaml python-boto3 fabric
virtualenv --python $(command -v python2.7 || command -v python2 || command -v python) venv
. ./venv/bin/activate
pip install --requirement requirements.txt
```

- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem

You can then configure AWS credentials and create a key by running:
```
>aws configure --profile <profile name>
[interactive: enter secrets for IAM role]
````
@@ -30,9 +32,10 @@ sudo apt install awscli python-yaml python-boto3 fabric

````
```
Note: whatever you pick for `<key name>` will be shown to other users with AWS access.

When prompted for a default region name, enter: `us-east-1`
When prompted for a default region name, enter: `us-east-1`.

then:
## Usage
To run tests, activate the virtual environment you created above and run:
```
>python multitester.py targets.yaml /path/to/your/key.pem <profile name> scripts/<test to run>
```
````
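Pulling the revised README steps into one session, a run might look like the following sketch; the profile name and key path are placeholders, and the test script name is taken from the tox.ini hunk later in this diff:

```sh
# One-time environment setup, as described above.
virtualenv --python "$(command -v python2.7 || command -v python2 || command -v python)" venv
. ./venv/bin/activate
pip install --requirement requirements.txt
aws configure --profile certbot-testing    # placeholder profile name

# Run one farm test against every target in targets.yaml.
python multitester.py targets.yaml ~/keys/certbot-testing.pem certbot-testing \
    scripts/test_letsencrypt_auto_certonly_standalone.sh
```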
tests/letstest/targets.yaml

@@ -1,13 +1,18 @@
```yaml
targets:
  #-----------------------------------------------------------------------------
  # Apache 2.4
  - ami: ami-26d5af4c
    name: ubuntu15.10
  #Ubuntu
  - ami: ami-064bd2d44a1d6c097
    name: ubuntu18.10
    type: ubuntu
    virt: hvm
    user: ubuntu
  - ami: ami-d92e6bb3
    name: ubuntu15.04LTS
  - ami: ami-012fd5eb46f56731f
    name: ubuntu18.04LTS
    type: ubuntu
    virt: hvm
    user: ubuntu
  - ami: ami-09677e0a6b14905b0
    name: ubuntu16.04LTS
    type: ubuntu
    virt: hvm
    user: ubuntu
```
@@ -21,37 +26,29 @@ targets:
```yaml
    type: ubuntu
    virt: pv
    user: ubuntu
  - ami: ami-116d857a
    name: debian8.1
    type: debian
  #-----------------------------------------------------------------------------
  # Debian
  - ami: ami-003f19e0e687de1cd
    name: debian9
    type: ubuntu
    virt: hvm
    user: admin
  - ami: ami-077bf3962f29d3fa4
    name: debian8.1
    type: ubuntu
    virt: hvm
    user: admin
    userdata: |
      #cloud-init
      runcmd:
        - [ apt-get, install, -y, curl ]
  #-----------------------------------------------------------------------------
  # Apache 2.2
  # - ami: ami-0611546c
  #   name: ubuntu12.04LTS
  #   type: ubuntu
  #   virt: hvm
  #   user: ubuntu
  # - ami: ami-e0efab88
  #   name: debian7.8.aws.1
  #   type: debian
  #   virt: hvm
  #   user: admin
  #   userdata: |
  #     #cloud-init
  #     runcmd:
  #       - [ apt-get, install, -y, curl ]
  # - ami: ami-e6eeaa8e
  #   name: debian7.8.aws.1_32bit
  #   type: debian
  #   virt: pv
  #   user: admin
  #   userdata: |
  #     #cloud-init
  #     runcmd:
  #       - [ apt-get, install, -y, curl ]
  # Fedora
  - ami: ami-5c69df23
    name: fedora28
    type: centos
    virt: hvm
    user: fedora
  #-----------------------------------------------------------------------------
  # CentOS
  - ami: ami-9887c6e7
    name: centos7
    type: centos
    virt: hvm
    user: centos
```
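For context, multitester.py reads this file with a plain `yaml.load` and looks only at the keys shown above (`userdata` being optional cloud-init). A minimal sketch of that consumption; the key validation is an illustration, not something the script does:

```python
import yaml

# Mirrors main() in multitester.py: configdata = yaml.load(...); configdata['targets']
with open('targets.yaml') as config_file:
    targets = yaml.load(config_file)['targets']

for target in targets:
    # 'userdata' is an optional cloud-init script; the rest are required.
    missing = {'ami', 'name', 'type', 'virt', 'user'} - set(target)
    if missing:
        raise ValueError('%s is missing keys: %s' % (target.get('name', '?'), missing))
    print(target['ami'], target['name'])
```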
tests/letstest/multitester.py

@@ -94,18 +94,21 @@ cl_args = parser.parse_args()
```python
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = cl_args.aws_profile
PROFILE = None if cl_args.aws_profile.lower() == 'none' else cl_args.aws_profile

# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-072a9534772bec854' # premade shared boulder AMI 18.04LTS us-east-1
LOGDIR = "" #points to logging / working directory
# boto3/AWS api globals
AWS_SESSION = None
EC2 = None
LOGDIR = "letest-%d"%int(time.time()) #points to logging / working directory
SECURITY_GROUP_NAME = 'certbot-security-group'
SENTINEL = None #queue kill signal
SUBNET_NAME = 'certbot-subnet'

class Status(object):
    """Possible statuses of client tests."""
    PASS = 'pass'
    FAIL = 'fail'

# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def should_use_subnet(subnet):
```
@@ -139,16 +142,19 @@ def make_security_group(vpc):
```python
    mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
    return mysg

def make_instance(instance_name,
def make_instance(ec2_client,
                  instance_name,
                  ami_id,
                  keyname,
                  security_group_id,
                  subnet_id,
                  machine_type='t2.micro',
                  userdata=""): #userdata contains bash or cloud-init script

    new_instance = EC2.create_instances(
        BlockDeviceMappings=_get_block_device_mappings(ami_id),
    block_device_mappings = _get_block_device_mappings(ec2_client, ami_id)
    tags = [{'Key': 'Name', 'Value': instance_name}]
    tag_spec = [{'ResourceType': 'instance', 'Tags': tags}]
    return ec2_client.create_instances(
        BlockDeviceMappings=block_device_mappings,
        ImageId=ami_id,
        SecurityGroupIds=[security_group_id],
        SubnetId=subnet_id,
```
@@ -156,24 +162,10 @@ def make_instance(instance_name,
```python
        MinCount=1,
        MaxCount=1,
        UserData=userdata,
        InstanceType=machine_type)[0]
        InstanceType=machine_type,
        TagSpecifications=tag_spec)[0]

    # brief pause to prevent rare error on EC2 delay, should block until ready instead
    time.sleep(1.0)

    # give instance a name
    try:
        new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
    except ClientError as e:
        if "InvalidInstanceID.NotFound" in str(e):
            # This seems to be ephemeral... retry
            time.sleep(1)
            new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
        else:
            raise
    return new_instance

def _get_block_device_mappings(ami_id):
def _get_block_device_mappings(ec2_client, ami_id):
    """Returns the list of block device mappings to ensure cleanup.

    This list sets connected EBS volumes to be deleted when the EC2
```
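The move to `TagSpecifications` applies the Name tag in the same API call that launches the instance, which is why the old `create_tags` retry for EC2's eventually consistent `InvalidInstanceID.NotFound` error can be dropped. A standalone sketch of the new call shape, with placeholder AMI and tag values:

```python
import boto3

# Assumes default AWS credentials; the AMI id is a placeholder.
ec2 = boto3.session.Session().resource('ec2')
instance = ec2.create_instances(
    ImageId='ami-072a9534772bec854',
    MinCount=1,
    MaxCount=1,
    InstanceType='t2.micro',
    # Tags are applied atomically at launch, so there is no follow-up
    # create_tags call racing against instance-id propagation.
    TagSpecifications=[{'ResourceType': 'instance',
                        'Tags': [{'Key': 'Name', 'Value': 'le-example'}]}],
)[0]
print(instance.id)
```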
@@ -186,7 +178,7 @@ def _get_block_device_mappings(ami_id):
```python
    # * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html
    return [{'DeviceName': mapping['DeviceName'],
             'Ebs': {'DeleteOnTermination': True}}
            for mapping in EC2.Image(ami_id).block_device_mappings
            for mapping in ec2_client.Image(ami_id).block_device_mappings
            if not mapping.get('Ebs', {}).get('DeleteOnTermination', True)]
```
@@ -225,20 +217,17 @@ def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
```python
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
    "Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
    # the reinstantiation from id is necessary to force boto3
    # to correctly update the 'state' variable during init
    _id = booting_instance.id
    _instance = EC2.Instance(id=_id)
    _state = _instance.state['Name']
    _ip = _instance.public_ip_address
    while _state != 'running' or _ip is None:
    state = booting_instance.state['Name']
    ip = booting_instance.public_ip_address
    while state != 'running' or ip is None:
        time.sleep(wait_time)
        _instance = EC2.Instance(id=_id)
        _state = _instance.state['Name']
        _ip = _instance.public_ip_address
    block_until_ssh_open(_ip)
        # The instance needs to be reloaded to update its local attributes.
        booting_instance.reload()
        state = booting_instance.state['Name']
        ip = booting_instance.public_ip_address
    block_until_ssh_open(ip)
    time.sleep(extra_wait_time)
    return _instance
    return booting_instance


# Fabric Routines
```
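The simplification above relies on boto3 resource objects caching their attributes at load time; calling `reload()` refreshes them in place, so re-creating the resource from its id on every poll is unnecessary. The polling idiom in isolation, assuming an existing `instance` resource:

```python
import time

def wait_for_running(instance, wait_time=5):
    """Poll an EC2 instance resource until it is running with a public IP."""
    while instance.state['Name'] != 'running' or instance.public_ip_address is None:
        time.sleep(wait_time)
        instance.reload()  # re-fetch cached attributes from the EC2 API
    return instance
```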
@@ -314,53 +303,56 @@ def grab_certbot_log():
```python
    sudo('if [ -f ./certbot.log ]; then \
        cat ./certbot.log; else echo "[nolocallog]"; fi')

def create_client_instances(targetlist, security_group_id, subnet_id):
    "Create a fleet of client instances"
    instances = []
    print("Creating instances: ", end="")
    for target in targetlist:
        if target['virt'] == 'hvm':
            machine_type = 't2.medium' if cl_args.fast else 't2.micro'
        else:
            # 32 bit systems
            machine_type = 'c1.medium' if cl_args.fast else 't1.micro'
        if 'userdata' in target.keys():
            userdata = target['userdata']
        else:
            userdata = ''
        name = 'le-%s'%target['name']
        print(name, end=" ")
        instances.append(make_instance(name,
                                       target['ami'],
                                       KEYNAME,
                                       machine_type=machine_type,
                                       security_group_id=security_group_id,
                                       subnet_id=subnet_id,
                                       userdata=userdata))
    print()
    return instances

def create_client_instance(ec2_client, target, security_group_id, subnet_id):
    """Create a single client instance for running tests."""
    if target['virt'] == 'hvm':
        machine_type = 't2.medium' if cl_args.fast else 't2.micro'
    else:
        # 32 bit systems
        machine_type = 'c1.medium' if cl_args.fast else 't1.micro'
    if 'userdata' in target.keys():
        userdata = target['userdata']
    else:
        userdata = ''
    name = 'le-%s'%target['name']
    print(name, end=" ")
    return make_instance(ec2_client,
                         name,
                         target['ami'],
                         KEYNAME,
                         machine_type=machine_type,
                         security_group_id=security_group_id,
                         subnet_id=subnet_id,
                         userdata=userdata)


def test_client_process(inqueue, outqueue):
def test_client_process(inqueue, outqueue, boulder_url):
    cur_proc = mp.current_process()
    for inreq in iter(inqueue.get, SENTINEL):
        ii, target = inreq
        ii, instance_id, target = inreq

        # Each client process is given its own session due to the suggestion at
        # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing.
        aws_session = boto3.session.Session(profile_name=PROFILE)
        ec2_client = aws_session.resource('ec2')
        instance = ec2_client.Instance(id=instance_id)

        #save all stdout to log file
        sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w')

        print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
        instances[ii] = block_until_instance_ready(instances[ii])
        print("server %s at %s"%(instances[ii], instances[ii].public_ip_address))
        env.host_string = "%s@%s"%(target['user'], instances[ii].public_ip_address)
        instance = block_until_instance_ready(instance)
        print("server %s at %s"%(instance, instance.public_ip_address))
        env.host_string = "%s@%s"%(target['user'], instance.public_ip_address)
        print(env.host_string)

        try:
            install_and_launch_certbot(instances[ii], boulder_url, target)
            outqueue.put((ii, target, 'pass'))
            install_and_launch_certbot(instance, boulder_url, target)
            outqueue.put((ii, target, Status.PASS))
            print("%s - %s SUCCESS"%(target['ami'], target['name']))
        except:
            outqueue.put((ii, target, 'fail'))
            outqueue.put((ii, target, Status.FAIL))
            print("%s - %s FAIL"%(target['ami'], target['name']))
            traceback.print_exc(file=sys.stdout)
            pass
```
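Per the boto3 guidance linked in the diff, sessions and the resources they create are not safe to share across processes, so the refactor passes only the plain instance id through the work queue and has each worker rebuild its own `Session`. A stripped-down sketch of the pattern, with queue payloads reduced to bare ids:

```python
import boto3

SENTINEL = None  # same queue kill signal as multitester.py

def worker(inqueue, profile_name):
    # One Session and resource per process; boto3 objects themselves
    # should never be pickled or shared across process boundaries.
    session = boto3.session.Session(profile_name=profile_name)
    ec2 = session.resource('ec2')
    for instance_id in iter(inqueue.get, SENTINEL):
        # Rehydrate a full Instance resource from the plain string id.
        instance = ec2.Instance(id=instance_id)
        print(instance.id, instance.state['Name'])
```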
@@ -377,7 +369,10 @@ def test_client_process(inqueue, outqueue):
```python
def cleanup(cl_args, instances, targetlist):
    print('Logs in ', LOGDIR)
    if not cl_args.saveinstances:
    # If lengths of instances and targetlist aren't equal, instances failed to
    # start before running tests so leaving instances running for debugging
    # isn't very useful. Let's cleanup after ourselves instead.
    if len(instances) == len(targetlist) or not cl_args.saveinstances:
        print('Terminating EC2 Instances')
        if cl_args.killboulder:
            boulder_server.terminate()
```
@@ -391,187 +386,202 @@ def cleanup(cl_args, instances, targetlist):
```python
                  "%s@%s"%(target['user'], instances[ii].public_ip_address))


def main():
    # Fabric library controlled through global env parameters
    env.key_filename = KEYFILE
    env.shell = '/bin/bash -l -i -c'
    env.connection_attempts = 5
    env.timeout = 10
    # replace default SystemExit thrown by fabric during trouble
    class FabricException(Exception):
        pass
    env['abort_exception'] = FabricException

#-------------------------------------------------------------------------------
# SCRIPT BEGINS
#-------------------------------------------------------------------------------
    # Set up local copy of git repo
    #-------------------------------------------------------------------------------
    print("Making local dir for test repo and logs: %s"%LOGDIR)
    local('mkdir %s'%LOGDIR)

# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10
# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
    pass
env['abort_exception'] = FabricException
    # figure out what git object to test and locally create it in LOGDIR
    print("Making local git repo")
    try:
        if cl_args.pull_request != '~':
            print('Testing PR %s '%cl_args.pull_request,
                  "MERGING into master" if cl_args.merge_master else "")
            execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
        elif cl_args.branch != '~':
            print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
            execute(local_git_branch, cl_args.repo, cl_args.branch)
        else:
            print('Testing master of %s'%cl_args.repo)
            execute(local_git_clone, cl_args.repo)
    except FabricException:
        print("FAIL: trouble with git repo")
        traceback.print_exc()
        exit()

# Set up local copy of git repo
#-------------------------------------------------------------------------------
LOGDIR = "letest-%d"%int(time.time())
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)

# figure out what git object to test and locally create it in LOGDIR
print("Making local git repo")
try:
    if cl_args.pull_request != '~':
        print('Testing PR %s '%cl_args.pull_request,
              "MERGING into master" if cl_args.merge_master else "")
        execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
    elif cl_args.branch != '~':
        print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
        execute(local_git_branch, cl_args.repo, cl_args.branch)
    # Set up EC2 instances
    #-------------------------------------------------------------------------------
    configdata = yaml.load(open(cl_args.config_file, 'r'))
    targetlist = configdata['targets']
    print('Testing against these images: [%d total]'%len(targetlist))
    for target in targetlist:
        print(target['ami'], target['name'])

    print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
    aws_session = boto3.session.Session(profile_name=PROFILE)
    ec2_client = aws_session.resource('ec2')

    print("Determining Subnet")
    for subnet in ec2_client.subnets.all():
        if should_use_subnet(subnet):
            subnet_id = subnet.id
            vpc_id = subnet.vpc.id
            break
    else:
        print('Testing master of %s'%cl_args.repo)
        execute(local_git_clone, cl_args.repo)
except FabricException:
    print("FAIL: trouble with git repo")
    traceback.print_exc()
    exit()
        print("No usable subnet exists!")
        print("Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
        print("that maps public IPv4 addresses to instances launched in the subnet.")
        sys.exit(1)

    print("Making Security Group")
    vpc = ec2_client.Vpc(vpc_id)
    sg_exists = False
    for sg in vpc.security_groups.all():
        if sg.group_name == SECURITY_GROUP_NAME:
            security_group_id = sg.id
            sg_exists = True
            print("  %s already exists"%SECURITY_GROUP_NAME)
    if not sg_exists:
        security_group_id = make_security_group(vpc).id
        time.sleep(30)

    boulder_preexists = False
    boulder_servers = ec2_client.instances.filter(Filters=[
        {'Name': 'tag:Name', 'Values': ['le-boulderserver']},
        {'Name': 'instance-state-name', 'Values': ['running']}])

    boulder_server = next(iter(boulder_servers), None)

    print("Requesting Instances...")
    if boulder_server:
        print("Found existing boulder server:", boulder_server)
        boulder_preexists = True
    else:
        print("Can't find a boulder server, starting one...")
        boulder_server = make_instance(ec2_client,
                                       'le-boulderserver',
                                       BOULDER_AMI,
                                       KEYNAME,
                                       machine_type='t2.micro',
                                       #machine_type='t2.medium',
                                       security_group_id=security_group_id,
                                       subnet_id=subnet_id)

    instances = []
    try:
        if not cl_args.boulderonly:
            print("Creating instances: ", end="")
            for target in targetlist:
                instances.append(create_client_instance(ec2_client, target, security_group_id, subnet_id))
            print()

        # Configure and launch boulder server
        #-------------------------------------------------------------------------------
        print("Waiting on Boulder Server")
        boulder_server = block_until_instance_ready(boulder_server)
        print(" server %s"%boulder_server)


# Set up EC2 instances
#-------------------------------------------------------------------------------
configdata = yaml.load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
    print(target['ami'], target['name'])
        # env.host_string defines the ssh user and host for connection
        env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
        print("Boulder Server at (SSH):", env.host_string)
        if not boulder_preexists:
            print("Configuring and Launching Boulder")
            config_and_launch_boulder(boulder_server)
            # blocking often unnecessary, but cheap EC2 VMs can get very slow
            block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
                                   wait_time=10, timeout=500)

print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
AWS_SESSION = boto3.session.Session(profile_name=PROFILE)
EC2 = AWS_SESSION.resource('ec2')
        boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
        print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
        print("Boulder Server at (EC2 private ip): %s"%boulder_url)

print("Determining Subnet")
for subnet in EC2.subnets.all():
    if should_use_subnet(subnet):
        subnet_id = subnet.id
        vpc_id = subnet.vpc.id
        break
else:
    print("No usable subnet exists!")
    print("Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
    print("that maps public IPv4 addresses to instances launched in the subnet.")
    sys.exit(1)
        if cl_args.boulderonly:
            sys.exit(0)

print("Making Security Group")
vpc = EC2.Vpc(vpc_id)
sg_exists = False
for sg in vpc.security_groups.all():
    if sg.group_name == SECURITY_GROUP_NAME:
        security_group_id = sg.id
        sg_exists = True
        print("  %s already exists"%SECURITY_GROUP_NAME)
if not sg_exists:
    security_group_id = make_security_group(vpc).id
    time.sleep(30)
        # Install and launch client scripts in parallel
        #-------------------------------------------------------------------------------
        print("Uploading and running test script in parallel: %s"%cl_args.test_script)
        print("Output routed to log files in %s"%LOGDIR)
        # (Advice: always use Manager.Queue, never regular multiprocessing.Queue
        # the latter has implementation flaws that deadlock it in some circumstances)
        manager = Manager()
        outqueue = manager.Queue()
        inqueue = manager.Queue()

boulder_preexists = False
boulder_servers = EC2.instances.filter(Filters=[
    {'Name': 'tag:Name', 'Values': ['le-boulderserver']},
    {'Name': 'instance-state-name', 'Values': ['running']}])

boulder_server = next(iter(boulder_servers), None)

print("Requesting Instances...")
if boulder_server:
    print("Found existing boulder server:", boulder_server)
    boulder_preexists = True
else:
    print("Can't find a boulder server, starting one...")
    boulder_server = make_instance('le-boulderserver',
                                   BOULDER_AMI,
                                   KEYNAME,
                                   machine_type='t2.micro',
                                   #machine_type='t2.medium',
                                   security_group_id=security_group_id,
                                   subnet_id=subnet_id)

try:
    if not cl_args.boulderonly:
        instances = create_client_instances(targetlist, security_group_id, subnet_id)

    # Configure and launch boulder server
    #-------------------------------------------------------------------------------
    print("Waiting on Boulder Server")
    boulder_server = block_until_instance_ready(boulder_server)
    print(" server %s"%boulder_server)
        # launch as many processes as clients to test
        num_processes = len(targetlist)
        jobs = [] #keep a reference to current procs


    # env.host_string defines the ssh user and host for connection
    env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
    print("Boulder Server at (SSH):", env.host_string)
    if not boulder_preexists:
        print("Configuring and Launching Boulder")
        config_and_launch_boulder(boulder_server)
        # blocking often unnecessary, but cheap EC2 VMs can get very slow
        block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
                               wait_time=10, timeout=500)
        # initiate process execution
        for i in range(num_processes):
            p = mp.Process(target=test_client_process, args=(inqueue, outqueue, boulder_url))
            jobs.append(p)
            p.daemon = True # kills subprocesses if parent is killed
            p.start()

    boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
    print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
    print("Boulder Server at (EC2 private ip): %s"%boulder_url)
        # fill up work queue
        for ii, target in enumerate(targetlist):
            inqueue.put((ii, instances[ii].id, target))

    if cl_args.boulderonly:
        sys.exit(0)
        # add SENTINELs to end client processes
        for i in range(num_processes):
            inqueue.put(SENTINEL)
        print('Waiting on client processes', end='')
        for p in jobs:
            while p.is_alive():
                p.join(5 * 60)
                # Regularly print output to keep Travis happy
                print('.', end='')
                sys.stdout.flush()
        print()
        # add SENTINEL to output queue
        outqueue.put(SENTINEL)

    # Install and launch client scripts in parallel
    #-------------------------------------------------------------------------------
    print("Uploading and running test script in parallel: %s"%cl_args.test_script)
    print("Output routed to log files in %s"%LOGDIR)
    # (Advice: always use Manager.Queue, never regular multiprocessing.Queue
    # the latter has implementation flaws that deadlock it in some circumstances)
    manager = Manager()
    outqueue = manager.Queue()
    inqueue = manager.Queue()
    SENTINEL = None #queue kill signal
        # clean up
        execute(local_repo_clean)

    # launch as many processes as clients to test
    num_processes = len(targetlist)
    jobs = [] #keep a reference to current procs
        # print and save summary results
        results_file = open(LOGDIR+'/results', 'w')
        outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
        outputs.sort(key=lambda x: x[0])
        failed = False
        for outq in outputs:
            ii, target, status = outq
            if status == Status.FAIL:
                failed = True
            print('%d %s %s'%(ii, target['name'], status))
            results_file.write('%d %s %s\n'%(ii, target['name'], status))
        if len(outputs) != num_processes:
            failed = True
            failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
                'Tests should be rerun.'
            print(failure_message)
            results_file.write(failure_message + '\n')
        results_file.close()

        if failed:
            sys.exit(1)

    finally:
        cleanup(cl_args, instances, targetlist)

        # kill any connections
        fabric.network.disconnect_all()


    # initiate process execution
    for i in range(num_processes):
        p = mp.Process(target=test_client_process, args=(inqueue, outqueue))
        jobs.append(p)
        p.daemon = True # kills subprocesses if parent is killed
        p.start()

    # fill up work queue
    for ii, target in enumerate(targetlist):
        inqueue.put((ii, target))

    # add SENTINELs to end client processes
    for i in range(num_processes):
        inqueue.put(SENTINEL)
    # wait on termination of client processes
    for p in jobs:
        p.join()
    # add SENTINEL to output queue
    outqueue.put(SENTINEL)

    # clean up
    execute(local_repo_clean)

    # print and save summary results
    results_file = open(LOGDIR+'/results', 'w')
    outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
    outputs.sort(key=lambda x: x[0])
    for outq in outputs:
        ii, target, status = outq
        print('%d %s %s'%(ii, target['name'], status))
        results_file.write('%d %s %s\n'%(ii, target['name'], status))
    if len(outputs) != num_processes:
        failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
            'Tests should be rerun.'
        print(failure_message)
        results_file.write(failure_message + '\n')
    results_file.close()

finally:
    cleanup(cl_args, instances, targetlist)

    # kill any connections
    fabric.network.disconnect_all()
if __name__ == '__main__':
    main()
```
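Two small patterns in the new main() deserve a note: the comment preferring `Manager().Queue()` over a bare `multiprocessing.Queue`, and the `p.join(5 * 60)` loop that emits a dot every five minutes so Travis does not abort the build for lack of output. Both distilled into a sketch, with the worker bodies elided:

```python
import sys
from multiprocessing import Manager

SENTINEL = None  # queue kill signal, as in multitester.py

manager = Manager()          # proxy-based queues sidestep the deadlocks the
inqueue = manager.Queue()    # comment warns about with plain mp.Queue when
outqueue = manager.Queue()   # daemon workers die at interpreter exit

def wait_with_keepalive(jobs):
    """Join worker processes while printing periodic output for CI log timeouts."""
    print('Waiting on client processes', end='')
    for p in jobs:
        while p.is_alive():
            p.join(5 * 60)      # returns after 5 minutes even if p is still alive
            print('.', end='')  # keep-alive output so Travis sees progress
            sys.stdout.flush()
    print()
```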
tests/letstest/requirements.txt (new file, 25 lines)
@@ -0,0 +1,25 @@
```
asn1crypto==0.24.0
awscli==1.16.157
bcrypt==3.1.6
boto3==1.9.146
botocore==1.12.147
cffi==1.12.3
colorama==0.3.9
cryptography==2.4.2
docutils==0.14
enum34==1.1.6
Fabric==1.14.1
futures==3.2.0
idna==2.8
ipaddress==1.0.22
jmespath==0.9.4
paramiko==2.4.2
pyasn1==0.4.5
pycparser==2.19
PyNaCl==1.3.0
python-dateutil==2.8.0
PyYAML==3.10
rsa==3.4.2
s3transfer==0.2.0
six==1.12.0
urllib3==1.24.3
```
@@ -1,15 +1,24 @@
```sh
#!/bin/sh -xe

cd letsencrypt
./certbot-auto --os-packages-only -n --debug
./certbot-auto --install-only -n --debug

PLUGINS="certbot-apache certbot-nginx"
PYTHON_MAJOR_VERSION=$(/opt/eff.org/certbot/venv/bin/python --version 2>&1 | cut -d" " -f 2 | cut -d. -f1)
TEMP_DIR=$(mktemp -d)
VERSION=$(letsencrypt-auto-source/version.py)

if [ "$PYTHON_MAJOR_VERSION" = "3" ]; then
    VENV_PATH="venv3"
    VENV_SCRIPT="tools/venv3.py"
else
    VENV_SCRIPT="tools/venv.py"
    VENV_PATH="venv"
fi

# setup venv
tools/venv.py --requirement letsencrypt-auto-source/pieces/dependency-requirements.txt
. ./venv/bin/activate
"$VENV_SCRIPT" --requirement letsencrypt-auto-source/pieces/dependency-requirements.txt
. "$VENV_PATH/bin/activate"
# pytest is needed to run tests on some of our packages so we install a pinned version here.
tools/pip_install.py pytest
```
tests/letstest/travis-test-farm.pem.enc (new binary file, not shown)
tox.ini (31 changed lines)
@@ -274,3 +274,34 @@ commands =
```ini
    --acme-server={env:ACME_SERVER:pebble}
passenv = DOCKER_*
setenv = {[testenv:py27-oldest]setenv}

[testenv:travis-test-farm-tests-base]
changedir = tests/letstest
commands =
    openssl aes-256-cbc -K {env:encrypted_9a387195a62e_key} -iv {env:encrypted_9a387195a62e_iv} -in travis-test-farm.pem.enc -out travis-test-farm.pem -d
deps = -rtests/letstest/requirements.txt
passenv = AWS_*
setenv = AWS_DEFAULT_REGION=us-east-1
whitelist_externals = openssl

[testenv:travis-test-farm-tests-part1]
changedir = {[testenv:travis-test-farm-tests-base]changedir}
commands =
    {[testenv:travis-test-farm-tests-base]commands}
    python multitester.py apache2_targets.yaml travis-test-farm.pem none scripts/test_apache2.sh --fast
    python multitester.py targets.yaml travis-test-farm.pem none scripts/test_leauto_upgrades.sh --fast
deps = {[testenv:travis-test-farm-tests-base]deps}
passenv = {[testenv:travis-test-farm-tests-base]passenv}
setenv = {[testenv:travis-test-farm-tests-base]setenv}
whitelist_externals = {[testenv:travis-test-farm-tests-base]whitelist_externals}

[testenv:travis-test-farm-tests-part2]
changedir = {[testenv:travis-test-farm-tests-base]changedir}
commands =
    {[testenv:travis-test-farm-tests-base]commands}
    python multitester.py targets.yaml travis-test-farm.pem none scripts/test_letsencrypt_auto_certonly_standalone.sh --fast
    python multitester.py targets.yaml travis-test-farm.pem none scripts/test_sdists.sh --fast
deps = {[testenv:travis-test-farm-tests-base]deps}
passenv = {[testenv:travis-test-farm-tests-base]passenv}
setenv = {[testenv:travis-test-farm-tests-base]setenv}
whitelist_externals = {[testenv:travis-test-farm-tests-base]whitelist_externals}
```
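The part1/part2 sections reuse the base section through tox's value substitution syntax, `{[testenv:NAME]key}`, so the openssl decrypt step runs once at the start of each Travis job and only the multitester invocations differ. The same mechanism in miniature:

```ini
# Minimal illustration of tox section substitution (names are placeholders).
[testenv:base]
changedir = tests/letstest
commands = python -c "print('shared setup')"

[testenv:part1]
changedir = {[testenv:base]changedir}
commands =
    {[testenv:base]commands}
    python -c "print('part1 only')"
```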