Compare commits

4 Commits: master...multiteste

| Author | SHA1 | Date |
|---|---|---|
| | efd49da45e | |
| | f1a3c3a9e1 | |
| | 14d0fb63f7 | |
| | bd05761e04 | |
@@ -32,7 +32,10 @@ see:
from __future__ import print_function
from __future__ import with_statement

import atexit
import sys, os, time, argparse, socket
import random
import string
import multiprocessing as mp
from multiprocessing import Manager
import urllib2
@@ -103,6 +106,8 @@ LOGDIR = "" #points to logging / working directory
# boto3/AWS api globals
AWS_SESSION = None
EC2 = None
TAG_KEY = 'MultitesterRunID'
TAG_VALUE = None

# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
@@ -126,64 +131,39 @@ def make_instance(instance_name,
                  machine_type='t2.micro',
                  security_groups=['letsencrypt_test'],
                  userdata=""): #userdata contains bash or cloud-init script
    assert TAG_VALUE, 'TAG_VALUE must be set to use this function!'
    tag_specs = []
    for rtype in ('instance', 'volume',):
        tag_specs.append({'ResourceType': rtype,
                          'Tags': [{'Key': TAG_KEY, 'Value': TAG_VALUE}]})
    for tag_spec in tag_specs:
        if tag_spec['ResourceType'] == 'instance':
            tag_spec['Tags'].append({'Key': 'Name', 'Value': instance_name})

    new_instance = EC2.create_instances(
    block_device_mappings = []
    image = EC2.Image(ami_id)
    for mapping in image.block_device_mappings:
        if 'Ebs' in mapping and not mapping['Ebs'].get('DeleteOnTermination', False):
            block_device_mappings.append(
                {'DeviceName': mapping['DeviceName'],
                 'Ebs': {'DeleteOnTermination': True}})

    return EC2.create_instances(
        BlockDeviceMappings=block_device_mappings,
        ImageId=ami_id,
        SecurityGroups=security_groups,
        KeyName=keyname,
        MinCount=1,
        MaxCount=1,
        UserData=userdata,
        InstanceType=machine_type)[0]

    # brief pause to prevent rare error on EC2 delay, should block until ready instead
    time.sleep(1.0)

    # give instance a name
    try:
        new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
    except ClientError as e:
        if "InvalidInstanceID.NotFound" in str(e):
            # This seems to be ephemeral... retry
            time.sleep(1)
            new_instance.create_tags(Tags=[{'Key': 'Name', 'Value': instance_name}])
        else:
            raise
    return new_instance

def terminate_and_clean(instances):
    """
    Some AMIs specify EBS stores that won't delete on instance termination.
    These must be manually deleted after shutdown.
    """
    volumes_to_delete = []
    for instance in instances:
        for bdmap in instance.block_device_mappings:
            if 'Ebs' in bdmap.keys():
                if not bdmap['Ebs']['DeleteOnTermination']:
                    volumes_to_delete.append(bdmap['Ebs']['VolumeId'])
        InstanceType=machine_type,
        TagSpecifications=tag_specs)[0]

def terminate_instances(instances):
    """Terminate all instances in the given list."""
    for instance in instances:
        instance.terminate()

    # can't delete volumes until all attaching instances are terminated
    _ids = [instance.id for instance in instances]
    all_terminated = False
    while not all_terminated:
        all_terminated = True
        for _id in _ids:
            # necessary to reinit object for boto3 to get true state
            inst = EC2.Instance(id=_id)
            if inst.state['Name'] != 'terminated':
                all_terminated = False
        time.sleep(5)

    for vol_id in volumes_to_delete:
        volume = EC2.Volume(id=vol_id)
        volume.delete()

    return volumes_to_delete


# Helper Routines
#-------------------------------------------------------------------------------
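For context, the tagging logic added above attaches a per-run tag to both the instance and its volumes at creation time via `TagSpecifications`, so a later cleanup pass can find every resource from one run with a single tag filter. A minimal boto3 sketch of that pattern, assuming configured AWS credentials; the AMI ID, tag value, and variable names below are placeholders, not part of the script:

```python
import boto3

ec2 = boto3.resource('ec2')  # assumes credentials and region are configured

run_tag = {'Key': 'MultitesterRunID', 'Value': 'example-run-id'}  # placeholder value

# Tag the instance and its volumes at creation so cleanup can find both later.
tag_specs = [{'ResourceType': rtype, 'Tags': [run_tag]}
             for rtype in ('instance', 'volume')]

instance = ec2.create_instances(
    ImageId='ami-12345678',   # placeholder AMI
    InstanceType='t2.micro',
    MinCount=1,
    MaxCount=1,
    TagSpecifications=tag_specs)[0]

# The same tag filter later recovers every resource created by this run.
filters = [{'Name': 'tag:MultitesterRunID', 'Values': ['example-run-id']}]
for inst in ec2.instances.filter(Filters=filters):
    print(inst.id, inst.state['Name'])
for vol in ec2.volumes.filter(Filters=filters):
    print(vol.id)
```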
@@ -235,6 +215,32 @@ def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20
    time.sleep(extra_wait_time)
    return _instance

def get_run_id_filter(id_):
    """Returns the filter to use to find resources with id_."""
    return {'Name': 'tag:' + TAG_KEY, 'Values': [id_]}

def is_unique_run_id(id_):
    """Checks if ID is a unique run ID."""
    filters = [get_run_id_filter(id_)]
    for resource in (EC2.instances, EC2.volumes):
        for _ in resource.filter(Filters=filters):
            return False
    return True

def create_unique_run_id():
    """Creates a unique ID for the current run."""
    chars = string.ascii_letters + string.digits
    while True:
        id_ = ''.join(random.choice(chars) for _ in range(16))
        if is_unique_run_id(id_):
            return id_

def get_client_instances():
    """Returns all client instances created by the current run."""
    filters = [get_run_id_filter(TAG_VALUE)]
    boulder_tag = {'Key': 'Name', 'Value': 'le-boulderserver'}
    return [instance for instance in EC2.instances.filter(Filters=filters)
            if boulder_tag not in instance.tags]

# Fabric Routines
#-------------------------------------------------------------------------------
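The emptiness check in `is_unique_run_id` relies on boto3 collections being lazy: iterating the filtered collection and returning on the first match avoids listing every resource up front. A small sketch of the same idiom; the helper name is illustrative only:

```python
def any_match(collection, filters):
    """Return True as soon as the filtered boto3 collection yields a resource."""
    for _ in collection.filter(Filters=filters):
        return True
    return False

# A run ID is unique when neither instances nor volumes already carry its tag,
# e.g. not any_match(EC2.instances, [get_run_id_filter(id_)])
#      and not any_match(EC2.volumes, [get_run_id_filter(id_)])
```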
@@ -366,20 +372,45 @@ def test_client_process(inqueue, outqueue):
            print("log fail\n")
            pass

def print_manual_cleanup_instructions():
    """Print the magic awscli invocations to cleanup these tests."""
    print('To cleanup AWS resources used by this test, run:')
    instances_cmd = 'aws ec2 terminate-instances --profile ' + PROFILE
    instances_cmd += ' --instance-ids $(aws ec2 describe-instances --filters'
    instances_cmd += ' "Name=tag:' + TAG_KEY + ',Values=' + TAG_VALUE
    instances_cmd += '" --output text --profile ' + PROFILE
    instances_cmd += ' | grep INSTANCES | cut -f8)'
    print(instances_cmd)
    print('After waiting for those instances to shut down, '
          'you can delete any abandoned volumes by running:')
    volumes_cmd = 'aws ec2 describe-volumes --filters "Name=tag:' + TAG_KEY
    volumes_cmd += ',Values=' + TAG_VALUE + '" --output text'
    volumes_cmd += ' --profile '+ PROFILE + ' | grep VOLUMES | cut -f8 | '
    volumes_cmd += 'xargs -n1 aws ec2 delete-volume --profile ' + PROFILE
    volumes_cmd += ' --volume-id'
    print(volumes_cmd)

def cleanup(cl_args, instances, targetlist):
    print('Logs in ', LOGDIR)
    if not cl_args.saveinstances:
        print('Terminating EC2 Instances and Cleaning Dangling EBS Volumes')
        if cl_args.killboulder:
            boulder_server.terminate()
        terminate_and_clean(instances)
    else:
        # print login information for the boxes for debugging
        for ii, target in enumerate(targetlist):
            print(target['name'],
                  target['ami'],
                  "%s@%s"%(target['user'], instances[ii].public_ip_address))
def cleanup(cl_args, targetlist):
    try:
        print('Logs in ', LOGDIR)
        instances = get_client_instances()
        if cl_args.saveinstances:
            for instance in instances:
                target = next(t for t in targetlist
                              if t['ami'] == instance.image_id)
                print(target['name'],
                      target['ami'],
                      "%s@%s"%(target['user'], instance.public_ip_address))
            print_manual_cleanup_instructions()
        else:
            print('Terminating EC2 Instances')
            if cl_args.killboulder:
                boulder_server.terminate()
            terminate_instances(instances)
    except:
        print('An error occurred during cleanup!')
        print_manual_cleanup_instructions()
        raise
@@ -443,6 +474,8 @@ if not sg_exists:
    make_security_group()
    time.sleep(30)

TAG_VALUE = create_unique_run_id()

boulder_preexists = False
boulder_servers = EC2.instances.filter(Filters=[
    {'Name': 'tag:Name', 'Values': ['le-boulderserver']},
@@ -450,6 +483,7 @@ boulder_servers = EC2.instances.filter(Filters=[

boulder_server = next(iter(boulder_servers), None)

atexit.register(cleanup, cl_args, targetlist)
print("Requesting Instances...")
if boulder_server:
    print("Found existing boulder server:", boulder_server)
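Registering `cleanup` with `atexit` before any instances are requested is what the new `cleanup(cl_args, targetlist)` signature enables: nothing is captured up front, so the handler re-discovers resources by tag at exit time and still covers instances created after registration. A stripped-down sketch of that pattern; the lookup function here is a hypothetical stand-in for `get_client_instances`:

```python
import atexit

def find_tagged_instances(run_id):
    # Hypothetical tag-based lookup; at exit this would re-query EC2
    # rather than rely on a list captured before the instances existed.
    return []

def cleanup(run_id):
    for instance in find_tagged_instances(run_id):
        instance.terminate()

# Handlers registered with atexit run at interpreter exit, including after
# an unhandled exception, so cleanup happens even if the test run fails.
atexit.register(cleanup, 'example-run-id')
```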
@@ -541,7 +575,5 @@ try:
    results_file.close()

finally:
    cleanup(cl_args, instances, targetlist)

    # kill any connections
    fabric.network.disconnect_all()