Compare commits

...

13 Commits

Author SHA1 Message Date
Brad Warren
4af68f0c91 test snaps only 2021-04-07 12:26:50 -07:00
Brad Warren
317360e9db don't use all which exits early 2021-04-07 12:11:13 -07:00
Brad Warren
0adc9585d9 remove unused variable 2021-04-07 11:59:41 -07:00
Brad Warren
ab2978f56a rename variable 2021-04-07 11:14:59 -07:00
Brad Warren
b0a533d4e4 remove workspaces dict 2021-04-07 11:13:24 -07:00
Brad Warren
a1695d76a6 consistently flush output 2021-04-07 10:52:26 -07:00
Brad Warren
faf9d28607 python better 2021-04-05 16:26:20 -07:00
Brad Warren
ea34a0229e preserve logfiles 2021-04-05 16:23:00 -07:00
Brad Warren
bbab191857 clarify purpose of lock 2021-04-05 12:49:21 -07:00
Brad Warren
1a5624992f lock when printing output 2021-04-05 12:42:50 -07:00
Brad Warren
b3c159aaa5 print more output 2021-04-05 12:36:15 -07:00
Adrien Ferrand
0f9f902b6e Use typing-extensions to ensure certbot dev environment is compatible with Python 3.6/3.7 (#8776)
Fixes #8773

I took option 2 from the issue mentioned above (importing `typing-extensions` on dev dependencies) to avoid modifying certbot runtime requirements given that what needs to be added is useful for mypy only.

I did not change the Python version used to execute the linting and mypy on the standard tests, given that the tox `docker_dev` target already checks if the development environment is working for Python < 3.8.
2021-04-05 11:53:57 -07:00
Brad Warren
33f177b361 Upgrade Python to 3.8.9. (#8775)
Over the weekend, Python released new versions of Python 3.8 and Python 3.9 partially in response to the OpenSSL CVEs discussed at https://github.com/certbot/certbot/pull/8741#issuecomment-809644789. You can see this mentioned in their changelog at https://docs.python.org/release/3.8.9/whatsnew/changelog.html#build.

This PR updates the Windows installer to use that new release so all of our distribution methods that contain their own copy of OpenSSL are patched for the release tomorrow.

You can see tests passing with this change at https://dev.azure.com/certbot/certbot/_build/results?buildId=3751&view=results. You can see Python 3.8.9 being downloaded instead of an older version at https://dev.azure.com/certbot/certbot/_build/results?buildId=3751&view=logs&j=ad29f110-3cce-5317-4ef2-0a692ae1dee7&t=901eeead-396c-5477-aba2-f402fdcfb885&l=1055.
2021-04-05 11:15:09 -07:00
8 changed files with 91 additions and 211 deletions

View File

@@ -1,127 +1,4 @@
jobs:
- job: docker_build
pool:
vmImage: ubuntu-18.04
strategy:
matrix:
amd64:
DOCKER_ARCH: amd64
# Do not run the heavy non-amd64 builds for test branches
${{ if not(startsWith(variables['Build.SourceBranchName'], 'test-')) }}:
arm32v6:
DOCKER_ARCH: arm32v6
arm64v8:
DOCKER_ARCH: arm64v8
# The default timeout of 60 minutes is a little low for compiling
# cryptography on ARM architectures.
timeoutInMinutes: 180
steps:
- bash: set -e && tools/docker/build.sh $(dockerTag) $DOCKER_ARCH
displayName: Build the Docker images
# We don't filter for the Docker Hub organization to continue to allow
# easy testing of these scripts on forks.
- bash: |
set -e
DOCKER_IMAGES=$(docker images --filter reference='*/certbot' --filter reference='*/dns-*' --format '{{.Repository}}')
docker save --output images.tar $DOCKER_IMAGES
displayName: Save the Docker images
# If the name of the tar file or artifact changes, the deploy stage will
# also need to be updated.
- bash: set -e && mv images.tar $(Build.ArtifactStagingDirectory)
displayName: Prepare Docker artifact
- task: PublishPipelineArtifact@1
inputs:
path: $(Build.ArtifactStagingDirectory)
artifact: docker_$(DOCKER_ARCH)
displayName: Store Docker artifact
- job: docker_run
dependsOn: docker_build
pool:
vmImage: ubuntu-18.04
steps:
- task: DownloadPipelineArtifact@2
inputs:
artifact: docker_amd64
path: $(Build.SourcesDirectory)
displayName: Retrieve Docker images
- bash: set -e && docker load --input $(Build.SourcesDirectory)/images.tar
displayName: Load Docker images
- bash: |
set -ex
DOCKER_IMAGES=$(docker images --filter reference='*/certbot' --filter reference='*/dns-*' --format '{{.Repository}}:{{.Tag}}')
for DOCKER_IMAGE in ${DOCKER_IMAGES}
do docker run --rm "${DOCKER_IMAGE}" plugins --prepare
done
displayName: Run integration tests for Docker images
- job: installer_build
pool:
vmImage: vs2017-win2016
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: 3.8
architecture: x86
addToPath: true
- script: |
python -m venv venv
venv\Scripts\python tools\pipstrap.py
venv\Scripts\python tools\pip_install.py -e windows-installer
displayName: Prepare Windows installer build environment
- script: |
venv\Scripts\construct-windows-installer
displayName: Build Certbot installer
- task: CopyFiles@2
inputs:
sourceFolder: $(System.DefaultWorkingDirectory)/windows-installer/build/nsis
contents: '*.exe'
targetFolder: $(Build.ArtifactStagingDirectory)
- task: PublishPipelineArtifact@1
inputs:
path: $(Build.ArtifactStagingDirectory)
# If we change the artifact's name, it should also be changed in tools/create_github_release.py
artifact: windows-installer
displayName: Publish Windows installer
- job: installer_run
dependsOn: installer_build
strategy:
matrix:
win2019:
imageName: windows-2019
win2016:
imageName: vs2017-win2016
pool:
vmImage: $(imageName)
steps:
- powershell: |
if ($PSVersionTable.PSVersion.Major -ne 5) {
throw "Powershell version is not 5.x"
}
condition: eq(variables['imageName'], 'vs2017-win2016')
displayName: Check Powershell 5.x is used in vs2017-win2016
- task: UsePythonVersion@0
inputs:
versionSpec: 3.8
addToPath: true
- task: DownloadPipelineArtifact@2
inputs:
artifact: windows-installer
path: $(Build.SourcesDirectory)/bin
displayName: Retrieve Windows installer
- script: |
python -m venv venv
venv\Scripts\python tools\pipstrap.py
venv\Scripts\python tools\pip_install.py -e certbot-ci
env:
PIP_NO_BUILD_ISOLATION: no
displayName: Prepare Certbot-CI
- script: |
set PATH=%ProgramFiles(x86)%\Certbot\bin;%PATH%
venv\Scripts\python -m pytest certbot-ci\windows_installer_integration_tests --allow-persistent-changes --installer-path $(Build.SourcesDirectory)\bin\certbot-beta-installer-win32.exe
displayName: Run windows installer integration tests
- script: |
set PATH=%ProgramFiles(x86)%\Certbot\bin;%PATH%
venv\Scripts\python -m pytest certbot-ci\certbot_integration_tests\certbot_tests -n 4
displayName: Run certbot integration tests
- job: snaps_build
pool:
vmImage: ubuntu-18.04

View File

@@ -1,6 +1,4 @@
stages:
- stage: TestAndPackage
jobs:
- template: ../jobs/standard-tests-jobs.yml
- template: ../jobs/extended-tests-jobs.yml
- template: ../jobs/packaging-jobs.yml

View File

@@ -12,7 +12,7 @@ from certbot.tests import acme_util
from certbot.tests import util as test_util
if typing.TYPE_CHECKING:
from typing import Protocol
from typing_extensions import Protocol
else:
Protocol = object # type: ignore

View File

@@ -18,7 +18,7 @@ try:
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
if typing.TYPE_CHECKING:
from typing import Protocol
from typing_extensions import Protocol
else:
Protocol = object # type: ignore

View File

@@ -77,6 +77,9 @@ dev_extras = [
'pytest',
'pytest-cov',
'pytest-xdist',
# typing-extensions is required to import typing.Protocol and make the mypy checks
# pass (along with pylint about non-existent objects) on Python 3.6 & 3.7
'typing-extensions',
'tox',
'twine',
'wheel',

View File

@@ -18,8 +18,8 @@ backcall==0.2.0
bcrypt==3.2.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
beautifulsoup4==4.9.3; python_version >= "3.6" and python_version < "4.0"
bleach==3.3.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
boto3==1.17.42; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
botocore==1.20.42; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
boto3==1.17.44; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
botocore==1.20.44; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
cachecontrol==0.12.6; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
cached-property==1.5.2; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
cachetools==4.2.1; python_version >= "3.5" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6")
@@ -36,7 +36,7 @@ configobj==5.0.6; python_version >= "3.6"
coverage==4.5.4; (python_version >= "2.6" and python_full_version < "3.0.0") or (python_full_version >= "3.3.0" and python_version < "4")
crashtest==0.3.1; python_version >= "3.6" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0")
cryptography==3.4.7; python_version >= "3.6" and python_full_version < "3.0.0" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0") and sys_platform == "linux" or python_full_version >= "3.5.0" and python_version >= "3.6" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0") and sys_platform == "linux"
decorator==4.4.2; python_version == "3.6" and python_full_version < "3.0.0" or python_version == "3.6" and python_full_version >= "3.2.0"
decorator==5.0.5
deprecated==1.2.12; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.4.0"
distlib==0.3.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
distro==1.5.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
@@ -99,7 +99,7 @@ ply==3.11; python_version >= "3.6"
poetry-core==1.0.2; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
poetry==1.1.5; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
prompt-toolkit==3.0.3
protobuf==3.15.6; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
protobuf==3.15.7; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
ptyprocess==0.7.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0"
py==1.10.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
pyasn1-modules==0.2.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
@@ -124,7 +124,7 @@ pytest==3.2.5
python-augeas==0.5.0
python-dateutil==2.8.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6"
python-digitalocean==1.16.0; python_version >= "3.6"
python-dotenv==0.16.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
python-dotenv==0.17.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.4.0" and python_version >= "3.6"
pytz==2021.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.6.0"
pywin32-ctypes==0.2.0; python_version >= "3.6" and python_version < "4.0" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_version >= "3.6" and python_full_version >= "3.5.0") and sys_platform == "win32"
pywin32==300; sys_platform == "win32" and python_version >= "3.6" and (python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6")
@@ -144,7 +144,7 @@ shellingham==1.4.0; python_version >= "3.6" and python_full_version < "3.0.0" or
six==1.15.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.5.0" and python_version >= "3.6"
snowballstemmer==2.1.0; python_version >= "3.6"
soupsieve==2.2.1; python_version >= "3.6"
sphinx-rtd-theme==0.5.1; python_version >= "3.6"
sphinx-rtd-theme==0.5.2; python_version >= "3.6"
sphinx==3.5.3; python_version >= "3.6"
sphinxcontrib-applehelp==1.0.2; python_version >= "3.6"
sphinxcontrib-devhelp==1.0.2; python_version >= "3.6"

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import argparse
import datetime
import functools
import glob
from multiprocessing import Manager
from multiprocessing import Pool
@@ -28,21 +29,34 @@ CERTBOT_DIR = dirname(dirname(dirname(realpath(__file__))))
PLUGINS = [basename(path) for path in glob.glob(join(CERTBOT_DIR, 'certbot-dns-*'))]
# In Python, stdout and stderr are buffered in each process by default. When
# printing output from multiple processes, this can cause delays in printing
# output with lines from different processes being interleaved depending
# on when the output for that process is flushed. To prevent this, we override
# print so that it always flushes its output. Disabling output buffering can
# also be done through command line flags or environment variables set when the
# Python process starts, but this approach was taken instead to ensure
# consistent behavior regardless of how the script is invoked.
print = functools.partial(print, flush=True)
def _snap_log_name(target: str, arch: str):
return f'{target}_{arch}.txt'
def _execute_build(
target: str, archs: Set[str], status: Dict[str, Dict[str, str]],
workspace: str) -> Tuple[int, List[str]]:
temp_workspace = None
try:
# Snapcraft remote-build has a recover feature, that will make it reconnect to an existing
# build on Launchpad if possible. However, the signature used to retrieve a potential
# build is not based on the content of the sources used to build a snap, but on a hash
# of the snapcraft current working directory (the path itself, not the content).
# It means that every build started from /my/path/to/certbot will always be considered
# as the same build, whatever the actual sources are.
# To circumvent this, we create a temporary folder and use it as a workspace to build
# the snap: this path is random, making the recover feature effectively noop.
temp_workspace = tempfile.mkdtemp()
# Snapcraft remote-build has a recover feature, that will make it reconnect to an existing
# build on Launchpad if possible. However, the signature used to retrieve a potential
# build is not based on the content of the sources used to build a snap, but on a hash
# of the snapcraft current working directory (the path itself, not the content).
# It means that every build started from /my/path/to/certbot will always be considered
# as the same build, whatever the actual sources are.
# To circumvent this, we create a temporary folder and use it as a workspace to build
# the snap: this path is random, making the recover feature effectively noop.
with tempfile.TemporaryDirectory() as temp_workspace:
ignore = None
if target == 'certbot':
ignore = shutil.ignore_patterns(".git", "venv*", ".tox")
@@ -71,20 +85,18 @@ def _execute_build(
for path in glob.glob(join(temp_workspace, '*.snap')):
shutil.copy(path, workspace)
for arch in archs:
log_name = _snap_log_name(target, arch)
log_path = join(temp_workspace, log_name)
if exists(log_path):
shutil.copy(log_path, workspace)
return process_state, process_output
except BaseException as e:
print(e)
sys.stdout.flush()
raise e
finally:
if temp_workspace:
shutil.rmtree(temp_workspace, ignore_errors=True)
def _build_snap(
target: str, archs: Set[str], status: Dict[str, Dict[str, str]],
running: Dict[str, bool], lock: Lock) -> Dict[str, str]:
running: Dict[str, bool], output_lock: Lock) -> bool:
status[target] = {arch: '...' for arch in archs}
if target == 'certbot':
@@ -92,24 +104,19 @@ def _build_snap(
else:
workspace = join(CERTBOT_DIR, target)
build_success = False
retry = 3
while retry:
exit_code, process_output = _execute_build(target, archs, status, workspace)
with output_lock:
print(f'Build {target} for {",".join(archs)} (attempt {4-retry}/3) ended with '
f'exit code {exit_code}.')
print(f'Build {target} for {",".join(archs)} (attempt {4-retry}/3) ended with '
f'exit code {exit_code}.')
sys.stdout.flush()
with lock:
dump_output = exit_code != 0
failed_archs = [arch for arch in archs if status[target][arch] != 'Successfully built']
if any(arch for arch in archs if status[target][arch] == 'Chroot problem'):
print('Some builds failed with the status "Chroot problem".')
print('This status is known to make any future build fail until either '
'the source code changes or the build on Launchpad is deleted.')
print('Please fix the build appropriately before trying a new one.')
# It is useless to retry in this situation.
retry = 0
# If the command failed or any architecture wasn't built
# successfully, let's try to print all the output about the problem
# that we can.
dump_output = exit_code != 0 or failed_archs
if exit_code == 0 and not failed_archs:
# We expect to have all target snaps available, or something bad happened.
snaps_list = glob.glob(join(workspace, '*.snap'))
@@ -118,16 +125,12 @@ def _build_snap(
f'(current list: {snaps_list}).')
dump_output = True
else:
build_success = True
break
if failed_archs:
# We expect each failed build to have a log file, or something bad happened.
for arch in failed_archs:
if not exists(join(workspace, f'{target}_{arch}.txt')):
dump_output = True
print(f'Missing output on a failed build {target} for {arch}.')
if dump_output:
print(f'Dumping snapcraft remote-build output build for {target}:')
print('\n'.join(process_output))
_dump_failed_build_logs(target, archs, status, workspace)
# Retry the remote build if it has been interrupted (non zero status code)
# or if some builds have failed.
@@ -135,7 +138,7 @@ def _build_snap(
running[target] = False
return {target: workspace}
return build_success
def _extract_state(project: str, output: str, status: Dict[str, Dict[str, str]]) -> None:
@@ -167,53 +170,45 @@ def _dump_status_helper(archs: Set[str], status: Dict[str, Dict[str, str]]) -> N
print(f'|{"-" * 26}' * len(headers))
print()
sys.stdout.flush()
def _dump_status(
archs: Set[str], status: Dict[str, Dict[str, str]],
running: Dict[str, bool]) -> None:
running: Dict[str, bool], output_lock: Lock) -> None:
while any(running.values()):
print(f'Remote build status at {datetime.datetime.now()}')
_dump_status_helper(archs, status)
with output_lock:
print(f'Remote build status at {datetime.datetime.now()}')
_dump_status_helper(archs, status)
time.sleep(10)
def _dump_results(
targets: Set[str], archs: Set[str], status: Dict[str, Dict[str, str]],
workspaces: Dict[str, str]) -> bool:
failures = False
for target in targets:
for arch in archs:
result = status[target][arch]
def _dump_failed_build_logs(
target: str, archs: Set[str], status: Dict[str, Dict[str, str]],
workspace: str) -> None:
for arch in archs:
result = status[target][arch]
if result != 'Successfully built':
failures = True
if result != 'Successfully built':
failures = True
build_output_path = join(workspaces[target], f'{target}_{arch}.txt')
if not exists(build_output_path):
build_output = f'No output has been dumped by snapcraft remote-build.'
else:
with open(join(workspaces[target], f'{target}_{arch}.txt')) as file_h:
build_output = file_h.read()
build_output_name = _snap_log_name(target, arch)
build_output_path = join(workspace, build_output_name)
if not exists(build_output_path):
build_output = f'No output has been dumped by snapcraft remote-build.'
else:
with open(build_output_path) as file_h:
build_output = file_h.read()
print(f'Output for failed build target={target} arch={arch}')
print('-------------------------------------------')
print(build_output)
print('-------------------------------------------')
print()
print(f'Output for failed build target={target} arch={arch}')
print('-------------------------------------------')
print(build_output)
print('-------------------------------------------')
print()
if not failures:
print('All builds succeeded.')
else:
print('Some builds failed.')
print()
def _dump_results(archs: Set[str], status: Dict[str, Dict[str, str]]) -> None:
print(f'Results for remote build finished at {datetime.datetime.now()}')
_dump_status_helper(archs, status)
return failures
def main():
parser = argparse.ArgumentParser()
@@ -252,12 +247,14 @@ def main():
with manager, pool:
status: Dict[str, Dict[str, str]] = manager.dict()
running = manager.dict({target: True for target in targets})
lock = manager.Lock()
# While multiple processes are running, this lock should be acquired
# before printing output.
output_lock = manager.Lock()
async_results = [pool.apply_async(_build_snap, (target, archs, status, running, lock))
async_results = [pool.apply_async(_build_snap, (target, archs, status, running, output_lock))
for target in targets]
process = Process(target=_dump_status, args=(archs, status, running))
process = Process(target=_dump_status, args=(archs, status, running, output_lock))
process.start()
try:
@@ -266,11 +263,16 @@ def main():
if process.is_alive():
raise ValueError(f"Timeout out reached ({args.timeout} seconds) during the build!")
workspaces = {}
build_success = True
for async_result in async_results:
workspaces.update(async_result.get())
if not async_results.get():
build_success = False
if _dump_results(targets, archs, status, workspaces):
_dump_results(archs, status)
if build_success:
print('All builds succeeded.')
else:
print('Some builds failed.')
raise ValueError("There were failures during the build!")
finally:
process.terminate()

View File

@@ -7,7 +7,7 @@ import subprocess
import sys
import time
PYTHON_VERSION = (3, 8, 8)
PYTHON_VERSION = (3, 8, 9)
PYTHON_BITNESS = 32
NSIS_VERSION = '3.06.1'