##// END OF EJS Templates
automation: support and use Debian Buster by default...
Gregory Szorc -
r43288:d1d919f6 default
parent child Browse files
Show More
@@ -1,1202 +1,1210 b''
1 # aws.py - Automation code for Amazon Web Services
1 # aws.py - Automation code for Amazon Web Services
2 #
2 #
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # no-check-code because Python 3 native.
8 # no-check-code because Python 3 native.
9
9
10 import contextlib
10 import contextlib
11 import copy
11 import copy
12 import hashlib
12 import hashlib
13 import json
13 import json
14 import os
14 import os
15 import pathlib
15 import pathlib
16 import subprocess
16 import subprocess
17 import time
17 import time
18
18
19 import boto3
19 import boto3
20 import botocore.exceptions
20 import botocore.exceptions
21
21
22 from .linux import (
22 from .linux import (
23 BOOTSTRAP_DEBIAN,
23 BOOTSTRAP_DEBIAN,
24 )
24 )
25 from .ssh import (
25 from .ssh import (
26 exec_command as ssh_exec_command,
26 exec_command as ssh_exec_command,
27 wait_for_ssh,
27 wait_for_ssh,
28 )
28 )
29 from .winrm import (
29 from .winrm import (
30 run_powershell,
30 run_powershell,
31 wait_for_winrm,
31 wait_for_winrm,
32 )
32 )
33
33
34
34
# Root of the Mercurial source checkout: four directories up from this file
# (hgautomation/ -> automation/ -> contrib/ -> repo root).
SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent

# PowerShell script used to provision Windows build dependencies.
INSTALL_WINDOWS_DEPENDENCIES = (
    SOURCE_ROOT / 'contrib' / 'install-windows-dependencies.ps1')
39
39
40
40
# EC2 instance type families that ship with local instance storage
# (ephemeral NVMe/HDD volumes), as opposed to EBS-only families.
INSTANCE_TYPES_WITH_STORAGE = {
    'c5d',
    'd2',
    'h1',
    'i3',
    'm5ad',
    'm5d',
    'r5ad',
    'r5d',
    'x1',
    'z1d',
}
53
53
54
54
# AWS account IDs of the vendors whose public AMIs we look up.
AMAZON_ACCOUNT_ID = '801119661308'
DEBIAN_ACCOUNT_ID = '379101102735'
DEBIAN_ACCOUNT_ID_2 = '136693071363'
UBUNTU_ACCOUNT_ID = '099720109477'


# Name of the Windows Server base AMI used for Windows build instances.
WINDOWS_BASE_IMAGE_NAME = 'Windows_Server-2019-English-Full-Base-2019.07.12'
61
62
62
63
# Unprefixed names of the EC2 key pairs we manage (stored remotely with
# the ``hg-`` prefix).
KEY_PAIRS = {
    'automation',
}
66
67
67
68
# Canonical definition of the security groups we manage. Keys are the
# unprefixed group names; values hold the description and EC2 ingress
# permissions (in the shape expected by ``authorize_ingress()``).
SECURITY_GROUPS = {
    'linux-dev-1': {
        'description': 'Mercurial Linux instances that perform build/test automation',
        'ingress': [
            # SSH.
            {
                'FromPort': 22,
                'ToPort': 22,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'SSH from entire Internet',
                    },
                ],
            },
        ],
    },
    'windows-dev-1': {
        'description': 'Mercurial Windows instances that perform build automation',
        'ingress': [
            # SSH.
            {
                'FromPort': 22,
                'ToPort': 22,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'SSH from entire Internet',
                    },
                ],
            },
            # RDP.
            {
                'FromPort': 3389,
                'ToPort': 3389,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'RDP from entire Internet',
                    },
                ],
            },
            # WinRM (HTTP and HTTPS ports) for PowerShell Remoting.
            {
                'FromPort': 5985,
                'ToPort': 5986,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'PowerShell Remoting (Windows Remote Management)',
                    },
                ],
            },
        ],
    },
}
125
126
126
127
# Canonical IAM roles we manage, keyed by unprefixed role name. Each entry
# carries the role description and the managed policy ARNs to attach.
IAM_ROLES = {
    'ephemeral-ec2-role-1': {
        'description': 'Mercurial temporary EC2 instances',
        'policy_arns': [
            # Allows instances to be managed via AWS Systems Manager.
            'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM',
        ],
    },
}
135
136
136
137
# IAM trust policy allowing EC2 instances to assume the roles we create.
ASSUME_ROLE_POLICY_DOCUMENT = '''
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "ec2.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}
'''.strip()
151
152
152
153
# Canonical IAM instance profiles, keyed by unprefixed profile name, with
# the (unprefixed) roles that should be attached to each.
IAM_INSTANCE_PROFILES = {
    'ephemeral-ec2-1': {
        'roles': [
            'ephemeral-ec2-role-1',
        ],
    }
}
160
161
161
162
# EC2 User Data for Windows instances. Its main jobs are setting the
# Administrator password (via the ``%s`` placeholder) and configuring WinRM
# so we can drive the machine remotely.
# Inspired by the User Data script used by Packer
# (from https://www.packer.io/intro/getting-started/build-image.html).
WINDOWS_USER_DATA = r'''
<powershell>

# TODO enable this once we figure out what is failing.
#$ErrorActionPreference = "stop"

# Set administrator password
net user Administrator "%s"
wmic useraccount where "name='Administrator'" set PasswordExpires=FALSE

# First, make sure WinRM can't be connected to
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new enable=yes action=block

# Delete any existing WinRM listeners
winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null
winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null

# Create a new WinRM listener and configure
winrm create winrm/config/listener?Address=*+Transport=HTTP
winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="0"}'
winrm set winrm/config '@{MaxTimeoutms="7200000"}'
winrm set winrm/config/service '@{AllowUnencrypted="true"}'
winrm set winrm/config/service '@{MaxConcurrentOperationsPerUser="12000"}'
winrm set winrm/config/service/auth '@{Basic="true"}'
winrm set winrm/config/client/auth '@{Basic="true"}'

# Configure UAC to allow privilege elevation in remote shells
$Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System'
$Setting = 'LocalAccountTokenFilterPolicy'
Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force

# Configure and restart the WinRM Service; Enable the required firewall exception
Stop-Service -Name WinRM
Set-Service -Name WinRM -StartupType Automatic
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any
Start-Service -Name WinRM

# Disable firewall on private network interfaces so prompts don't appear.
Set-NetFirewallProfile -Name private -Enabled false
</powershell>
'''.lstrip()
207
208
208
209
# PowerShell run once over WinRM to bootstrap a fresh Windows instance:
# installs the package providers, an OpenSSH server, and the legacy .NET
# Framework feature that various tools expect.
WINDOWS_BOOTSTRAP_POWERSHELL = '''
Write-Output "installing PowerShell dependencies"
Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force
Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
Install-Module -Name OpenSSHUtils -RequiredVersion 0.0.2.0

Write-Output "installing OpenSSL server"
Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0
# Various tools will attempt to use older versions of .NET. So we enable
# the feature that provides them so it doesn't have to be auto-enabled
# later.
Write-Output "enabling .NET Framework feature"
Install-WindowsFeature -Name Net-Framework-Core
'''
223
224
224
225
class AWSConnection:
    """Manages the state of a connection with AWS."""

    def __init__(self, automation, region: str, ensure_ec2_state: bool=True):
        """Create boto3 clients/resources for ``region``.

        When ``ensure_ec2_state`` is true, remote key pairs, security
        groups, and IAM entities are reconciled against our canonical
        definitions as part of connecting.
        """
        self.automation = automation
        self.local_state_path = automation.state_path

        # Every AWS entity we create is named with this prefix.
        self.prefix = 'hg-'

        self.session = boto3.session.Session(region_name=region)
        self.ec2client = self.session.client('ec2')
        self.ec2resource = self.session.resource('ec2')
        self.iamclient = self.session.client('iam')
        self.iamresource = self.session.resource('iam')
        self.security_groups = {}

        if ensure_ec2_state:
            ensure_key_pairs(automation.state_path, self.ec2resource)
            self.security_groups = ensure_security_groups(self.ec2resource)
            ensure_iam_state(self.iamclient, self.iamresource)

    def key_pair_path_private(self, name):
        """Path to a key pair private key file."""
        return self.local_state_path / 'keys' / ('keypair-%s' % name)

    def key_pair_path_public(self, name):
        """Path to a key pair public key file."""
        return self.local_state_path / 'keys' / ('keypair-%s.pub' % name)
252 return self.local_state_path / 'keys' / ('keypair-%s.pub' % name)
252
253
253
254
def rsa_key_fingerprint(p: pathlib.Path):
    """Compute the fingerprint of an RSA private key.

    Matches the AWS key pair fingerprint format: the SHA-1 digest of the
    PKCS#8 DER encoding of the key, rendered as colon-delimited hex pairs.
    """
    # TODO use rsa package.
    res = subprocess.run(
        ['openssl', 'pkcs8', '-in', str(p), '-nocrypt', '-topk8',
         '-outform', 'DER'],
        capture_output=True,
        check=True)

    digest = hashlib.sha1(res.stdout).hexdigest()
    # Render as aa:bb:cc:... by joining consecutive hex digit pairs.
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
266
267
267
268
def ensure_key_pairs(state_path: pathlib.Path, ec2resource, prefix='hg-'):
    """Reconcile local SSH key files with EC2 key pairs.

    Remote key pairs carrying ``prefix`` are compared (by fingerprint)
    against the key files under ``state_path / 'keys'``. Keys present on
    only one side, or whose fingerprints disagree, are deleted from both
    sides. Any name in ``KEY_PAIRS`` still missing remotely is then
    created and its private/public key material written locally.
    """
    # Remote key pairs we manage, indexed by unprefixed name -> fingerprint.
    remote_existing = {}

    for kpi in ec2resource.key_pairs.all():
        if kpi.name.startswith(prefix):
            remote_existing[kpi.name[len(prefix):]] = kpi.key_fingerprint

    # Validate that we have these keys locally.
    key_path = state_path / 'keys'
    key_path.mkdir(exist_ok=True, mode=0o700)

    def remove_remote(name):
        # ``name`` here is the full (prefixed) EC2 key pair name.
        print('deleting key pair %s' % name)
        key = ec2resource.KeyPair(name)
        key.delete()

    def remove_local(name):
        # ``name`` here is the unprefixed name; both halves of the local
        # key pair are removed.
        pub_full = key_path / ('keypair-%s.pub' % name)
        priv_full = key_path / ('keypair-%s' % name)

        print('removing %s' % pub_full)
        pub_full.unlink()
        print('removing %s' % priv_full)
        priv_full.unlink()

    # Local key pairs, indexed by unprefixed name -> fingerprint of the
    # private key half.
    local_existing = {}

    for f in sorted(os.listdir(key_path)):
        if not f.startswith('keypair-') or not f.endswith('.pub'):
            continue

        name = f[len('keypair-'):-len('.pub')]

        pub_full = key_path / f
        priv_full = key_path / ('keypair-%s' % name)

        with open(pub_full, 'r', encoding='ascii') as fh:
            data = fh.read()

        # Public key files we write always begin with "ssh-rsa "; anything
        # else is a foreign/corrupt file and both halves are purged.
        if not data.startswith('ssh-rsa '):
            print('unexpected format for key pair file: %s; removing' %
                  pub_full)
            pub_full.unlink()
            priv_full.unlink()
            continue

        local_existing[name] = rsa_key_fingerprint(priv_full)

    # Reconcile: any key not present and matching on both sides is purged
    # from wherever it exists.
    for name in sorted(set(remote_existing) | set(local_existing)):
        if name not in local_existing:
            actual = '%s%s' % (prefix, name)
            print('remote key %s does not exist locally' % name)
            remove_remote(actual)
            del remote_existing[name]

        elif name not in remote_existing:
            print('local key %s does not exist remotely' % name)
            remove_local(name)
            del local_existing[name]

        elif remote_existing[name] != local_existing[name]:
            print('key fingerprint mismatch for %s; '
                  'removing from local and remote' % name)
            remove_local(name)
            remove_remote('%s%s' % (prefix, name))
            del local_existing[name]
            del remote_existing[name]

    # Create whichever canonical key pairs are now missing remotely.
    missing = KEY_PAIRS - set(remote_existing)

    for name in sorted(missing):
        actual = '%s%s' % (prefix, name)
        print('creating key pair %s' % actual)

        priv_full = key_path / ('keypair-%s' % name)
        pub_full = key_path / ('keypair-%s.pub' % name)

        kp = ec2resource.create_key_pair(KeyName=actual)

        with priv_full.open('w', encoding='ascii') as fh:
            fh.write(kp.key_material)
            fh.write('\n')

        priv_full.chmod(0o0600)

        # SSH public key can be extracted via `ssh-keygen`.
        with pub_full.open('w', encoding='ascii') as fh:
            subprocess.run(
                ['ssh-keygen', '-y', '-f', str(priv_full)],
                stdout=fh,
                check=True)

        pub_full.chmod(0o0600)
361
362
362
363
def delete_instance_profile(profile):
    """Detach every role from ``profile`` and then delete the profile.

    IAM refuses to delete an instance profile while roles remain attached,
    so roles are removed first.
    """
    for role_name in [role.name for role in profile.roles]:
        print('removing role %s from instance profile %s' % (role_name,
                                                             profile.name))
        profile.remove_role(RoleName=role_name)

    print('deleting instance profile %s' % profile.name)
    profile.delete()
371
372
372
373
def ensure_iam_state(iamclient, iamresource, prefix='hg-'):
    """Ensure IAM state is in sync with our canonical definition."""

    # Remote instance profiles carrying our prefix, by unprefixed name.
    remote_profiles = {}

    for profile in iamresource.instance_profiles.all():
        if profile.name.startswith(prefix):
            remote_profiles[profile.name[len(prefix):]] = profile

    # Purge profiles we manage (by prefix) but no longer define.
    for name in sorted(set(remote_profiles) - set(IAM_INSTANCE_PROFILES)):
        delete_instance_profile(remote_profiles[name])
        del remote_profiles[name]

    # Remote roles carrying our prefix, by unprefixed name.
    remote_roles = {}

    for role in iamresource.roles.all():
        if role.name.startswith(prefix):
            remote_roles[role.name[len(prefix):]] = role

    # Purge roles we manage but no longer define.
    for name in sorted(set(remote_roles) - set(IAM_ROLES)):
        role = remote_roles[name]

        print('removing role %s' % role.name)
        role.delete()
        del remote_roles[name]

    # We've purged remote state that doesn't belong. Create missing
    # instance profiles and roles.
    for name in sorted(set(IAM_INSTANCE_PROFILES) - set(remote_profiles)):
        actual = '%s%s' % (prefix, name)
        print('creating IAM instance profile %s' % actual)

        profile = iamresource.create_instance_profile(
            InstanceProfileName=actual)
        remote_profiles[name] = profile

        # Creation is eventually consistent; block until the profile is
        # visible before anything tries to use it.
        waiter = iamclient.get_waiter('instance_profile_exists')
        waiter.wait(InstanceProfileName=actual)
        print('IAM instance profile %s is available' % actual)

    for name in sorted(set(IAM_ROLES) - set(remote_roles)):
        entry = IAM_ROLES[name]

        actual = '%s%s' % (prefix, name)
        print('creating IAM role %s' % actual)

        role = iamresource.create_role(
            RoleName=actual,
            Description=entry['description'],
            AssumeRolePolicyDocument=ASSUME_ROLE_POLICY_DOCUMENT,
        )

        # Same eventual-consistency wait as for profiles above.
        waiter = iamclient.get_waiter('role_exists')
        waiter.wait(RoleName=actual)
        print('IAM role %s is available' % actual)

        remote_roles[name] = role

        for arn in entry['policy_arns']:
            print('attaching policy %s to %s' % (arn, role.name))
            role.attach_policy(PolicyArn=arn)

    # Now reconcile state of profiles.
    for name, meta in sorted(IAM_INSTANCE_PROFILES.items()):
        profile = remote_profiles[name]
        # Both sets hold fully-prefixed role names.
        wanted = {'%s%s' % (prefix, role) for role in meta['roles']}
        have = {role.name for role in profile.roles}

        for role in sorted(have - wanted):
            print('removing role %s from %s' % (role, profile.name))
            profile.remove_role(RoleName=role)

        for role in sorted(wanted - have):
            print('adding role %s to %s' % (role, profile.name))
            profile.add_role(RoleName=role)
448
449
449
450
def find_image(ec2resource, owner_id, name):
    """Find an AMI by its owner ID and name.

    Returns the first available machine image matching both criteria;
    raises ``Exception`` if none exists.
    """
    filters = [
        {
            'Name': 'owner-id',
            'Values': [owner_id],
        },
        {
            'Name': 'state',
            'Values': ['available'],
        },
        {
            'Name': 'image-type',
            'Values': ['machine'],
        },
        {
            'Name': 'name',
            'Values': [name],
        },
    ]

    for image in ec2resource.images.filter(Filters=filters):
        return image

    raise Exception('unable to find image for %s' % name)
477
478
478
479
def ensure_security_groups(ec2resource, prefix='hg-'):
    """Ensure all necessary Mercurial security groups are present.

    All security groups are prefixed with ``hg-`` by default. Any security
    groups having this prefix but aren't in our list are deleted.

    Returns a dict mapping unprefixed group name to the EC2 SecurityGroup
    resource.
    """
    # Remote groups we manage, indexed by unprefixed name.
    existing = {}

    for group in ec2resource.security_groups.all():
        if group.group_name.startswith(prefix):
            existing[group.group_name[len(prefix):]] = group

    # Delete prefixed groups that are no longer in the canonical set.
    for name in sorted(set(existing) - set(SECURITY_GROUPS)):
        group = existing[name]
        print('removing legacy security group: %s' % group.group_name)
        group.delete()

    security_groups = {}

    for name, group in sorted(SECURITY_GROUPS.items()):
        # Reuse groups that already exist; only create what's missing.
        if name in existing:
            security_groups[name] = existing[name]
            continue

        actual = '%s%s' % (prefix, name)
        print('adding security group %s' % actual)

        group_res = ec2resource.create_security_group(
            Description=group['description'],
            GroupName=actual,
        )

        group_res.authorize_ingress(
            IpPermissions=group['ingress'],
        )

        security_groups[name] = group_res

    return security_groups
520
521
521
522
def terminate_ec2_instances(ec2resource, prefix='hg-'):
    """Terminate all EC2 instances managed by us.

    Instances are ours if their ``Name`` tag starts with ``prefix``.
    Termination is requested for all of them first, then we block until
    each has actually terminated.
    """
    pending = []

    for instance in ec2resource.instances.all():
        # Nothing to do for instances that are already gone.
        if instance.state['Name'] == 'terminated':
            continue

        # ``tags`` may be None for untagged instances; tag keys are unique
        # per instance, so at most one ``Name`` tag can match.
        tags = instance.tags or []
        if any(tag['Key'] == 'Name' and tag['Value'].startswith(prefix)
               for tag in tags):
            print('terminating %s' % instance.id)
            instance.terminate()
            pending.append(instance)

    for instance in pending:
        instance.wait_until_terminated()
538
539
539
540
def remove_resources(c, prefix='hg-'):
    """Purge all of our resources in this EC2 region.

    Tears down, in dependency order: instances, our AMIs (and snapshots),
    security groups, IAM instance profiles, and IAM roles.
    """
    ec2resource = c.ec2resource
    iamresource = c.iamresource

    # Instances first, since they reference groups and profiles.
    terminate_ec2_instances(ec2resource, prefix=prefix)

    for image in ec2resource.images.filter(Owners=['self']):
        if image.name.startswith(prefix):
            remove_ami(ec2resource, image)

    for group in ec2resource.security_groups.all():
        if group.group_name.startswith(prefix):
            print('removing security group %s' % group.group_name)
            group.delete()

    for profile in iamresource.instance_profiles.all():
        if profile.name.startswith(prefix):
            delete_instance_profile(profile)

    for role in iamresource.roles.all():
        if not role.name.startswith(prefix):
            continue

        # Attached policies block role deletion; detach them first.
        for p in role.attached_policies.all():
            print('detaching policy %s from %s' % (p.arn, role.name))
            role.detach_policy(PolicyArn=p.arn)

        print('removing role %s' % role.name)
        role.delete()
568
569
569
570
def wait_for_ip_addresses(instances):
    """Wait for the public IP addresses of an iterable of instances."""
    for instance in instances:
        # Poll until EC2 has assigned a public address, refreshing the
        # instance's cached state each time around.
        while not instance.public_ip_address:
            time.sleep(2)
            instance.reload()

        print('public IP address for %s: %s' % (
            instance.id, instance.public_ip_address))
582
583
583
584
def remove_ami(ec2resource, image):
    """Remove an AMI and its underlying snapshots."""
    # Resolve snapshot handles before deregistering, since the block
    # device mappings belong to the image being removed.
    snapshots = [
        ec2resource.Snapshot(mapping['Ebs']['SnapshotId'])
        for mapping in image.block_device_mappings
        if 'Ebs' in mapping
    ]

    print('deregistering %s' % image.id)
    image.deregister()

    for snapshot in snapshots:
        print('deleting snapshot %s' % snapshot.id)
        snapshot.delete()
598
599
599
600
def wait_for_ssm(ssmclient, instances):
    """Wait for SSM to come online for an iterable of instance IDs."""
    # The instance set does not change between polls; compute once.
    instance_ids = [i.id for i in instances]
    wanted = len(instances)

    while True:
        res = ssmclient.describe_instance_information(
            Filters=[
                {
                    'Key': 'InstanceIds',
                    'Values': instance_ids,
                },
            ],
        )

        available = len(res['InstanceInformationList'])
        print('%d/%d instances available in SSM' % (available, wanted))

        if available == wanted:
            return

        time.sleep(2)
621
622
622
623
def run_ssm_command(ssmclient, instances, document_name, parameters):
    """Run a PowerShell script on an EC2 instance.

    Sends the SSM document to every instance in ``instances`` and blocks
    until each invocation reaches a terminal state. Raises ``Exception``
    if any invocation finishes in a non-``Success`` status.
    """

    res = ssmclient.send_command(
        InstanceIds=[i.id for i in instances],
        DocumentName=document_name,
        Parameters=parameters,
        # Mirror command output to CloudWatch so it can be inspected later.
        CloudWatchOutputConfig={
            'CloudWatchOutputEnabled': True,
        },
    )

    command_id = res['Command']['CommandId']

    # Poll each instance's invocation serially until it completes.
    for instance in instances:
        while True:
            try:
                res = ssmclient.get_command_invocation(
                    CommandId=command_id,
                    InstanceId=instance.id,
                )
            except botocore.exceptions.ClientError as e:
                # Invocation records are created asynchronously after
                # send_command(); retry until SSM knows about this one.
                if e.response['Error']['Code'] == 'InvocationDoesNotExist':
                    print('could not find SSM command invocation; waiting')
                    time.sleep(1)
                    continue
                else:
                    raise

            if res['Status'] == 'Success':
                break
            elif res['Status'] in ('Pending', 'InProgress', 'Delayed'):
                # Still running; poll again shortly.
                time.sleep(2)
            else:
                raise Exception('command failed on %s: %s' % (
                    instance.id, res['Status']))
659
660
660
661
@contextlib.contextmanager
def temporary_ec2_instances(ec2resource, config):
    """Create temporary EC2 instances.

    This is a proxy to ``ec2client.run_instances(**config)`` that takes care of
    managing the lifecycle of the instances.

    When the context manager exits, the instances are terminated.

    The context manager evaluates to the list of data structures
    describing each created instance. The instances may not be available
    for work immediately: it is up to the caller to wait for the instance
    to start responding.
    """

    ids = None

    try:
        res = ec2resource.create_instances(**config)

        ids = [i.id for i in res]
        print('started instances: %s' % ' '.join(ids))

        yield res
    finally:
        # ``ids`` remains None if create_instances() raised, in which
        # case there is nothing to terminate.
        if ids:
            print('terminating instances: %s' % ' '.join(ids))
            for instance in res:
                instance.terminate()
            print('terminated %d instances' % len(ids))
691
692
692
693
@contextlib.contextmanager
def create_temp_windows_ec2_instances(c: AWSConnection, config):
    """Create temporary Windows EC2 instances.

    This is a higher-level wrapper around ``create_temp_ec2_instances()`` that
    configures the Windows instance for Windows Remote Management. The emitted
    instances will have a ``winrm_client`` attribute containing a
    ``pypsrp.client.Client`` instance bound to the instance.
    """
    # These keys are populated below; accepting them from the caller
    # would silently overwrite the caller's values.
    if 'IamInstanceProfile' in config:
        raise ValueError('IamInstanceProfile cannot be provided in config')
    if 'UserData' in config:
        raise ValueError('UserData cannot be provided in config')

    password = c.automation.default_password()

    # Deep copy so the caller's config dict is left untouched.
    config = copy.deepcopy(config)
    config['IamInstanceProfile'] = {
        'Name': 'hg-ephemeral-ec2-1',
    }
    config.setdefault('TagSpecifications', []).append({
        'ResourceType': 'instance',
        'Tags': [{'Key': 'Name', 'Value': 'hg-temp-windows'}],
    })
    # The user data script configures the instance (including the
    # Administrator password) so the WinRM connection below can succeed.
    config['UserData'] = WINDOWS_USER_DATA % password

    with temporary_ec2_instances(c.ec2resource, config) as instances:
        wait_for_ip_addresses(instances)

        print('waiting for Windows Remote Management service...')

        for instance in instances:
            client = wait_for_winrm(instance.public_ip_address, 'Administrator', password)
            print('established WinRM connection to %s' % instance.id)
            instance.winrm_client = client

        yield instances
730
731
731
732
def resolve_fingerprint(fingerprint):
    """Reduce a JSON-serializable value to a stable SHA-256 hex digest.

    Keys are sorted during serialization so logically-equal dicts yield
    the same fingerprint.
    """
    serialized = json.dumps(fingerprint, sort_keys=True).encode('utf-8')
    return hashlib.sha256(serialized).hexdigest()
735
736
736
737
def find_and_reconcile_image(ec2resource, name, fingerprint):
    """Attempt to find an existing EC2 AMI with a name and fingerprint.

    If an image with the specified fingerprint is found, it is returned.
    Otherwise None is returned.

    Existing images for the specified name that don't have the specified
    fingerprint or are missing required metadata are deleted.
    """
    # Find existing AMIs with this name and delete the ones that are invalid.
    # Store a reference to a good image so it can be returned once the
    # image state is reconciled.
    images = ec2resource.images.filter(
        Filters=[{'Name': 'name', 'Values': [name]}])

    existing_image = None

    for image in images:
        if image.tags is None:
            # No tags at all means we can't verify the fingerprint.
            print('image %s for %s lacks required tags; removing' % (
                image.id, image.name))
            remove_ami(ec2resource, image)
        else:
            tags = {t['Key']: t['Value'] for t in image.tags}

            if tags.get('HGIMAGEFINGERPRINT') == fingerprint:
                existing_image = image
            else:
                print('image %s for %s has wrong fingerprint; removing' % (
                    image.id, image.name))
                remove_ami(ec2resource, image)

    return existing_image
770
771
771
772
def create_ami_from_instance(ec2client, instance, name, description,
                             fingerprint):
    """Create an AMI from a running instance.

    The instance is stopped first, the resulting image is tagged with
    ``fingerprint``, and this function blocks until the image is available.

    Returns the ``ec2resource.Image`` representing the created AMI.
    """
    instance.stop()

    stopped_waiter = ec2client.get_waiter('instance_stopped')
    stopped_waiter.wait(
        InstanceIds=[instance.id],
        WaiterConfig={
            'Delay': 5,
        })
    print('%s is stopped' % instance.id)

    image = instance.create_image(
        Name=name,
        Description=description,
    )

    # Record the fingerprint so find_and_reconcile_image() can later
    # decide whether this image is still current.
    fingerprint_tag = {
        'Key': 'HGIMAGEFINGERPRINT',
        'Value': fingerprint,
    }
    image.create_tags(Tags=[fingerprint_tag])

    print('waiting for image %s' % image.id)

    available_waiter = ec2client.get_waiter('image_available')
    available_waiter.wait(ImageIds=[image.id])

    print('image %s available as %s' % (image.id, image.name))

    return image
808
809
809
810
def ensure_linux_dev_ami(c: AWSConnection, distro='debian10', prefix='hg-'):
    """Ensures a Linux development AMI is available and up-to-date.

    Returns an ``ec2.Image`` of either an existing AMI or a newly-built one.
    """
    ec2client = c.ec2client
    ec2resource = c.ec2resource

    # The AMI name encodes the distro so images for multiple distros
    # can coexist.
    name = '%s%s-%s' % (prefix, 'linux-dev', distro)

    # Map the requested distro to a published base AMI and the default
    # SSH username for that image.
    if distro == 'debian9':
        image = find_image(
            ec2resource,
            DEBIAN_ACCOUNT_ID,
            'debian-stretch-hvm-x86_64-gp2-2019-09-08-17994',
        )
        ssh_username = 'admin'
    elif distro == 'debian10':
        # Buster images are published from a different AWS account than
        # the stretch ones.
        image = find_image(
            ec2resource,
            DEBIAN_ACCOUNT_ID_2,
            'debian-10-amd64-20190909-10',
        )
        ssh_username = 'admin'
    elif distro == 'ubuntu18.04':
        image = find_image(
            ec2resource,
            UBUNTU_ACCOUNT_ID,
            'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190918',
        )
        ssh_username = 'ubuntu'
    elif distro == 'ubuntu19.04':
        image = find_image(
            ec2resource,
            UBUNTU_ACCOUNT_ID,
            'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190918',
        )
        ssh_username = 'ubuntu'
    else:
        raise ValueError('unsupported Linux distro: %s' % distro)

    config = {
        'BlockDeviceMappings': [
            {
                'DeviceName': image.block_device_mappings[0]['DeviceName'],
                'Ebs': {
                    'DeleteOnTermination': True,
                    'VolumeSize': 10,
                    'VolumeType': 'gp2',
                },
            },
        ],
        'EbsOptimized': True,
        'ImageId': image.id,
        'InstanceInitiatedShutdownBehavior': 'stop',
        # 8 VCPUs for compiling Python.
        'InstanceType': 't3.2xlarge',
        'KeyName': '%sautomation' % prefix,
        'MaxCount': 1,
        'MinCount': 1,
        'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
    }

    requirements2_path = (pathlib.Path(__file__).parent.parent /
                          'linux-requirements-py2.txt')
    requirements3_path = (pathlib.Path(__file__).parent.parent /
                          'linux-requirements-py3.txt')
    with requirements2_path.open('r', encoding='utf-8') as fh:
        requirements2 = fh.read()
    with requirements3_path.open('r', encoding='utf-8') as fh:
        requirements3 = fh.read()

    # Compute a deterministic fingerprint to determine whether image needs to
    # be regenerated.
    fingerprint = resolve_fingerprint({
        'instance_config': config,
        'bootstrap_script': BOOTSTRAP_DEBIAN,
        'requirements_py2': requirements2,
        'requirements_py3': requirements3,
    })

    existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)

    if existing_image:
        return existing_image

    print('no suitable %s image found; creating one...' % name)

    # Boot a fresh instance from the base AMI, run the bootstrap script
    # over SSH, then snapshot the stopped instance into our AMI.
    with temporary_ec2_instances(ec2resource, config) as instances:
        wait_for_ip_addresses(instances)

        instance = instances[0]

        client = wait_for_ssh(
            instance.public_ip_address, 22,
            username=ssh_username,
            key_filename=str(c.key_pair_path_private('automation')))

        home = '/home/%s' % ssh_username

        with client:
            print('connecting to SSH server')
            sftp = client.open_sftp()

            print('uploading bootstrap files')
            with sftp.open('%s/bootstrap' % home, 'wb') as fh:
                fh.write(BOOTSTRAP_DEBIAN)
                fh.chmod(0o0700)

            with sftp.open('%s/requirements-py2.txt' % home, 'wb') as fh:
                fh.write(requirements2)
                fh.chmod(0o0700)

            with sftp.open('%s/requirements-py3.txt' % home, 'wb') as fh:
                fh.write(requirements3)
                fh.chmod(0o0700)

            print('executing bootstrap')
            chan, stdin, stdout = ssh_exec_command(client,
                                                   '%s/bootstrap' % home)
            stdin.close()

            for line in stdout:
                print(line, end='')

            res = chan.recv_exit_status()
            if res:
                raise Exception('non-0 exit from bootstrap: %d' % res)

            print('bootstrap completed; stopping %s to create %s' % (
                instance.id, name))

        # Still inside the temporary-instances context: the instance must
        # exist while the AMI is created from it.
        return create_ami_from_instance(ec2client, instance, name,
                                        'Mercurial Linux development environment',
                                        fingerprint)
938
946
939
947
@contextlib.contextmanager
def temporary_linux_dev_instances(c: AWSConnection, image, instance_type,
                                  prefix='hg-', ensure_extra_volume=False):
    """Create temporary Linux development EC2 instances.

    Context manager resolves to a list of ``ec2.Instance`` that were created
    and are running.

    ``ensure_extra_volume`` can be set to ``True`` to require that instances
    have a 2nd storage volume available other than the primary AMI volume.
    For instance types with instance storage, this does nothing special.
    But for instance types without instance storage, an additional EBS volume
    will be added to the instance.

    Instances have an ``ssh_client`` attribute containing a paramiko SSHClient
    instance bound to the instance.

    Instances have an ``ssh_private_key_path`` attribute containing the
    str path to the SSH private key to connect to the instance.
    """

    block_device_mappings = [
        {
            'DeviceName': image.block_device_mappings[0]['DeviceName'],
            'Ebs': {
                'DeleteOnTermination': True,
                'VolumeSize': 12,
                'VolumeType': 'gp2',
            },
        }
    ]

    # This is not an exhaustive list of instance types having instance
    # storage. But it should cover the instance types used by this
    # automation.
    if (ensure_extra_volume
        and not instance_type.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))):
        main_device = block_device_mappings[0]['DeviceName']

        # Choose a secondary device name consistent with the primary
        # device's naming scheme.
        if main_device == 'xvda':
            second_device = 'xvdb'
        elif main_device == '/dev/sda1':
            second_device = '/dev/sdb'
        else:
            raise ValueError('unhandled primary EBS device name: %s' %
                             main_device)

        block_device_mappings.append({
            'DeviceName': second_device,
            'Ebs': {
                'DeleteOnTermination': True,
                'VolumeSize': 8,
                'VolumeType': 'gp2',
            }
        })

    config = {
        'BlockDeviceMappings': block_device_mappings,
        'EbsOptimized': True,
        'ImageId': image.id,
        'InstanceInitiatedShutdownBehavior': 'terminate',
        'InstanceType': instance_type,
        'KeyName': '%sautomation' % prefix,
        'MaxCount': 1,
        'MinCount': 1,
        'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
    }

    with temporary_ec2_instances(c.ec2resource, config) as instances:
        wait_for_ip_addresses(instances)

        ssh_private_key_path = str(c.key_pair_path_private('automation'))

        for instance in instances:
            client = wait_for_ssh(
                instance.public_ip_address, 22,
                username='hg',
                key_filename=ssh_private_key_path)

            instance.ssh_client = client
            instance.ssh_private_key_path = ssh_private_key_path

        try:
            yield instances
        finally:
            # Close SSH connections before temporary_ec2_instances()
            # terminates the instances.
            for instance in instances:
                instance.ssh_client.close()
1026
1034
1027
1035
1028 def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-',
1036 def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-',
1029 base_image_name=WINDOWS_BASE_IMAGE_NAME):
1037 base_image_name=WINDOWS_BASE_IMAGE_NAME):
1030 """Ensure Windows Development AMI is available and up-to-date.
1038 """Ensure Windows Development AMI is available and up-to-date.
1031
1039
1032 If necessary, a modern AMI will be built by starting a temporary EC2
1040 If necessary, a modern AMI will be built by starting a temporary EC2
1033 instance and bootstrapping it.
1041 instance and bootstrapping it.
1034
1042
1035 Obsolete AMIs will be deleted so there is only a single AMI having the
1043 Obsolete AMIs will be deleted so there is only a single AMI having the
1036 desired name.
1044 desired name.
1037
1045
1038 Returns an ``ec2.Image`` of either an existing AMI or a newly-built
1046 Returns an ``ec2.Image`` of either an existing AMI or a newly-built
1039 one.
1047 one.
1040 """
1048 """
1041 ec2client = c.ec2client
1049 ec2client = c.ec2client
1042 ec2resource = c.ec2resource
1050 ec2resource = c.ec2resource
1043 ssmclient = c.session.client('ssm')
1051 ssmclient = c.session.client('ssm')
1044
1052
1045 name = '%s%s' % (prefix, 'windows-dev')
1053 name = '%s%s' % (prefix, 'windows-dev')
1046
1054
1047 image = find_image(ec2resource, AMAZON_ACCOUNT_ID, base_image_name)
1055 image = find_image(ec2resource, AMAZON_ACCOUNT_ID, base_image_name)
1048
1056
1049 config = {
1057 config = {
1050 'BlockDeviceMappings': [
1058 'BlockDeviceMappings': [
1051 {
1059 {
1052 'DeviceName': '/dev/sda1',
1060 'DeviceName': '/dev/sda1',
1053 'Ebs': {
1061 'Ebs': {
1054 'DeleteOnTermination': True,
1062 'DeleteOnTermination': True,
1055 'VolumeSize': 32,
1063 'VolumeSize': 32,
1056 'VolumeType': 'gp2',
1064 'VolumeType': 'gp2',
1057 },
1065 },
1058 }
1066 }
1059 ],
1067 ],
1060 'ImageId': image.id,
1068 'ImageId': image.id,
1061 'InstanceInitiatedShutdownBehavior': 'stop',
1069 'InstanceInitiatedShutdownBehavior': 'stop',
1062 'InstanceType': 't3.medium',
1070 'InstanceType': 't3.medium',
1063 'KeyName': '%sautomation' % prefix,
1071 'KeyName': '%sautomation' % prefix,
1064 'MaxCount': 1,
1072 'MaxCount': 1,
1065 'MinCount': 1,
1073 'MinCount': 1,
1066 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1074 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1067 }
1075 }
1068
1076
1069 commands = [
1077 commands = [
1070 # Need to start the service so sshd_config is generated.
1078 # Need to start the service so sshd_config is generated.
1071 'Start-Service sshd',
1079 'Start-Service sshd',
1072 'Write-Output "modifying sshd_config"',
1080 'Write-Output "modifying sshd_config"',
1073 r'$content = Get-Content C:\ProgramData\ssh\sshd_config',
1081 r'$content = Get-Content C:\ProgramData\ssh\sshd_config',
1074 '$content = $content -replace "Match Group administrators","" -replace "AuthorizedKeysFile __PROGRAMDATA__/ssh/administrators_authorized_keys",""',
1082 '$content = $content -replace "Match Group administrators","" -replace "AuthorizedKeysFile __PROGRAMDATA__/ssh/administrators_authorized_keys",""',
1075 r'$content | Set-Content C:\ProgramData\ssh\sshd_config',
1083 r'$content | Set-Content C:\ProgramData\ssh\sshd_config',
1076 'Import-Module OpenSSHUtils',
1084 'Import-Module OpenSSHUtils',
1077 r'Repair-SshdConfigPermission C:\ProgramData\ssh\sshd_config -Confirm:$false',
1085 r'Repair-SshdConfigPermission C:\ProgramData\ssh\sshd_config -Confirm:$false',
1078 'Restart-Service sshd',
1086 'Restart-Service sshd',
1079 'Write-Output "installing OpenSSL client"',
1087 'Write-Output "installing OpenSSL client"',
1080 'Add-WindowsCapability -Online -Name OpenSSH.Client~~~~0.0.1.0',
1088 'Add-WindowsCapability -Online -Name OpenSSH.Client~~~~0.0.1.0',
1081 'Set-Service -Name sshd -StartupType "Automatic"',
1089 'Set-Service -Name sshd -StartupType "Automatic"',
1082 'Write-Output "OpenSSH server running"',
1090 'Write-Output "OpenSSH server running"',
1083 ]
1091 ]
1084
1092
1085 with INSTALL_WINDOWS_DEPENDENCIES.open('r', encoding='utf-8') as fh:
1093 with INSTALL_WINDOWS_DEPENDENCIES.open('r', encoding='utf-8') as fh:
1086 commands.extend(l.rstrip() for l in fh)
1094 commands.extend(l.rstrip() for l in fh)
1087
1095
1088 # Disable Windows Defender when bootstrapping because it just slows
1096 # Disable Windows Defender when bootstrapping because it just slows
1089 # things down.
1097 # things down.
1090 commands.insert(0, 'Set-MpPreference -DisableRealtimeMonitoring $true')
1098 commands.insert(0, 'Set-MpPreference -DisableRealtimeMonitoring $true')
1091 commands.append('Set-MpPreference -DisableRealtimeMonitoring $false')
1099 commands.append('Set-MpPreference -DisableRealtimeMonitoring $false')
1092
1100
1093 # Compute a deterministic fingerprint to determine whether image needs
1101 # Compute a deterministic fingerprint to determine whether image needs
1094 # to be regenerated.
1102 # to be regenerated.
1095 fingerprint = resolve_fingerprint({
1103 fingerprint = resolve_fingerprint({
1096 'instance_config': config,
1104 'instance_config': config,
1097 'user_data': WINDOWS_USER_DATA,
1105 'user_data': WINDOWS_USER_DATA,
1098 'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
1106 'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
1099 'bootstrap_commands': commands,
1107 'bootstrap_commands': commands,
1100 'base_image_name': base_image_name,
1108 'base_image_name': base_image_name,
1101 })
1109 })
1102
1110
1103 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
1111 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
1104
1112
1105 if existing_image:
1113 if existing_image:
1106 return existing_image
1114 return existing_image
1107
1115
1108 print('no suitable Windows development image found; creating one...')
1116 print('no suitable Windows development image found; creating one...')
1109
1117
1110 with create_temp_windows_ec2_instances(c, config) as instances:
1118 with create_temp_windows_ec2_instances(c, config) as instances:
1111 assert len(instances) == 1
1119 assert len(instances) == 1
1112 instance = instances[0]
1120 instance = instances[0]
1113
1121
1114 wait_for_ssm(ssmclient, [instance])
1122 wait_for_ssm(ssmclient, [instance])
1115
1123
1116 # On first boot, install various Windows updates.
1124 # On first boot, install various Windows updates.
1117 # We would ideally use PowerShell Remoting for this. However, there are
1125 # We would ideally use PowerShell Remoting for this. However, there are
1118 # trust issues that make it difficult to invoke Windows Update
1126 # trust issues that make it difficult to invoke Windows Update
1119 # remotely. So we use SSM, which has a mechanism for running Windows
1127 # remotely. So we use SSM, which has a mechanism for running Windows
1120 # Update.
1128 # Update.
1121 print('installing Windows features...')
1129 print('installing Windows features...')
1122 run_ssm_command(
1130 run_ssm_command(
1123 ssmclient,
1131 ssmclient,
1124 [instance],
1132 [instance],
1125 'AWS-RunPowerShellScript',
1133 'AWS-RunPowerShellScript',
1126 {
1134 {
1127 'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),
1135 'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),
1128 },
1136 },
1129 )
1137 )
1130
1138
1131 # Reboot so all updates are fully applied.
1139 # Reboot so all updates are fully applied.
1132 #
1140 #
1133 # We don't use instance.reboot() here because it is asynchronous and
1141 # We don't use instance.reboot() here because it is asynchronous and
1134 # we don't know when exactly the instance has rebooted. It could take
1142 # we don't know when exactly the instance has rebooted. It could take
1135 # a while to stop and we may start trying to interact with the instance
1143 # a while to stop and we may start trying to interact with the instance
1136 # before it has rebooted.
1144 # before it has rebooted.
1137 print('rebooting instance %s' % instance.id)
1145 print('rebooting instance %s' % instance.id)
1138 instance.stop()
1146 instance.stop()
1139 ec2client.get_waiter('instance_stopped').wait(
1147 ec2client.get_waiter('instance_stopped').wait(
1140 InstanceIds=[instance.id],
1148 InstanceIds=[instance.id],
1141 WaiterConfig={
1149 WaiterConfig={
1142 'Delay': 5,
1150 'Delay': 5,
1143 })
1151 })
1144
1152
1145 instance.start()
1153 instance.start()
1146 wait_for_ip_addresses([instance])
1154 wait_for_ip_addresses([instance])
1147
1155
1148 # There is a race condition here between the User Data PS script running
1156 # There is a race condition here between the User Data PS script running
1149 # and us connecting to WinRM. This can manifest as
1157 # and us connecting to WinRM. This can manifest as
1150 # "AuthorizationManager check failed" failures during run_powershell().
1158 # "AuthorizationManager check failed" failures during run_powershell().
1151 # TODO figure out a workaround.
1159 # TODO figure out a workaround.
1152
1160
1153 print('waiting for Windows Remote Management to come back...')
1161 print('waiting for Windows Remote Management to come back...')
1154 client = wait_for_winrm(instance.public_ip_address, 'Administrator',
1162 client = wait_for_winrm(instance.public_ip_address, 'Administrator',
1155 c.automation.default_password())
1163 c.automation.default_password())
1156 print('established WinRM connection to %s' % instance.id)
1164 print('established WinRM connection to %s' % instance.id)
1157 instance.winrm_client = client
1165 instance.winrm_client = client
1158
1166
1159 print('bootstrapping instance...')
1167 print('bootstrapping instance...')
1160 run_powershell(instance.winrm_client, '\n'.join(commands))
1168 run_powershell(instance.winrm_client, '\n'.join(commands))
1161
1169
1162 print('bootstrap completed; stopping %s to create image' % instance.id)
1170 print('bootstrap completed; stopping %s to create image' % instance.id)
1163 return create_ami_from_instance(ec2client, instance, name,
1171 return create_ami_from_instance(ec2client, instance, name,
1164 'Mercurial Windows development environment',
1172 'Mercurial Windows development environment',
1165 fingerprint)
1173 fingerprint)
1166
1174
1167
1175
1168 @contextlib.contextmanager
1176 @contextlib.contextmanager
1169 def temporary_windows_dev_instances(c: AWSConnection, image, instance_type,
1177 def temporary_windows_dev_instances(c: AWSConnection, image, instance_type,
1170 prefix='hg-', disable_antivirus=False):
1178 prefix='hg-', disable_antivirus=False):
1171 """Create a temporary Windows development EC2 instance.
1179 """Create a temporary Windows development EC2 instance.
1172
1180
1173 Context manager resolves to the list of ``EC2.Instance`` that were created.
1181 Context manager resolves to the list of ``EC2.Instance`` that were created.
1174 """
1182 """
1175 config = {
1183 config = {
1176 'BlockDeviceMappings': [
1184 'BlockDeviceMappings': [
1177 {
1185 {
1178 'DeviceName': '/dev/sda1',
1186 'DeviceName': '/dev/sda1',
1179 'Ebs': {
1187 'Ebs': {
1180 'DeleteOnTermination': True,
1188 'DeleteOnTermination': True,
1181 'VolumeSize': 32,
1189 'VolumeSize': 32,
1182 'VolumeType': 'gp2',
1190 'VolumeType': 'gp2',
1183 },
1191 },
1184 }
1192 }
1185 ],
1193 ],
1186 'ImageId': image.id,
1194 'ImageId': image.id,
1187 'InstanceInitiatedShutdownBehavior': 'stop',
1195 'InstanceInitiatedShutdownBehavior': 'stop',
1188 'InstanceType': instance_type,
1196 'InstanceType': instance_type,
1189 'KeyName': '%sautomation' % prefix,
1197 'KeyName': '%sautomation' % prefix,
1190 'MaxCount': 1,
1198 'MaxCount': 1,
1191 'MinCount': 1,
1199 'MinCount': 1,
1192 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1200 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1193 }
1201 }
1194
1202
1195 with create_temp_windows_ec2_instances(c, config) as instances:
1203 with create_temp_windows_ec2_instances(c, config) as instances:
1196 if disable_antivirus:
1204 if disable_antivirus:
1197 for instance in instances:
1205 for instance in instances:
1198 run_powershell(
1206 run_powershell(
1199 instance.winrm_client,
1207 instance.winrm_client,
1200 'Set-MpPreference -DisableRealtimeMonitoring $true')
1208 'Set-MpPreference -DisableRealtimeMonitoring $true')
1201
1209
1202 yield instances
1210 yield instances
@@ -1,460 +1,460 b''
1 # cli.py - Command line interface for automation
1 # cli.py - Command line interface for automation
2 #
2 #
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # no-check-code because Python 3 native.
8 # no-check-code because Python 3 native.
9
9
10 import argparse
10 import argparse
11 import concurrent.futures as futures
11 import concurrent.futures as futures
12 import os
12 import os
13 import pathlib
13 import pathlib
14 import time
14 import time
15
15
16 from . import (
16 from . import (
17 aws,
17 aws,
18 HGAutomation,
18 HGAutomation,
19 linux,
19 linux,
20 windows,
20 windows,
21 )
21 )
22
22
23
23
24 SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent
24 SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent
25 DIST_PATH = SOURCE_ROOT / 'dist'
25 DIST_PATH = SOURCE_ROOT / 'dist'
26
26
27
27
28 def bootstrap_linux_dev(hga: HGAutomation, aws_region, distros=None,
28 def bootstrap_linux_dev(hga: HGAutomation, aws_region, distros=None,
29 parallel=False):
29 parallel=False):
30 c = hga.aws_connection(aws_region)
30 c = hga.aws_connection(aws_region)
31
31
32 if distros:
32 if distros:
33 distros = distros.split(',')
33 distros = distros.split(',')
34 else:
34 else:
35 distros = sorted(linux.DISTROS)
35 distros = sorted(linux.DISTROS)
36
36
37 # TODO There is a wonky interaction involving KeyboardInterrupt whereby
37 # TODO There is a wonky interaction involving KeyboardInterrupt whereby
38 # the context manager that is supposed to terminate the temporary EC2
38 # the context manager that is supposed to terminate the temporary EC2
39 # instance doesn't run. Until we fix this, make parallel building opt-in
39 # instance doesn't run. Until we fix this, make parallel building opt-in
40 # so we don't orphan instances.
40 # so we don't orphan instances.
41 if parallel:
41 if parallel:
42 fs = []
42 fs = []
43
43
44 with futures.ThreadPoolExecutor(len(distros)) as e:
44 with futures.ThreadPoolExecutor(len(distros)) as e:
45 for distro in distros:
45 for distro in distros:
46 fs.append(e.submit(aws.ensure_linux_dev_ami, c, distro=distro))
46 fs.append(e.submit(aws.ensure_linux_dev_ami, c, distro=distro))
47
47
48 for f in fs:
48 for f in fs:
49 f.result()
49 f.result()
50 else:
50 else:
51 for distro in distros:
51 for distro in distros:
52 aws.ensure_linux_dev_ami(c, distro=distro)
52 aws.ensure_linux_dev_ami(c, distro=distro)
53
53
54
54
55 def bootstrap_windows_dev(hga: HGAutomation, aws_region, base_image_name):
55 def bootstrap_windows_dev(hga: HGAutomation, aws_region, base_image_name):
56 c = hga.aws_connection(aws_region)
56 c = hga.aws_connection(aws_region)
57 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
57 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
58 print('Windows development AMI available as %s' % image.id)
58 print('Windows development AMI available as %s' % image.id)
59
59
60
60
61 def build_inno(hga: HGAutomation, aws_region, arch, revision, version,
61 def build_inno(hga: HGAutomation, aws_region, arch, revision, version,
62 base_image_name):
62 base_image_name):
63 c = hga.aws_connection(aws_region)
63 c = hga.aws_connection(aws_region)
64 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
64 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
65 DIST_PATH.mkdir(exist_ok=True)
65 DIST_PATH.mkdir(exist_ok=True)
66
66
67 with aws.temporary_windows_dev_instances(c, image, 't3.medium') as insts:
67 with aws.temporary_windows_dev_instances(c, image, 't3.medium') as insts:
68 instance = insts[0]
68 instance = insts[0]
69
69
70 windows.synchronize_hg(SOURCE_ROOT, revision, instance)
70 windows.synchronize_hg(SOURCE_ROOT, revision, instance)
71
71
72 for a in arch:
72 for a in arch:
73 windows.build_inno_installer(instance.winrm_client, a,
73 windows.build_inno_installer(instance.winrm_client, a,
74 DIST_PATH,
74 DIST_PATH,
75 version=version)
75 version=version)
76
76
77
77
78 def build_wix(hga: HGAutomation, aws_region, arch, revision, version,
78 def build_wix(hga: HGAutomation, aws_region, arch, revision, version,
79 base_image_name):
79 base_image_name):
80 c = hga.aws_connection(aws_region)
80 c = hga.aws_connection(aws_region)
81 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
81 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
82 DIST_PATH.mkdir(exist_ok=True)
82 DIST_PATH.mkdir(exist_ok=True)
83
83
84 with aws.temporary_windows_dev_instances(c, image, 't3.medium') as insts:
84 with aws.temporary_windows_dev_instances(c, image, 't3.medium') as insts:
85 instance = insts[0]
85 instance = insts[0]
86
86
87 windows.synchronize_hg(SOURCE_ROOT, revision, instance)
87 windows.synchronize_hg(SOURCE_ROOT, revision, instance)
88
88
89 for a in arch:
89 for a in arch:
90 windows.build_wix_installer(instance.winrm_client, a,
90 windows.build_wix_installer(instance.winrm_client, a,
91 DIST_PATH, version=version)
91 DIST_PATH, version=version)
92
92
93
93
94 def build_windows_wheel(hga: HGAutomation, aws_region, arch, revision,
94 def build_windows_wheel(hga: HGAutomation, aws_region, arch, revision,
95 base_image_name):
95 base_image_name):
96 c = hga.aws_connection(aws_region)
96 c = hga.aws_connection(aws_region)
97 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
97 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
98 DIST_PATH.mkdir(exist_ok=True)
98 DIST_PATH.mkdir(exist_ok=True)
99
99
100 with aws.temporary_windows_dev_instances(c, image, 't3.medium') as insts:
100 with aws.temporary_windows_dev_instances(c, image, 't3.medium') as insts:
101 instance = insts[0]
101 instance = insts[0]
102
102
103 windows.synchronize_hg(SOURCE_ROOT, revision, instance)
103 windows.synchronize_hg(SOURCE_ROOT, revision, instance)
104
104
105 for a in arch:
105 for a in arch:
106 windows.build_wheel(instance.winrm_client, a, DIST_PATH)
106 windows.build_wheel(instance.winrm_client, a, DIST_PATH)
107
107
108
108
109 def build_all_windows_packages(hga: HGAutomation, aws_region, revision,
109 def build_all_windows_packages(hga: HGAutomation, aws_region, revision,
110 version, base_image_name):
110 version, base_image_name):
111 c = hga.aws_connection(aws_region)
111 c = hga.aws_connection(aws_region)
112 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
112 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
113 DIST_PATH.mkdir(exist_ok=True)
113 DIST_PATH.mkdir(exist_ok=True)
114
114
115 with aws.temporary_windows_dev_instances(c, image, 't3.medium') as insts:
115 with aws.temporary_windows_dev_instances(c, image, 't3.medium') as insts:
116 instance = insts[0]
116 instance = insts[0]
117
117
118 winrm_client = instance.winrm_client
118 winrm_client = instance.winrm_client
119
119
120 windows.synchronize_hg(SOURCE_ROOT, revision, instance)
120 windows.synchronize_hg(SOURCE_ROOT, revision, instance)
121
121
122 for arch in ('x86', 'x64'):
122 for arch in ('x86', 'x64'):
123 windows.purge_hg(winrm_client)
123 windows.purge_hg(winrm_client)
124 windows.build_wheel(winrm_client, arch, DIST_PATH)
124 windows.build_wheel(winrm_client, arch, DIST_PATH)
125 windows.purge_hg(winrm_client)
125 windows.purge_hg(winrm_client)
126 windows.build_inno_installer(winrm_client, arch, DIST_PATH,
126 windows.build_inno_installer(winrm_client, arch, DIST_PATH,
127 version=version)
127 version=version)
128 windows.purge_hg(winrm_client)
128 windows.purge_hg(winrm_client)
129 windows.build_wix_installer(winrm_client, arch, DIST_PATH,
129 windows.build_wix_installer(winrm_client, arch, DIST_PATH,
130 version=version)
130 version=version)
131
131
132
132
133 def terminate_ec2_instances(hga: HGAutomation, aws_region):
133 def terminate_ec2_instances(hga: HGAutomation, aws_region):
134 c = hga.aws_connection(aws_region, ensure_ec2_state=False)
134 c = hga.aws_connection(aws_region, ensure_ec2_state=False)
135 aws.terminate_ec2_instances(c.ec2resource)
135 aws.terminate_ec2_instances(c.ec2resource)
136
136
137
137
138 def purge_ec2_resources(hga: HGAutomation, aws_region):
138 def purge_ec2_resources(hga: HGAutomation, aws_region):
139 c = hga.aws_connection(aws_region, ensure_ec2_state=False)
139 c = hga.aws_connection(aws_region, ensure_ec2_state=False)
140 aws.remove_resources(c)
140 aws.remove_resources(c)
141
141
142
142
143 def run_tests_linux(hga: HGAutomation, aws_region, instance_type,
143 def run_tests_linux(hga: HGAutomation, aws_region, instance_type,
144 python_version, test_flags, distro, filesystem):
144 python_version, test_flags, distro, filesystem):
145 c = hga.aws_connection(aws_region)
145 c = hga.aws_connection(aws_region)
146 image = aws.ensure_linux_dev_ami(c, distro=distro)
146 image = aws.ensure_linux_dev_ami(c, distro=distro)
147
147
148 t_start = time.time()
148 t_start = time.time()
149
149
150 ensure_extra_volume = filesystem not in ('default', 'tmpfs')
150 ensure_extra_volume = filesystem not in ('default', 'tmpfs')
151
151
152 with aws.temporary_linux_dev_instances(
152 with aws.temporary_linux_dev_instances(
153 c, image, instance_type,
153 c, image, instance_type,
154 ensure_extra_volume=ensure_extra_volume) as insts:
154 ensure_extra_volume=ensure_extra_volume) as insts:
155
155
156 instance = insts[0]
156 instance = insts[0]
157
157
158 linux.prepare_exec_environment(instance.ssh_client,
158 linux.prepare_exec_environment(instance.ssh_client,
159 filesystem=filesystem)
159 filesystem=filesystem)
160 linux.synchronize_hg(SOURCE_ROOT, instance, '.')
160 linux.synchronize_hg(SOURCE_ROOT, instance, '.')
161 t_prepared = time.time()
161 t_prepared = time.time()
162 linux.run_tests(instance.ssh_client, python_version,
162 linux.run_tests(instance.ssh_client, python_version,
163 test_flags)
163 test_flags)
164 t_done = time.time()
164 t_done = time.time()
165
165
166 t_setup = t_prepared - t_start
166 t_setup = t_prepared - t_start
167 t_all = t_done - t_start
167 t_all = t_done - t_start
168
168
169 print(
169 print(
170 'total time: %.1fs; setup: %.1fs; tests: %.1fs; setup overhead: %.1f%%'
170 'total time: %.1fs; setup: %.1fs; tests: %.1fs; setup overhead: %.1f%%'
171 % (t_all, t_setup, t_done - t_prepared, t_setup / t_all * 100.0))
171 % (t_all, t_setup, t_done - t_prepared, t_setup / t_all * 100.0))
172
172
173
173
174 def run_tests_windows(hga: HGAutomation, aws_region, instance_type,
174 def run_tests_windows(hga: HGAutomation, aws_region, instance_type,
175 python_version, arch, test_flags, base_image_name):
175 python_version, arch, test_flags, base_image_name):
176 c = hga.aws_connection(aws_region)
176 c = hga.aws_connection(aws_region)
177 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
177 image = aws.ensure_windows_dev_ami(c, base_image_name=base_image_name)
178
178
179 with aws.temporary_windows_dev_instances(c, image, instance_type,
179 with aws.temporary_windows_dev_instances(c, image, instance_type,
180 disable_antivirus=True) as insts:
180 disable_antivirus=True) as insts:
181 instance = insts[0]
181 instance = insts[0]
182
182
183 windows.synchronize_hg(SOURCE_ROOT, '.', instance)
183 windows.synchronize_hg(SOURCE_ROOT, '.', instance)
184 windows.run_tests(instance.winrm_client, python_version, arch,
184 windows.run_tests(instance.winrm_client, python_version, arch,
185 test_flags)
185 test_flags)
186
186
187
187
188 def publish_windows_artifacts(hg: HGAutomation, aws_region, version: str,
188 def publish_windows_artifacts(hg: HGAutomation, aws_region, version: str,
189 pypi: bool, mercurial_scm_org: bool,
189 pypi: bool, mercurial_scm_org: bool,
190 ssh_username: str):
190 ssh_username: str):
191 windows.publish_artifacts(DIST_PATH, version,
191 windows.publish_artifacts(DIST_PATH, version,
192 pypi=pypi, mercurial_scm_org=mercurial_scm_org,
192 pypi=pypi, mercurial_scm_org=mercurial_scm_org,
193 ssh_username=ssh_username)
193 ssh_username=ssh_username)
194
194
195
195
196 def get_parser():
196 def get_parser():
197 parser = argparse.ArgumentParser()
197 parser = argparse.ArgumentParser()
198
198
199 parser.add_argument(
199 parser.add_argument(
200 '--state-path',
200 '--state-path',
201 default='~/.hgautomation',
201 default='~/.hgautomation',
202 help='Path for local state files',
202 help='Path for local state files',
203 )
203 )
204 parser.add_argument(
204 parser.add_argument(
205 '--aws-region',
205 '--aws-region',
206 help='AWS region to use',
206 help='AWS region to use',
207 default='us-west-1',
207 default='us-west-1',
208 )
208 )
209
209
210 subparsers = parser.add_subparsers()
210 subparsers = parser.add_subparsers()
211
211
212 sp = subparsers.add_parser(
212 sp = subparsers.add_parser(
213 'bootstrap-linux-dev',
213 'bootstrap-linux-dev',
214 help='Bootstrap Linux development environments',
214 help='Bootstrap Linux development environments',
215 )
215 )
216 sp.add_argument(
216 sp.add_argument(
217 '--distros',
217 '--distros',
218 help='Comma delimited list of distros to bootstrap',
218 help='Comma delimited list of distros to bootstrap',
219 )
219 )
220 sp.add_argument(
220 sp.add_argument(
221 '--parallel',
221 '--parallel',
222 action='store_true',
222 action='store_true',
223 help='Generate AMIs in parallel (not CTRL-c safe)'
223 help='Generate AMIs in parallel (not CTRL-c safe)'
224 )
224 )
225 sp.set_defaults(func=bootstrap_linux_dev)
225 sp.set_defaults(func=bootstrap_linux_dev)
226
226
227 sp = subparsers.add_parser(
227 sp = subparsers.add_parser(
228 'bootstrap-windows-dev',
228 'bootstrap-windows-dev',
229 help='Bootstrap the Windows development environment',
229 help='Bootstrap the Windows development environment',
230 )
230 )
231 sp.add_argument(
231 sp.add_argument(
232 '--base-image-name',
232 '--base-image-name',
233 help='AMI name of base image',
233 help='AMI name of base image',
234 default=aws.WINDOWS_BASE_IMAGE_NAME,
234 default=aws.WINDOWS_BASE_IMAGE_NAME,
235 )
235 )
236 sp.set_defaults(func=bootstrap_windows_dev)
236 sp.set_defaults(func=bootstrap_windows_dev)
237
237
238 sp = subparsers.add_parser(
238 sp = subparsers.add_parser(
239 'build-all-windows-packages',
239 'build-all-windows-packages',
240 help='Build all Windows packages',
240 help='Build all Windows packages',
241 )
241 )
242 sp.add_argument(
242 sp.add_argument(
243 '--revision',
243 '--revision',
244 help='Mercurial revision to build',
244 help='Mercurial revision to build',
245 default='.',
245 default='.',
246 )
246 )
247 sp.add_argument(
247 sp.add_argument(
248 '--version',
248 '--version',
249 help='Mercurial version string to use',
249 help='Mercurial version string to use',
250 )
250 )
251 sp.add_argument(
251 sp.add_argument(
252 '--base-image-name',
252 '--base-image-name',
253 help='AMI name of base image',
253 help='AMI name of base image',
254 default=aws.WINDOWS_BASE_IMAGE_NAME,
254 default=aws.WINDOWS_BASE_IMAGE_NAME,
255 )
255 )
256 sp.set_defaults(func=build_all_windows_packages)
256 sp.set_defaults(func=build_all_windows_packages)
257
257
258 sp = subparsers.add_parser(
258 sp = subparsers.add_parser(
259 'build-inno',
259 'build-inno',
260 help='Build Inno Setup installer(s)',
260 help='Build Inno Setup installer(s)',
261 )
261 )
262 sp.add_argument(
262 sp.add_argument(
263 '--arch',
263 '--arch',
264 help='Architecture to build for',
264 help='Architecture to build for',
265 choices={'x86', 'x64'},
265 choices={'x86', 'x64'},
266 nargs='*',
266 nargs='*',
267 default=['x64'],
267 default=['x64'],
268 )
268 )
269 sp.add_argument(
269 sp.add_argument(
270 '--revision',
270 '--revision',
271 help='Mercurial revision to build',
271 help='Mercurial revision to build',
272 default='.',
272 default='.',
273 )
273 )
274 sp.add_argument(
274 sp.add_argument(
275 '--version',
275 '--version',
276 help='Mercurial version string to use in installer',
276 help='Mercurial version string to use in installer',
277 )
277 )
278 sp.add_argument(
278 sp.add_argument(
279 '--base-image-name',
279 '--base-image-name',
280 help='AMI name of base image',
280 help='AMI name of base image',
281 default=aws.WINDOWS_BASE_IMAGE_NAME,
281 default=aws.WINDOWS_BASE_IMAGE_NAME,
282 )
282 )
283 sp.set_defaults(func=build_inno)
283 sp.set_defaults(func=build_inno)
284
284
285 sp = subparsers.add_parser(
285 sp = subparsers.add_parser(
286 'build-windows-wheel',
286 'build-windows-wheel',
287 help='Build Windows wheel(s)',
287 help='Build Windows wheel(s)',
288 )
288 )
289 sp.add_argument(
289 sp.add_argument(
290 '--arch',
290 '--arch',
291 help='Architecture to build for',
291 help='Architecture to build for',
292 choices={'x86', 'x64'},
292 choices={'x86', 'x64'},
293 nargs='*',
293 nargs='*',
294 default=['x64'],
294 default=['x64'],
295 )
295 )
296 sp.add_argument(
296 sp.add_argument(
297 '--revision',
297 '--revision',
298 help='Mercurial revision to build',
298 help='Mercurial revision to build',
299 default='.',
299 default='.',
300 )
300 )
301 sp.add_argument(
301 sp.add_argument(
302 '--base-image-name',
302 '--base-image-name',
303 help='AMI name of base image',
303 help='AMI name of base image',
304 default=aws.WINDOWS_BASE_IMAGE_NAME,
304 default=aws.WINDOWS_BASE_IMAGE_NAME,
305 )
305 )
306 sp.set_defaults(func=build_windows_wheel)
306 sp.set_defaults(func=build_windows_wheel)
307
307
308 sp = subparsers.add_parser(
308 sp = subparsers.add_parser(
309 'build-wix',
309 'build-wix',
310 help='Build WiX installer(s)'
310 help='Build WiX installer(s)'
311 )
311 )
312 sp.add_argument(
312 sp.add_argument(
313 '--arch',
313 '--arch',
314 help='Architecture to build for',
314 help='Architecture to build for',
315 choices={'x86', 'x64'},
315 choices={'x86', 'x64'},
316 nargs='*',
316 nargs='*',
317 default=['x64'],
317 default=['x64'],
318 )
318 )
319 sp.add_argument(
319 sp.add_argument(
320 '--revision',
320 '--revision',
321 help='Mercurial revision to build',
321 help='Mercurial revision to build',
322 default='.',
322 default='.',
323 )
323 )
324 sp.add_argument(
324 sp.add_argument(
325 '--version',
325 '--version',
326 help='Mercurial version string to use in installer',
326 help='Mercurial version string to use in installer',
327 )
327 )
328 sp.add_argument(
328 sp.add_argument(
329 '--base-image-name',
329 '--base-image-name',
330 help='AMI name of base image',
330 help='AMI name of base image',
331 default=aws.WINDOWS_BASE_IMAGE_NAME,
331 default=aws.WINDOWS_BASE_IMAGE_NAME,
332 )
332 )
333 sp.set_defaults(func=build_wix)
333 sp.set_defaults(func=build_wix)
334
334
335 sp = subparsers.add_parser(
335 sp = subparsers.add_parser(
336 'terminate-ec2-instances',
336 'terminate-ec2-instances',
337 help='Terminate all active EC2 instances managed by us',
337 help='Terminate all active EC2 instances managed by us',
338 )
338 )
339 sp.set_defaults(func=terminate_ec2_instances)
339 sp.set_defaults(func=terminate_ec2_instances)
340
340
341 sp = subparsers.add_parser(
341 sp = subparsers.add_parser(
342 'purge-ec2-resources',
342 'purge-ec2-resources',
343 help='Purge all EC2 resources managed by us',
343 help='Purge all EC2 resources managed by us',
344 )
344 )
345 sp.set_defaults(func=purge_ec2_resources)
345 sp.set_defaults(func=purge_ec2_resources)
346
346
347 sp = subparsers.add_parser(
347 sp = subparsers.add_parser(
348 'run-tests-linux',
348 'run-tests-linux',
349 help='Run tests on Linux',
349 help='Run tests on Linux',
350 )
350 )
351 sp.add_argument(
351 sp.add_argument(
352 '--distro',
352 '--distro',
353 help='Linux distribution to run tests on',
353 help='Linux distribution to run tests on',
354 choices=linux.DISTROS,
354 choices=linux.DISTROS,
355 default='debian9',
355 default='debian10',
356 )
356 )
357 sp.add_argument(
357 sp.add_argument(
358 '--filesystem',
358 '--filesystem',
359 help='Filesystem type to use',
359 help='Filesystem type to use',
360 choices={'btrfs', 'default', 'ext3', 'ext4', 'jfs', 'tmpfs', 'xfs'},
360 choices={'btrfs', 'default', 'ext3', 'ext4', 'jfs', 'tmpfs', 'xfs'},
361 default='default',
361 default='default',
362 )
362 )
363 sp.add_argument(
363 sp.add_argument(
364 '--instance-type',
364 '--instance-type',
365 help='EC2 instance type to use',
365 help='EC2 instance type to use',
366 default='c5.9xlarge',
366 default='c5.9xlarge',
367 )
367 )
368 sp.add_argument(
368 sp.add_argument(
369 '--python-version',
369 '--python-version',
370 help='Python version to use',
370 help='Python version to use',
371 choices={'system2', 'system3', '2.7', '3.5', '3.6', '3.7', '3.8',
371 choices={'system2', 'system3', '2.7', '3.5', '3.6', '3.7', '3.8',
372 'pypy', 'pypy3.5', 'pypy3.6'},
372 'pypy', 'pypy3.5', 'pypy3.6'},
373 default='system2',
373 default='system2',
374 )
374 )
375 sp.add_argument(
375 sp.add_argument(
376 'test_flags',
376 'test_flags',
377 help='Extra command line flags to pass to run-tests.py',
377 help='Extra command line flags to pass to run-tests.py',
378 nargs='*',
378 nargs='*',
379 )
379 )
380 sp.set_defaults(func=run_tests_linux)
380 sp.set_defaults(func=run_tests_linux)
381
381
382 sp = subparsers.add_parser(
382 sp = subparsers.add_parser(
383 'run-tests-windows',
383 'run-tests-windows',
384 help='Run tests on Windows',
384 help='Run tests on Windows',
385 )
385 )
386 sp.add_argument(
386 sp.add_argument(
387 '--instance-type',
387 '--instance-type',
388 help='EC2 instance type to use',
388 help='EC2 instance type to use',
389 default='t3.medium',
389 default='t3.medium',
390 )
390 )
391 sp.add_argument(
391 sp.add_argument(
392 '--python-version',
392 '--python-version',
393 help='Python version to use',
393 help='Python version to use',
394 choices={'2.7', '3.5', '3.6', '3.7', '3.8'},
394 choices={'2.7', '3.5', '3.6', '3.7', '3.8'},
395 default='2.7',
395 default='2.7',
396 )
396 )
397 sp.add_argument(
397 sp.add_argument(
398 '--arch',
398 '--arch',
399 help='Architecture to test',
399 help='Architecture to test',
400 choices={'x86', 'x64'},
400 choices={'x86', 'x64'},
401 default='x64',
401 default='x64',
402 )
402 )
403 sp.add_argument(
403 sp.add_argument(
404 '--test-flags',
404 '--test-flags',
405 help='Extra command line flags to pass to run-tests.py',
405 help='Extra command line flags to pass to run-tests.py',
406 )
406 )
407 sp.add_argument(
407 sp.add_argument(
408 '--base-image-name',
408 '--base-image-name',
409 help='AMI name of base image',
409 help='AMI name of base image',
410 default=aws.WINDOWS_BASE_IMAGE_NAME,
410 default=aws.WINDOWS_BASE_IMAGE_NAME,
411 )
411 )
412 sp.set_defaults(func=run_tests_windows)
412 sp.set_defaults(func=run_tests_windows)
413
413
414 sp = subparsers.add_parser(
414 sp = subparsers.add_parser(
415 'publish-windows-artifacts',
415 'publish-windows-artifacts',
416 help='Publish built Windows artifacts (wheels, installers, etc)'
416 help='Publish built Windows artifacts (wheels, installers, etc)'
417 )
417 )
418 sp.add_argument(
418 sp.add_argument(
419 '--no-pypi',
419 '--no-pypi',
420 dest='pypi',
420 dest='pypi',
421 action='store_false',
421 action='store_false',
422 default=True,
422 default=True,
423 help='Skip uploading to PyPI',
423 help='Skip uploading to PyPI',
424 )
424 )
425 sp.add_argument(
425 sp.add_argument(
426 '--no-mercurial-scm-org',
426 '--no-mercurial-scm-org',
427 dest='mercurial_scm_org',
427 dest='mercurial_scm_org',
428 action='store_false',
428 action='store_false',
429 default=True,
429 default=True,
430 help='Skip uploading to www.mercurial-scm.org',
430 help='Skip uploading to www.mercurial-scm.org',
431 )
431 )
432 sp.add_argument(
432 sp.add_argument(
433 '--ssh-username',
433 '--ssh-username',
434 help='SSH username for mercurial-scm.org',
434 help='SSH username for mercurial-scm.org',
435 )
435 )
436 sp.add_argument(
436 sp.add_argument(
437 'version',
437 'version',
438 help='Mercurial version string to locate local packages',
438 help='Mercurial version string to locate local packages',
439 )
439 )
440 sp.set_defaults(func=publish_windows_artifacts)
440 sp.set_defaults(func=publish_windows_artifacts)
441
441
442 return parser
442 return parser
443
443
444
444
445 def main():
445 def main():
446 parser = get_parser()
446 parser = get_parser()
447 args = parser.parse_args()
447 args = parser.parse_args()
448
448
449 local_state_path = pathlib.Path(os.path.expanduser(args.state_path))
449 local_state_path = pathlib.Path(os.path.expanduser(args.state_path))
450 automation = HGAutomation(local_state_path)
450 automation = HGAutomation(local_state_path)
451
451
452 if not hasattr(args, 'func'):
452 if not hasattr(args, 'func'):
453 parser.print_help()
453 parser.print_help()
454 return
454 return
455
455
456 kwargs = dict(vars(args))
456 kwargs = dict(vars(args))
457 del kwargs['func']
457 del kwargs['func']
458 del kwargs['state_path']
458 del kwargs['state_path']
459
459
460 args.func(automation, **kwargs)
460 args.func(automation, **kwargs)
@@ -1,562 +1,567 b''
1 # linux.py - Linux specific automation functionality
1 # linux.py - Linux specific automation functionality
2 #
2 #
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # no-check-code because Python 3 native.
8 # no-check-code because Python 3 native.
9
9
10 import os
10 import os
11 import pathlib
11 import pathlib
12 import shlex
12 import shlex
13 import subprocess
13 import subprocess
14 import tempfile
14 import tempfile
15
15
16 from .ssh import (
16 from .ssh import (
17 exec_command,
17 exec_command,
18 )
18 )
19
19
20
20
21 # Linux distributions that are supported.
21 # Linux distributions that are supported.
22 DISTROS = {
22 DISTROS = {
23 'debian9',
23 'debian9',
24 'debian10',
24 'ubuntu18.04',
25 'ubuntu18.04',
25 'ubuntu19.04',
26 'ubuntu19.04',
26 }
27 }
27
28
28 INSTALL_PYTHONS = r'''
29 INSTALL_PYTHONS = r'''
29 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
30 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
30 PYENV3_VERSIONS="3.5.7 3.6.9 3.7.4 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
31 PYENV3_VERSIONS="3.5.7 3.6.9 3.7.4 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
31
32
32 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
33 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
33 pushd /hgdev/pyenv
34 pushd /hgdev/pyenv
34 git checkout 17f44b7cd6f58ea2fa68ec0371fb9e7a826b8be2
35 git checkout 17f44b7cd6f58ea2fa68ec0371fb9e7a826b8be2
35 popd
36 popd
36
37
37 export PYENV_ROOT="/hgdev/pyenv"
38 export PYENV_ROOT="/hgdev/pyenv"
38 export PATH="$PYENV_ROOT/bin:$PATH"
39 export PATH="$PYENV_ROOT/bin:$PATH"
39
40
40 # pip 19.2.3.
41 # pip 19.2.3.
41 PIP_SHA256=57e3643ff19f018f8a00dfaa6b7e4620e3c1a7a2171fd218425366ec006b3bfe
42 PIP_SHA256=57e3643ff19f018f8a00dfaa6b7e4620e3c1a7a2171fd218425366ec006b3bfe
42 wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py
43 wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py
43 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
44 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
44
45
45 VIRTUALENV_SHA256=f78d81b62d3147396ac33fc9d77579ddc42cc2a98dd9ea38886f616b33bc7fb2
46 VIRTUALENV_SHA256=f78d81b62d3147396ac33fc9d77579ddc42cc2a98dd9ea38886f616b33bc7fb2
46 VIRTUALENV_TARBALL=virtualenv-16.7.5.tar.gz
47 VIRTUALENV_TARBALL=virtualenv-16.7.5.tar.gz
47 wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/66/f0/6867af06d2e2f511e4e1d7094ff663acdebc4f15d4a0cb0fed1007395124/${VIRTUALENV_TARBALL}
48 wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/66/f0/6867af06d2e2f511e4e1d7094ff663acdebc4f15d4a0cb0fed1007395124/${VIRTUALENV_TARBALL}
48 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
49 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
49
50
50 for v in ${PYENV2_VERSIONS}; do
51 for v in ${PYENV2_VERSIONS}; do
51 pyenv install -v ${v}
52 pyenv install -v ${v}
52 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
53 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
53 ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
54 ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
54 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
55 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
55 done
56 done
56
57
57 for v in ${PYENV3_VERSIONS}; do
58 for v in ${PYENV3_VERSIONS}; do
58 pyenv install -v ${v}
59 pyenv install -v ${v}
59 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
60 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
60 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
61 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
61 done
62 done
62
63
63 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
64 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
64 '''.lstrip().replace('\r\n', '\n')
65 '''.lstrip().replace('\r\n', '\n')
65
66
66
67
67 INSTALL_RUST = r'''
68 INSTALL_RUST = r'''
68 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
69 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
69 wget -O rustup-init --progress dot:mega https://static.rust-lang.org/rustup/archive/1.18.3/x86_64-unknown-linux-gnu/rustup-init
70 wget -O rustup-init --progress dot:mega https://static.rust-lang.org/rustup/archive/1.18.3/x86_64-unknown-linux-gnu/rustup-init
70 echo "${RUSTUP_INIT_SHA256} rustup-init" | sha256sum --check -
71 echo "${RUSTUP_INIT_SHA256} rustup-init" | sha256sum --check -
71
72
72 chmod +x rustup-init
73 chmod +x rustup-init
73 sudo -H -u hg -g hg ./rustup-init -y
74 sudo -H -u hg -g hg ./rustup-init -y
74 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.34.2
75 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.34.2
75 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
76 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
76 '''
77 '''
77
78
78
79
79 BOOTSTRAP_VIRTUALENV = r'''
80 BOOTSTRAP_VIRTUALENV = r'''
80 /usr/bin/virtualenv /hgdev/venv-bootstrap
81 /usr/bin/virtualenv /hgdev/venv-bootstrap
81
82
82 HG_SHA256=35fc8ba5e0379c1b3affa2757e83fb0509e8ac314cbd9f1fd133cf265d16e49f
83 HG_SHA256=35fc8ba5e0379c1b3affa2757e83fb0509e8ac314cbd9f1fd133cf265d16e49f
83 HG_TARBALL=mercurial-5.1.1.tar.gz
84 HG_TARBALL=mercurial-5.1.1.tar.gz
84
85
85 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
86 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
86 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
87 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
87
88
88 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
89 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
89 '''.lstrip().replace('\r\n', '\n')
90 '''.lstrip().replace('\r\n', '\n')
90
91
91
92
92 BOOTSTRAP_DEBIAN = r'''
93 BOOTSTRAP_DEBIAN = r'''
93 #!/bin/bash
94 #!/bin/bash
94
95
95 set -ex
96 set -ex
96
97
97 DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
98 DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
98 DEBIAN_VERSION=`cat /etc/debian_version`
99 DEBIAN_VERSION=`cat /etc/debian_version`
99 LSB_RELEASE=`lsb_release -cs`
100 LSB_RELEASE=`lsb_release -cs`
100
101
101 sudo /usr/sbin/groupadd hg
102 sudo /usr/sbin/groupadd hg
102 sudo /usr/sbin/groupadd docker
103 sudo /usr/sbin/groupadd docker
103 sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
104 sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
104 sudo mkdir /home/hg/.ssh
105 sudo mkdir /home/hg/.ssh
105 sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
106 sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
106 sudo chown -R hg:hg /home/hg/.ssh
107 sudo chown -R hg:hg /home/hg/.ssh
107 sudo chmod 700 /home/hg/.ssh
108 sudo chmod 700 /home/hg/.ssh
108 sudo chmod 600 /home/hg/.ssh/authorized_keys
109 sudo chmod 600 /home/hg/.ssh/authorized_keys
109
110
110 cat << EOF | sudo tee /etc/sudoers.d/90-hg
111 cat << EOF | sudo tee /etc/sudoers.d/90-hg
111 hg ALL=(ALL) NOPASSWD:ALL
112 hg ALL=(ALL) NOPASSWD:ALL
112 EOF
113 EOF
113
114
114 sudo apt-get update
115 sudo apt-get update
115 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade
116 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade
116
117
117 # Install packages necessary to set up Docker Apt repo.
118 # Install packages necessary to set up Docker Apt repo.
118 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
119 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
119 apt-transport-https \
120 apt-transport-https \
120 gnupg
121 gnupg
121
122
122 cat > docker-apt-key << EOF
123 cat > docker-apt-key << EOF
123 -----BEGIN PGP PUBLIC KEY BLOCK-----
124 -----BEGIN PGP PUBLIC KEY BLOCK-----
124
125
125 mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
126 mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
126 lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
127 lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
127 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
128 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
128 L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
129 L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
129 UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
130 UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
130 cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
131 cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
131 ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
132 ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
132 vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
133 vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
133 G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
134 G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
134 XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
135 XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
135 q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
136 q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
136 tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
137 tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
137 BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
138 BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
138 v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
139 v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
139 tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
140 tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
140 jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
141 jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
141 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
142 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
142 XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
143 XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
143 FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
144 FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
144 g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
145 g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
145 ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
146 ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
146 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
147 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
147 G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
148 G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
148 FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
149 FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
149 EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
150 EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
150 M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
151 M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
151 Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
152 Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
152 w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
153 w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
153 z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
154 z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
154 eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
155 eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
155 VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
156 VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
156 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
157 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
157 zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
158 zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
158 pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
159 pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
159 ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
160 ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
160 BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
161 BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
161 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
162 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
162 YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
163 YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
163 mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
164 mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
164 KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
165 KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
165 JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
166 JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
166 cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
167 cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
167 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
168 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
168 U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
169 U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
169 VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
170 VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
170 irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
171 irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
171 SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
172 SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
172 QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
173 QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
173 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
174 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
174 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
175 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
175 dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
176 dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
176 Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
177 Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
177 H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
178 H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
178 /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
179 /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
179 M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
180 M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
180 xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
181 xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
181 jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
182 jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
182 YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
183 YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
183 =0YYh
184 =0YYh
184 -----END PGP PUBLIC KEY BLOCK-----
185 -----END PGP PUBLIC KEY BLOCK-----
185 EOF
186 EOF
186
187
187 sudo apt-key add docker-apt-key
188 sudo apt-key add docker-apt-key
188
189
189 if [ "$LSB_RELEASE" = "stretch" ]; then
190 if [ "$LSB_RELEASE" = "stretch" ]; then
190 cat << EOF | sudo tee -a /etc/apt/sources.list
191 cat << EOF | sudo tee -a /etc/apt/sources.list
191 # Need backports for clang-format-6.0
192 # Need backports for clang-format-6.0
192 deb http://deb.debian.org/debian stretch-backports main
193 deb http://deb.debian.org/debian stretch-backports main
194 EOF
195 fi
193
196
197 if [ "$LSB_RELEASE" = "stretch" -o "$LSB_RELEASE" = "buster" ]; then
198 cat << EOF | sudo tee -a /etc/apt/sources.list
194 # Sources are useful if we want to compile things locally.
199 # Sources are useful if we want to compile things locally.
195 deb-src http://deb.debian.org/debian stretch main
200 deb-src http://deb.debian.org/debian $LSB_RELEASE main
196 deb-src http://security.debian.org/debian-security stretch/updates main
201 deb-src http://security.debian.org/debian-security $LSB_RELEASE/updates main
197 deb-src http://deb.debian.org/debian stretch-updates main
202 deb-src http://deb.debian.org/debian $LSB_RELEASE-updates main
198 deb-src http://deb.debian.org/debian stretch-backports main
203 deb-src http://deb.debian.org/debian $LSB_RELEASE-backports main
199
204
200 deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
205 deb [arch=amd64] https://download.docker.com/linux/debian $LSB_RELEASE stable
201 EOF
206 EOF
202
207
203 elif [ "$DISTRO" = "Ubuntu" ]; then
208 elif [ "$DISTRO" = "Ubuntu" ]; then
204 cat << EOF | sudo tee -a /etc/apt/sources.list
209 cat << EOF | sudo tee -a /etc/apt/sources.list
205 deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
210 deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
206 EOF
211 EOF
207
212
208 fi
213 fi
209
214
210 sudo apt-get update
215 sudo apt-get update
211
216
212 PACKAGES="\
217 PACKAGES="\
213 awscli \
218 awscli \
214 btrfs-progs \
219 btrfs-progs \
215 build-essential \
220 build-essential \
216 bzr \
221 bzr \
217 clang-format-6.0 \
222 clang-format-6.0 \
218 cvs \
223 cvs \
219 darcs \
224 darcs \
220 debhelper \
225 debhelper \
221 devscripts \
226 devscripts \
222 docker-ce \
227 docker-ce \
223 dpkg-dev \
228 dpkg-dev \
224 dstat \
229 dstat \
225 emacs \
230 emacs \
226 gettext \
231 gettext \
227 git \
232 git \
228 htop \
233 htop \
229 iotop \
234 iotop \
230 jfsutils \
235 jfsutils \
231 libbz2-dev \
236 libbz2-dev \
232 libexpat1-dev \
237 libexpat1-dev \
233 libffi-dev \
238 libffi-dev \
234 libgdbm-dev \
239 libgdbm-dev \
235 liblzma-dev \
240 liblzma-dev \
236 libncurses5-dev \
241 libncurses5-dev \
237 libnss3-dev \
242 libnss3-dev \
238 libreadline-dev \
243 libreadline-dev \
239 libsqlite3-dev \
244 libsqlite3-dev \
240 libssl-dev \
245 libssl-dev \
241 netbase \
246 netbase \
242 ntfs-3g \
247 ntfs-3g \
243 nvme-cli \
248 nvme-cli \
244 pyflakes \
249 pyflakes \
245 pyflakes3 \
250 pyflakes3 \
246 pylint \
251 pylint \
247 pylint3 \
252 pylint3 \
248 python-all-dev \
253 python-all-dev \
249 python-dev \
254 python-dev \
250 python-docutils \
255 python-docutils \
251 python-fuzzywuzzy \
256 python-fuzzywuzzy \
252 python-pygments \
257 python-pygments \
253 python-subversion \
258 python-subversion \
254 python-vcr \
259 python-vcr \
255 python3-boto3 \
260 python3-boto3 \
256 python3-dev \
261 python3-dev \
257 python3-docutils \
262 python3-docutils \
258 python3-fuzzywuzzy \
263 python3-fuzzywuzzy \
259 python3-pygments \
264 python3-pygments \
260 python3-vcr \
265 python3-vcr \
261 rsync \
266 rsync \
262 sqlite3 \
267 sqlite3 \
263 subversion \
268 subversion \
264 tcl-dev \
269 tcl-dev \
265 tk-dev \
270 tk-dev \
266 tla \
271 tla \
267 unzip \
272 unzip \
268 uuid-dev \
273 uuid-dev \
269 vim \
274 vim \
270 virtualenv \
275 virtualenv \
271 wget \
276 wget \
272 xfsprogs \
277 xfsprogs \
273 zip \
278 zip \
274 zlib1g-dev"
279 zlib1g-dev"
275
280
276 if [ "LSB_RELEASE" = "stretch" ]; then
281 if [ "LSB_RELEASE" = "stretch" ]; then
277 PACKAGES="$PACKAGES linux-perf"
282 PACKAGES="$PACKAGES linux-perf"
278 elif [ "$DISTRO" = "Ubuntu" ]; then
283 elif [ "$DISTRO" = "Ubuntu" ]; then
279 PACKAGES="$PACKAGES linux-tools-common"
284 PACKAGES="$PACKAGES linux-tools-common"
280 fi
285 fi
281
286
282 # Ubuntu 19.04 removes monotone.
287 # Monotone only available in older releases.
283 if [ "$LSB_RELEASE" != "disco" ]; then
288 if [ "$LSB_RELEASE" = "stretch" -o "$LSB_RELEASE" = "xenial" ]; then
284 PACKAGES="$PACKAGES monotone"
289 PACKAGES="$PACKAGES monotone"
285 fi
290 fi
286
291
287 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
292 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
288
293
289 # Create clang-format symlink so test harness finds it.
294 # Create clang-format symlink so test harness finds it.
290 sudo update-alternatives --install /usr/bin/clang-format clang-format \
295 sudo update-alternatives --install /usr/bin/clang-format clang-format \
291 /usr/bin/clang-format-6.0 1000
296 /usr/bin/clang-format-6.0 1000
292
297
293 sudo mkdir /hgdev
298 sudo mkdir /hgdev
294 # Will be normalized to hg:hg later.
299 # Will be normalized to hg:hg later.
295 sudo chown `whoami` /hgdev
300 sudo chown `whoami` /hgdev
296
301
297 {install_rust}
302 {install_rust}
298
303
299 cp requirements-py2.txt /hgdev/requirements-py2.txt
304 cp requirements-py2.txt /hgdev/requirements-py2.txt
300 cp requirements-py3.txt /hgdev/requirements-py3.txt
305 cp requirements-py3.txt /hgdev/requirements-py3.txt
301
306
302 # Disable the pip version check because it uses the network and can
307 # Disable the pip version check because it uses the network and can
303 # be annoying.
308 # be annoying.
304 cat << EOF | sudo tee -a /etc/pip.conf
309 cat << EOF | sudo tee -a /etc/pip.conf
305 [global]
310 [global]
306 disable-pip-version-check = True
311 disable-pip-version-check = True
307 EOF
312 EOF
308
313
309 {install_pythons}
314 {install_pythons}
310 {bootstrap_virtualenv}
315 {bootstrap_virtualenv}
311
316
312 /hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src
317 /hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src
313
318
314 # Mark the repo as non-publishing.
319 # Mark the repo as non-publishing.
315 cat >> /hgdev/src/.hg/hgrc << EOF
320 cat >> /hgdev/src/.hg/hgrc << EOF
316 [phases]
321 [phases]
317 publish = false
322 publish = false
318 EOF
323 EOF
319
324
320 sudo chown -R hg:hg /hgdev
325 sudo chown -R hg:hg /hgdev
321 '''.lstrip().format(
326 '''.lstrip().format(
322 install_rust=INSTALL_RUST,
327 install_rust=INSTALL_RUST,
323 install_pythons=INSTALL_PYTHONS,
328 install_pythons=INSTALL_PYTHONS,
324 bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
329 bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
325 ).replace('\r\n', '\n')
330 ).replace('\r\n', '\n')
326
331
327
332
328 # Prepares /hgdev for operations.
333 # Prepares /hgdev for operations.
329 PREPARE_HGDEV = '''
334 PREPARE_HGDEV = '''
330 #!/bin/bash
335 #!/bin/bash
331
336
332 set -e
337 set -e
333
338
334 FS=$1
339 FS=$1
335
340
336 ensure_device() {
341 ensure_device() {
337 if [ -z "${DEVICE}" ]; then
342 if [ -z "${DEVICE}" ]; then
338 echo "could not find block device to format"
343 echo "could not find block device to format"
339 exit 1
344 exit 1
340 fi
345 fi
341 }
346 }
342
347
343 # Determine device to partition for extra filesystem.
348 # Determine device to partition for extra filesystem.
344 # If only 1 volume is present, it will be the root volume and
349 # If only 1 volume is present, it will be the root volume and
345 # should be /dev/nvme0. If multiple volumes are present, the
350 # should be /dev/nvme0. If multiple volumes are present, the
346 # root volume could be nvme0 or nvme1. Use whichever one doesn't have
351 # root volume could be nvme0 or nvme1. Use whichever one doesn't have
347 # a partition.
352 # a partition.
348 if [ -e /dev/nvme1n1 ]; then
353 if [ -e /dev/nvme1n1 ]; then
349 if [ -e /dev/nvme0n1p1 ]; then
354 if [ -e /dev/nvme0n1p1 ]; then
350 DEVICE=/dev/nvme1n1
355 DEVICE=/dev/nvme1n1
351 else
356 else
352 DEVICE=/dev/nvme0n1
357 DEVICE=/dev/nvme0n1
353 fi
358 fi
354 else
359 else
355 DEVICE=
360 DEVICE=
356 fi
361 fi
357
362
358 sudo mkdir /hgwork
363 sudo mkdir /hgwork
359
364
360 if [ "${FS}" != "default" -a "${FS}" != "tmpfs" ]; then
365 if [ "${FS}" != "default" -a "${FS}" != "tmpfs" ]; then
361 ensure_device
366 ensure_device
362 echo "creating ${FS} filesystem on ${DEVICE}"
367 echo "creating ${FS} filesystem on ${DEVICE}"
363 fi
368 fi
364
369
365 if [ "${FS}" = "default" ]; then
370 if [ "${FS}" = "default" ]; then
366 :
371 :
367
372
368 elif [ "${FS}" = "btrfs" ]; then
373 elif [ "${FS}" = "btrfs" ]; then
369 sudo mkfs.btrfs ${DEVICE}
374 sudo mkfs.btrfs ${DEVICE}
370 sudo mount ${DEVICE} /hgwork
375 sudo mount ${DEVICE} /hgwork
371
376
372 elif [ "${FS}" = "ext3" ]; then
377 elif [ "${FS}" = "ext3" ]; then
373 # lazy_journal_init speeds up filesystem creation at the expense of
378 # lazy_journal_init speeds up filesystem creation at the expense of
374 # integrity if things crash. We are an ephemeral instance, so we don't
379 # integrity if things crash. We are an ephemeral instance, so we don't
375 # care about integrity.
380 # care about integrity.
376 sudo mkfs.ext3 -E lazy_journal_init=1 ${DEVICE}
381 sudo mkfs.ext3 -E lazy_journal_init=1 ${DEVICE}
377 sudo mount ${DEVICE} /hgwork
382 sudo mount ${DEVICE} /hgwork
378
383
379 elif [ "${FS}" = "ext4" ]; then
384 elif [ "${FS}" = "ext4" ]; then
380 sudo mkfs.ext4 -E lazy_journal_init=1 ${DEVICE}
385 sudo mkfs.ext4 -E lazy_journal_init=1 ${DEVICE}
381 sudo mount ${DEVICE} /hgwork
386 sudo mount ${DEVICE} /hgwork
382
387
383 elif [ "${FS}" = "jfs" ]; then
388 elif [ "${FS}" = "jfs" ]; then
384 sudo mkfs.jfs ${DEVICE}
389 sudo mkfs.jfs ${DEVICE}
385 sudo mount ${DEVICE} /hgwork
390 sudo mount ${DEVICE} /hgwork
386
391
387 elif [ "${FS}" = "tmpfs" ]; then
392 elif [ "${FS}" = "tmpfs" ]; then
388 echo "creating tmpfs volume in /hgwork"
393 echo "creating tmpfs volume in /hgwork"
389 sudo mount -t tmpfs -o size=1024M tmpfs /hgwork
394 sudo mount -t tmpfs -o size=1024M tmpfs /hgwork
390
395
391 elif [ "${FS}" = "xfs" ]; then
396 elif [ "${FS}" = "xfs" ]; then
392 sudo mkfs.xfs ${DEVICE}
397 sudo mkfs.xfs ${DEVICE}
393 sudo mount ${DEVICE} /hgwork
398 sudo mount ${DEVICE} /hgwork
394
399
395 else
400 else
396 echo "unsupported filesystem: ${FS}"
401 echo "unsupported filesystem: ${FS}"
397 exit 1
402 exit 1
398 fi
403 fi
399
404
400 echo "/hgwork ready"
405 echo "/hgwork ready"
401
406
402 sudo chown hg:hg /hgwork
407 sudo chown hg:hg /hgwork
403 mkdir /hgwork/tmp
408 mkdir /hgwork/tmp
404 chown hg:hg /hgwork/tmp
409 chown hg:hg /hgwork/tmp
405
410
406 rsync -a /hgdev/src /hgwork/
411 rsync -a /hgdev/src /hgwork/
407 '''.lstrip().replace('\r\n', '\n')
412 '''.lstrip().replace('\r\n', '\n')
408
413
409
414
410 HG_UPDATE_CLEAN = '''
415 HG_UPDATE_CLEAN = '''
411 set -ex
416 set -ex
412
417
413 HG=/hgdev/venv-bootstrap/bin/hg
418 HG=/hgdev/venv-bootstrap/bin/hg
414
419
415 cd /hgwork/src
420 cd /hgwork/src
416 ${HG} --config extensions.purge= purge --all
421 ${HG} --config extensions.purge= purge --all
417 ${HG} update -C $1
422 ${HG} update -C $1
418 ${HG} log -r .
423 ${HG} log -r .
419 '''.lstrip().replace('\r\n', '\n')
424 '''.lstrip().replace('\r\n', '\n')
420
425
421
426
422 def prepare_exec_environment(ssh_client, filesystem='default'):
427 def prepare_exec_environment(ssh_client, filesystem='default'):
423 """Prepare an EC2 instance to execute things.
428 """Prepare an EC2 instance to execute things.
424
429
425 The AMI has an ``/hgdev`` bootstrapped with various Python installs
430 The AMI has an ``/hgdev`` bootstrapped with various Python installs
426 and a clone of the Mercurial repo.
431 and a clone of the Mercurial repo.
427
432
428 In EC2, EBS volumes launched from snapshots have wonky performance behavior.
433 In EC2, EBS volumes launched from snapshots have wonky performance behavior.
429 Notably, blocks have to be copied on first access, which makes volume
434 Notably, blocks have to be copied on first access, which makes volume
430 I/O extremely slow on fresh volumes.
435 I/O extremely slow on fresh volumes.
431
436
432 Furthermore, we may want to run operations, tests, etc on alternative
437 Furthermore, we may want to run operations, tests, etc on alternative
433 filesystems so we examine behavior on different filesystems.
438 filesystems so we examine behavior on different filesystems.
434
439
435 This function is used to facilitate executing operations on alternate
440 This function is used to facilitate executing operations on alternate
436 volumes.
441 volumes.
437 """
442 """
438 sftp = ssh_client.open_sftp()
443 sftp = ssh_client.open_sftp()
439
444
440 with sftp.open('/hgdev/prepare-hgdev', 'wb') as fh:
445 with sftp.open('/hgdev/prepare-hgdev', 'wb') as fh:
441 fh.write(PREPARE_HGDEV)
446 fh.write(PREPARE_HGDEV)
442 fh.chmod(0o0777)
447 fh.chmod(0o0777)
443
448
444 command = 'sudo /hgdev/prepare-hgdev %s' % filesystem
449 command = 'sudo /hgdev/prepare-hgdev %s' % filesystem
445 chan, stdin, stdout = exec_command(ssh_client, command)
450 chan, stdin, stdout = exec_command(ssh_client, command)
446 stdin.close()
451 stdin.close()
447
452
448 for line in stdout:
453 for line in stdout:
449 print(line, end='')
454 print(line, end='')
450
455
451 res = chan.recv_exit_status()
456 res = chan.recv_exit_status()
452
457
453 if res:
458 if res:
454 raise Exception('non-0 exit code updating working directory; %d'
459 raise Exception('non-0 exit code updating working directory; %d'
455 % res)
460 % res)
456
461
457
462
def synchronize_hg(source_path: pathlib.Path, ec2_instance, revision: str=None):
    """Synchronize a local Mercurial source path to remote EC2 instance.

    Pushes the requested revision (plus any ``.hgtags`` changes) from the
    local clone at ``source_path`` to ``/hgwork/src`` on the remote
    instance over SSH, then runs a remote helper script to update the
    remote working directory to that revision.

    Args:
        source_path: local Mercurial repository root (must contain ``.hg``).
        ec2_instance: instance wrapper exposing ``public_ip_address``,
            ``ssh_private_key_path``, and a paramiko ``ssh_client``.
        revision: revset identifying the revision to push; passed directly
            to ``hg log``.  NOTE(review): annotated ``str`` but defaults to
            ``None`` — effectively Optional[str]; ``hg log -r None`` would
            fail, so callers presumably always supply it.

    Raises:
        Exception: if ``source_path`` is not a repository, or the remote
            working-directory update exits non-zero.
        subprocess.CalledProcessError: if resolving the revision fails, or
            the push exits with a code other than 0 or 1.
    """

    # Build a throwaway SSH client configuration so the push does not
    # touch (or depend on) the user's own ~/.ssh state.
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_dir = pathlib.Path(temp_dir)

        ssh_dir = temp_dir / '.ssh'
        ssh_dir.mkdir()
        # ssh refuses config/keys in group/world-accessible directories.
        ssh_dir.chmod(0o0700)

        public_ip = ec2_instance.public_ip_address

        ssh_config = ssh_dir / 'config'

        # Host-specific config: log in as the ``hg`` user with the
        # instance's generated key, and skip host key verification since
        # the instance is freshly provisioned and its key is unknown.
        with ssh_config.open('w', encoding='utf-8') as fh:
            fh.write('Host %s\n' % public_ip)
            fh.write(' User hg\n')
            fh.write(' StrictHostKeyChecking no\n')
            fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
            fh.write(' IdentityFile %s\n' % ec2_instance.ssh_private_key_path)

        if not (source_path / '.hg').is_dir():
            raise Exception('%s is not a Mercurial repository; synchronization '
                            'not yet supported' % source_path)

        # Run the local hg in a deterministic, machine-readable mode.
        env = dict(os.environ)
        env['HGPLAIN'] = '1'
        env['HGENCODING'] = 'utf-8'

        hg_bin = source_path / 'hg'

        # Resolve the (possibly symbolic) revision to a full 40-char node
        # so the push below is unambiguous.
        res = subprocess.run(
            ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
            cwd=str(source_path), env=env, check=True, capture_output=True)

        full_revision = res.stdout.decode('ascii')

        args = [
            'python2.7', str(hg_bin),
            # Use our temporary SSH config and the remote bootstrap hg.
            '--config', 'ui.ssh=ssh -F %s' % ssh_config,
            '--config', 'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
            # Also ensure .hgtags changes are present so auto version
            # calculation works.
            'push', '-f', '-r', full_revision, '-r', 'file(.hgtags)',
            'ssh://%s//hgwork/src' % public_ip,
        ]

        res = subprocess.run(args, cwd=str(source_path), env=env)

        # Allow 1 (no-op) to not trigger error.
        if res.returncode not in (0, 1):
            res.check_returncode()

        # TODO support synchronizing dirty working directory.

        # Upload the update helper script and mark it executable, then run
        # it remotely to check out the pushed revision.
        sftp = ec2_instance.ssh_client.open_sftp()

        with sftp.open('/hgdev/hgup', 'wb') as fh:
            fh.write(HG_UPDATE_CLEAN)
            fh.chmod(0o0700)

        chan, stdin, stdout = exec_command(
            ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision)
        stdin.close()

        # Stream the remote script's output as it arrives.
        for line in stdout:
            print(line, end='')

        res = chan.recv_exit_status()

        if res:
            raise Exception('non-0 exit code updating working directory; %d'
                            % res)
531
536
532
537
def run_tests(ssh_client, python_version, test_flags=None):
    """Run tests on a remote Linux machine via an SSH client.

    Resolves ``python_version`` to an interpreter path on the remote
    machine (system Pythons, pyenv pypy shims, or pyenv CPython shims),
    then invokes ``run-tests.py`` under it and streams output locally.

    Returns the remote command's exit status.
    """
    # Shell-quote each extra flag so the remote /bin/sh sees them intact.
    flags = ' '.join(shlex.quote(arg) for arg in (test_flags or []))

    print('running tests')

    # Map the symbolic version to the remote interpreter path.
    system_pythons = {
        'system2': '/usr/bin/python2',
        'system3': '/usr/bin/python3',
    }
    if python_version in system_pythons:
        python = system_pythons[python_version]
    elif python_version.startswith('pypy'):
        python = '/hgdev/pyenv/shims/%s' % python_version
    else:
        python = '/hgdev/pyenv/shims/python%s' % python_version

    # TMPDIR must live on the dedicated /hgwork volume.
    command = (
        '/bin/sh -c "export TMPDIR=/hgwork/tmp; '
        'cd /hgwork/src/tests && %s run-tests.py %s"' % (python, flags))

    chan, stdin, stdout = exec_command(ssh_client, command)

    # No input is ever sent to the remote process.
    stdin.close()

    # Relay test output line by line as it is produced.
    for line in stdout:
        print(line, end='')

    return chan.recv_exit_status()
General Comments 0
You need to be logged in to leave comments. Login now