##// END OF EJS Templates
automation: support and use Debian Buster by default...
Gregory Szorc -
r43288:d1d919f6 default
parent child Browse files
Show More
@@ -1,1202 +1,1210 b''
1 1 # aws.py - Automation code for Amazon Web Services
2 2 #
3 3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # no-check-code because Python 3 native.
9 9
10 10 import contextlib
11 11 import copy
12 12 import hashlib
13 13 import json
14 14 import os
15 15 import pathlib
16 16 import subprocess
17 17 import time
18 18
19 19 import boto3
20 20 import botocore.exceptions
21 21
22 22 from .linux import (
23 23 BOOTSTRAP_DEBIAN,
24 24 )
25 25 from .ssh import (
26 26 exec_command as ssh_exec_command,
27 27 wait_for_ssh,
28 28 )
29 29 from .winrm import (
30 30 run_powershell,
31 31 wait_for_winrm,
32 32 )
33 33
34 34
# Root of the Mercurial source checkout: this file lives at
# contrib/automation/hgautomation/aws.py, so walk up four directories.
SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent

INSTALL_WINDOWS_DEPENDENCIES = (SOURCE_ROOT / 'contrib' /
                                'install-windows-dependencies.ps1')


# EC2 instance type families that come with local (ephemeral) instance
# storage. Instances from these families don't need an extra EBS volume
# attached when callers ask for additional scratch space.
INSTANCE_TYPES_WITH_STORAGE = {
    'c5d',
    'd2',
    'h1',
    'i3',
    'm5ad',
    'm5d',
    'r5d',
    'r5ad',
    'x1',
    'z1d',
}


# AWS account IDs owning the public base AMIs we boot from.
AMAZON_ACCOUNT_ID = '801119661308'
DEBIAN_ACCOUNT_ID = '379101102735'
# Debian 10 (buster) images are published from a different AWS account
# than the Debian 9 (stretch) ones.
DEBIAN_ACCOUNT_ID_2 = '136693071363'
UBUNTU_ACCOUNT_ID = '099720109477'


WINDOWS_BASE_IMAGE_NAME = 'Windows_Server-2019-English-Full-Base-2019.07.12'


# Names (without the ``hg-`` prefix) of SSH key pairs that must exist in EC2.
KEY_PAIRS = {
    'automation',
}


# Security groups (and their ingress rules) that must exist. Keys are the
# unprefixed group names; groups are created as ``hg-<name>``.
SECURITY_GROUPS = {
    'linux-dev-1': {
        'description': 'Mercurial Linux instances that perform build/test automation',
        'ingress': [
            {
                'FromPort': 22,
                'ToPort': 22,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'SSH from entire Internet',
                    },
                ],
            },
        ],
    },
    'windows-dev-1': {
        'description': 'Mercurial Windows instances that perform build automation',
        'ingress': [
            {
                'FromPort': 22,
                'ToPort': 22,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'SSH from entire Internet',
                    },
                ],
            },
            {
                'FromPort': 3389,
                'ToPort': 3389,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'RDP from entire Internet',
                    },
                ],

            },
            {
                'FromPort': 5985,
                'ToPort': 5986,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'PowerShell Remoting (Windows Remote Management)',
                    },
                ],
            }
        ],
    },
}


# IAM roles that must exist, along with the managed policies to attach.
IAM_ROLES = {
    'ephemeral-ec2-role-1': {
        'description': 'Mercurial temporary EC2 instances',
        'policy_arns': [
            'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM',
        ],
    },
}


# Trust policy allowing EC2 instances to assume our IAM roles.
ASSUME_ROLE_POLICY_DOCUMENT = '''
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
'''.strip()


# Instance profiles that must exist and the roles attached to each.
IAM_INSTANCE_PROFILES = {
    'ephemeral-ec2-1': {
        'roles': [
            'ephemeral-ec2-role-1',
        ],
    }
}


# User Data for Windows EC2 instance. Mainly used to set the password
# and configure WinRM.
# Inspired by the User Data script used by Packer
# (from https://www.packer.io/intro/getting-started/build-image.html).
# The ``%s`` placeholder receives the Administrator password.
WINDOWS_USER_DATA = r'''
<powershell>

# TODO enable this once we figure out what is failing.
#$ErrorActionPreference = "stop"

# Set administrator password
net user Administrator "%s"
wmic useraccount where "name='Administrator'" set PasswordExpires=FALSE

# First, make sure WinRM can't be connected to
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new enable=yes action=block

# Delete any existing WinRM listeners
winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null
winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null

# Create a new WinRM listener and configure
winrm create winrm/config/listener?Address=*+Transport=HTTP
winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="0"}'
winrm set winrm/config '@{MaxTimeoutms="7200000"}'
winrm set winrm/config/service '@{AllowUnencrypted="true"}'
winrm set winrm/config/service '@{MaxConcurrentOperationsPerUser="12000"}'
winrm set winrm/config/service/auth '@{Basic="true"}'
winrm set winrm/config/client/auth '@{Basic="true"}'

# Configure UAC to allow privilege elevation in remote shells
$Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System'
$Setting = 'LocalAccountTokenFilterPolicy'
Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force

# Configure and restart the WinRM Service; Enable the required firewall exception
Stop-Service -Name WinRM
Set-Service -Name WinRM -StartupType Automatic
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any
Start-Service -Name WinRM

# Disable firewall on private network interfaces so prompts don't appear.
Set-NetFirewallProfile -Name private -Enabled false
</powershell>
'''.lstrip()


# PowerShell commands used to bootstrap a freshly-launched Windows instance.
WINDOWS_BOOTSTRAP_POWERSHELL = '''
Write-Output "installing PowerShell dependencies"
Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force
Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
Install-Module -Name OpenSSHUtils -RequiredVersion 0.0.2.0

Write-Output "installing OpenSSH server"
Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0
# Various tools will attempt to use older versions of .NET. So we enable
# the feature that provides them so it doesn't have to be auto-enabled
# later.
Write-Output "enabling .NET Framework feature"
Install-WindowsFeature -Name Net-Framework-Core
'''
223 224
224 225
class AWSConnection:
    """Manages the state of a connection with AWS."""

    def __init__(self, automation, region: str, ensure_ec2_state: bool=True):
        """Bind a connection to ``region``.

        When ``ensure_ec2_state`` is true, key pairs, security groups, and
        IAM entities are reconciled against their canonical definitions as
        part of construction.
        """
        self.automation = automation
        self.local_state_path = automation.state_path
        self.prefix = 'hg-'

        # One boto3 session shared by all service clients/resources.
        session = boto3.session.Session(region_name=region)
        self.session = session
        self.ec2client = session.client('ec2')
        self.ec2resource = session.resource('ec2')
        self.iamclient = session.client('iam')
        self.iamresource = session.resource('iam')

        self.security_groups = {}

        if ensure_ec2_state:
            ensure_key_pairs(automation.state_path, self.ec2resource)
            self.security_groups = ensure_security_groups(self.ec2resource)
            ensure_iam_state(self.iamclient, self.iamresource)

    def key_pair_path_private(self, name):
        """Path to a key pair private key file."""
        return self.local_state_path / 'keys' / ('keypair-%s' % name)

    def key_pair_path_public(self, name):
        """Path to a key pair public key file."""
        return self.local_state_path / 'keys' / ('keypair-%s.pub' % name)
252 253
253 254
def rsa_key_fingerprint(p: pathlib.Path):
    """Compute the fingerprint of an RSA private key."""

    # TODO use rsa package.
    # Convert the key to PKCS#8 DER via the openssl CLI, then hash it.
    der = subprocess.run(
        ['openssl', 'pkcs8', '-in', str(p), '-nocrypt', '-topk8',
         '-outform', 'DER'],
        capture_output=True,
        check=True).stdout

    digest = hashlib.sha1(der).hexdigest()
    # Render as colon-separated octet pairs (aa:bb:cc:...).
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
266 267
267 268
def ensure_key_pairs(state_path: pathlib.Path, ec2resource, prefix='hg-'):
    """Synchronize SSH key pairs between EC2 and local state.

    Compares ``hg-`` prefixed key pairs registered with EC2 against the
    key files stored under ``<state_path>/keys``. Keys present on only
    one side, or whose fingerprints disagree, are removed from both
    sides. Key pairs named in ``KEY_PAIRS`` that are then missing are
    created in EC2 and their private/public key files written locally.
    """
    remote_existing = {}

    # Index remote key pairs: unprefixed name -> fingerprint.
    for kpi in ec2resource.key_pairs.all():
        if kpi.name.startswith(prefix):
            remote_existing[kpi.name[len(prefix):]] = kpi.key_fingerprint

    # Validate that we have these keys locally.
    key_path = state_path / 'keys'
    key_path.mkdir(exist_ok=True, mode=0o700)

    def remove_remote(name):
        # ``name`` here is the full (prefixed) EC2 key pair name.
        print('deleting key pair %s' % name)
        key = ec2resource.KeyPair(name)
        key.delete()

    def remove_local(name):
        # ``name`` here is the unprefixed key name; delete both key files.
        pub_full = key_path / ('keypair-%s.pub' % name)
        priv_full = key_path / ('keypair-%s' % name)

        print('removing %s' % pub_full)
        pub_full.unlink()
        print('removing %s' % priv_full)
        priv_full.unlink()

    local_existing = {}

    # Index local key pairs: unprefixed name -> fingerprint. Files that
    # don't look like RSA public keys are pruned along the way.
    for f in sorted(os.listdir(key_path)):
        if not f.startswith('keypair-') or not f.endswith('.pub'):
            continue

        name = f[len('keypair-'):-len('.pub')]

        pub_full = key_path / f
        priv_full = key_path / ('keypair-%s' % name)

        with open(pub_full, 'r', encoding='ascii') as fh:
            data = fh.read()

        if not data.startswith('ssh-rsa '):
            print('unexpected format for key pair file: %s; removing' %
                  pub_full)
            pub_full.unlink()
            priv_full.unlink()
            continue

        local_existing[name] = rsa_key_fingerprint(priv_full)

    # Reconcile: anything not present (or not matching) on both sides is
    # purged so it can be recreated cleanly below.
    for name in sorted(set(remote_existing) | set(local_existing)):
        if name not in local_existing:
            actual = '%s%s' % (prefix, name)
            print('remote key %s does not exist locally' % name)
            remove_remote(actual)
            del remote_existing[name]

        elif name not in remote_existing:
            print('local key %s does not exist remotely' % name)
            remove_local(name)
            del local_existing[name]

        elif remote_existing[name] != local_existing[name]:
            print('key fingerprint mismatch for %s; '
                  'removing from local and remote' % name)
            remove_local(name)
            remove_remote('%s%s' % (prefix, name))
            del local_existing[name]
            del remote_existing[name]

    missing = KEY_PAIRS - set(remote_existing)

    for name in sorted(missing):
        actual = '%s%s' % (prefix, name)
        print('creating key pair %s' % actual)

        priv_full = key_path / ('keypair-%s' % name)
        pub_full = key_path / ('keypair-%s.pub' % name)

        kp = ec2resource.create_key_pair(KeyName=actual)

        with priv_full.open('w', encoding='ascii') as fh:
            fh.write(kp.key_material)
            fh.write('\n')

        # Private key must not be world-readable (ssh refuses otherwise).
        priv_full.chmod(0o0600)

        # SSH public key can be extracted via `ssh-keygen`.
        with pub_full.open('w', encoding='ascii') as fh:
            subprocess.run(
                ['ssh-keygen', '-y', '-f', str(priv_full)],
                stdout=fh,
                check=True)

        pub_full.chmod(0o0600)
361 362
362 363
def delete_instance_profile(profile):
    """Detach all roles from an IAM instance profile, then delete it."""
    # A profile cannot be deleted while roles are still attached.
    for attached in profile.roles:
        print('removing role %s from instance profile %s' % (attached.name,
                                                             profile.name))
        profile.remove_role(RoleName=attached.name)

    print('deleting instance profile %s' % profile.name)
    profile.delete()
371 372
372 373
def ensure_iam_state(iamclient, iamresource, prefix='hg-'):
    """Ensure IAM state is in sync with our canonical definition.

    Deletes ``hg-`` prefixed instance profiles and roles that are no
    longer defined in ``IAM_INSTANCE_PROFILES``/``IAM_ROLES``, creates
    missing ones, and reconciles role membership on each profile.
    """

    remote_profiles = {}

    # Index our instance profiles by unprefixed name.
    for profile in iamresource.instance_profiles.all():
        if profile.name.startswith(prefix):
            remote_profiles[profile.name[len(prefix):]] = profile

    # Purge profiles that are no longer defined.
    for name in sorted(set(remote_profiles) - set(IAM_INSTANCE_PROFILES)):
        delete_instance_profile(remote_profiles[name])
        del remote_profiles[name]

    remote_roles = {}

    # Index our roles by unprefixed name.
    for role in iamresource.roles.all():
        if role.name.startswith(prefix):
            remote_roles[role.name[len(prefix):]] = role

    # Purge roles that are no longer defined.
    for name in sorted(set(remote_roles) - set(IAM_ROLES)):
        role = remote_roles[name]

        print('removing role %s' % role.name)
        role.delete()
        del remote_roles[name]

    # We've purged remote state that doesn't belong. Create missing
    # instance profiles and roles.
    for name in sorted(set(IAM_INSTANCE_PROFILES) - set(remote_profiles)):
        actual = '%s%s' % (prefix, name)
        print('creating IAM instance profile %s' % actual)

        profile = iamresource.create_instance_profile(
            InstanceProfileName=actual)
        remote_profiles[name] = profile

        # IAM creation is eventually consistent; block until visible.
        waiter = iamclient.get_waiter('instance_profile_exists')
        waiter.wait(InstanceProfileName=actual)
        print('IAM instance profile %s is available' % actual)

    for name in sorted(set(IAM_ROLES) - set(remote_roles)):
        entry = IAM_ROLES[name]

        actual = '%s%s' % (prefix, name)
        print('creating IAM role %s' % actual)

        role = iamresource.create_role(
            RoleName=actual,
            Description=entry['description'],
            AssumeRolePolicyDocument=ASSUME_ROLE_POLICY_DOCUMENT,
        )

        waiter = iamclient.get_waiter('role_exists')
        waiter.wait(RoleName=actual)
        print('IAM role %s is available' % actual)

        remote_roles[name] = role

        for arn in entry['policy_arns']:
            print('attaching policy %s to %s' % (arn, role.name))
            role.attach_policy(PolicyArn=arn)

    # Now reconcile state of profiles.
    for name, meta in sorted(IAM_INSTANCE_PROFILES.items()):
        profile = remote_profiles[name]
        wanted = {'%s%s' % (prefix, role) for role in meta['roles']}
        have = {role.name for role in profile.roles}

        for role in sorted(have - wanted):
            print('removing role %s from %s' % (role, profile.name))
            profile.remove_role(RoleName=role)

        for role in sorted(wanted - have):
            print('adding role %s to %s' % (role, profile.name))
            profile.add_role(RoleName=role)
448 449
449 450
def find_image(ec2resource, owner_id, name):
    """Find an AMI by its owner ID and name.

    Returns the first available machine image matching both criteria.
    Raises ``Exception`` if no such image exists.
    """
    criteria = [
        {'Name': 'owner-id', 'Values': [owner_id]},
        {'Name': 'state', 'Values': ['available']},
        {'Name': 'image-type', 'Values': ['machine']},
        {'Name': 'name', 'Values': [name]},
    ]

    for image in ec2resource.images.filter(Filters=criteria):
        return image

    raise Exception('unable to find image for %s' % name)
477 478
478 479
def ensure_security_groups(ec2resource, prefix='hg-'):
    """Ensure all necessary Mercurial security groups are present.

    All security groups are prefixed with ``hg-`` by default. Any security
    groups having this prefix but aren't in our list are deleted.

    Returns a dict mapping unprefixed group name to the EC2 security
    group resource.
    """
    existing = {}

    # Index the groups we manage by their unprefixed name.
    for group in ec2resource.security_groups.all():
        if group.group_name.startswith(prefix):
            existing[group.group_name[len(prefix):]] = group

    # Delete groups that are no longer in the canonical definition.
    purge = set(existing) - set(SECURITY_GROUPS)

    for name in sorted(purge):
        group = existing[name]
        print('removing legacy security group: %s' % group.group_name)
        group.delete()

    security_groups = {}

    for name, group in sorted(SECURITY_GROUPS.items()):
        # Existing groups are reused as-is (ingress rules not re-verified).
        if name in existing:
            security_groups[name] = existing[name]
            continue

        actual = '%s%s' % (prefix, name)
        print('adding security group %s' % actual)

        group_res = ec2resource.create_security_group(
            Description=group['description'],
            GroupName=actual,
        )

        # Apply the ingress rules from the canonical definition.
        group_res.authorize_ingress(
            IpPermissions=group['ingress'],
        )

        security_groups[name] = group_res

    return security_groups
520 521
521 522
def terminate_ec2_instances(ec2resource, prefix='hg-'):
    """Terminate all EC2 instances managed by us."""
    terminated = []

    for instance in ec2resource.instances.all():
        # Already-terminated instances need no action.
        if instance.state['Name'] == 'terminated':
            continue

        # We identify our instances by their ``Name`` tag prefix.
        for tag in instance.tags or []:
            if tag['Key'] == 'Name' and tag['Value'].startswith(prefix):
                print('terminating %s' % instance.id)
                instance.terminate()
                terminated.append(instance)

    # Termination is asynchronous; block until every requested instance
    # has actually gone away.
    for instance in terminated:
        instance.wait_until_terminated()
538 539
539 540
def remove_resources(c, prefix='hg-'):
    """Purge all of our resources in this EC2 region.

    Terminates instances, deregisters self-owned AMIs (and their
    snapshots), and deletes security groups, instance profiles, and
    roles whose names carry ``prefix``.
    """
    ec2resource = c.ec2resource
    iamresource = c.iamresource

    terminate_ec2_instances(ec2resource, prefix=prefix)

    for image in ec2resource.images.filter(Owners=['self']):
        if image.name.startswith(prefix):
            remove_ami(ec2resource, image)

    for group in ec2resource.security_groups.all():
        if group.group_name.startswith(prefix):
            print('removing security group %s' % group.group_name)
            group.delete()

    for profile in iamresource.instance_profiles.all():
        if profile.name.startswith(prefix):
            delete_instance_profile(profile)

    for role in iamresource.roles.all():
        if role.name.startswith(prefix):
            # Policies must be detached before the role can be deleted.
            for p in role.attached_policies.all():
                print('detaching policy %s from %s' % (p.arn, role.name))
                role.detach_policy(PolicyArn=p.arn)

            print('removing role %s' % role.name)
            role.delete()
568 569
569 570
def wait_for_ip_addresses(instances):
    """Wait for the public IP addresses of an iterable of instances."""
    for instance in instances:
        # Poll until EC2 has assigned a public address to this instance.
        while not instance.public_ip_address:
            time.sleep(2)
            instance.reload()

        print('public IP address for %s: %s' % (
            instance.id, instance.public_ip_address))
582 583
583 584
def remove_ami(ec2resource, image):
    """Remove an AMI and its underlying snapshots."""
    # Capture the EBS snapshots backing the image before deregistering,
    # since the block device mappings are what describe them.
    snapshots = [
        ec2resource.Snapshot(device['Ebs']['SnapshotId'])
        for device in image.block_device_mappings
        if 'Ebs' in device
    ]

    print('deregistering %s' % image.id)
    image.deregister()

    for snapshot in snapshots:
        print('deleting snapshot %s' % snapshot.id)
        snapshot.delete()
598 599
599 600
def wait_for_ssm(ssmclient, instances):
    """Wait for SSM to come online for an iterable of instance IDs."""
    instance_ids = [i.id for i in instances]
    wanted = len(instances)

    # Poll until every instance shows up in SSM's inventory.
    while True:
        res = ssmclient.describe_instance_information(
            Filters=[
                {
                    'Key': 'InstanceIds',
                    'Values': instance_ids,
                },
            ],
        )

        available = len(res['InstanceInformationList'])

        print('%d/%d instances available in SSM' % (available, wanted))

        if available == wanted:
            return

        time.sleep(2)
621 622
622 623
def run_ssm_command(ssmclient, instances, document_name, parameters):
    """Run a PowerShell script on an EC2 instance.

    Sends an SSM command to every instance in ``instances`` and then
    polls each invocation until it reports success. Raises if any
    invocation finishes in a non-success state.
    """

    res = ssmclient.send_command(
        InstanceIds=[i.id for i in instances],
        DocumentName=document_name,
        Parameters=parameters,
        CloudWatchOutputConfig={
            'CloudWatchOutputEnabled': True,
        },
    )

    command_id = res['Command']['CommandId']

    for instance in instances:
        while True:
            try:
                res = ssmclient.get_command_invocation(
                    CommandId=command_id,
                    InstanceId=instance.id,
                )
            except botocore.exceptions.ClientError as e:
                # The invocation may not be visible immediately after
                # send_command(); retry until it materializes.
                if e.response['Error']['Code'] == 'InvocationDoesNotExist':
                    print('could not find SSM command invocation; waiting')
                    time.sleep(1)
                    continue
                else:
                    raise

            if res['Status'] == 'Success':
                break
            elif res['Status'] in ('Pending', 'InProgress', 'Delayed'):
                # Still running; keep polling.
                time.sleep(2)
            else:
                raise Exception('command failed on %s: %s' % (
                    instance.id, res['Status']))
659 660
660 661
@contextlib.contextmanager
def temporary_ec2_instances(ec2resource, config):
    """Create temporary EC2 instances.

    This is a proxy to ``ec2client.run_instances(**config)`` that takes care of
    managing the lifecycle of the instances.

    When the context manager exits, the instances are terminated.

    The context manager evaluates to the list of data structures
    describing each created instance. The instances may not be available
    for work immediately: it is up to the caller to wait for the instance
    to start responding.
    """

    instances = None
    ids = None

    try:
        instances = ec2resource.create_instances(**config)
        ids = [i.id for i in instances]
        print('started instances: %s' % ' '.join(ids))

        yield instances
    finally:
        # Terminate on any exit path, but only if creation succeeded.
        if ids:
            print('terminating instances: %s' % ' '.join(ids))
            for instance in instances:
                instance.terminate()
            print('terminated %d instances' % len(ids))
691 692
692 693
@contextlib.contextmanager
def create_temp_windows_ec2_instances(c: AWSConnection, config):
    """Create temporary Windows EC2 instances.

    This is a higher-level wrapper around ``create_temp_ec2_instances()`` that
    configures the Windows instance for Windows Remote Management. The emitted
    instances will have a ``winrm_client`` attribute containing a
    ``pypsrp.client.Client`` instance bound to the instance.
    """
    # These config entries are managed below; reject caller overrides.
    if 'IamInstanceProfile' in config:
        raise ValueError('IamInstanceProfile cannot be provided in config')
    if 'UserData' in config:
        raise ValueError('UserData cannot be provided in config')

    password = c.automation.default_password()

    # Deep copy so we never mutate the caller's config dict.
    config = copy.deepcopy(config)
    config['IamInstanceProfile'] = {
        'Name': 'hg-ephemeral-ec2-1',
    }
    config.setdefault('TagSpecifications', []).append({
        'ResourceType': 'instance',
        'Tags': [{'Key': 'Name', 'Value': 'hg-temp-windows'}],
    })
    # User Data sets the Administrator password and enables WinRM.
    config['UserData'] = WINDOWS_USER_DATA % password

    with temporary_ec2_instances(c.ec2resource, config) as instances:
        wait_for_ip_addresses(instances)

        print('waiting for Windows Remote Management service...')

        for instance in instances:
            client = wait_for_winrm(instance.public_ip_address, 'Administrator', password)
            print('established WinRM connection to %s' % instance.id)
            instance.winrm_client = client

        yield instances
730 731
731 732
def resolve_fingerprint(fingerprint):
    """Reduce a fingerprint data structure to a stable hex digest.

    The input is serialized to canonical JSON (sorted keys) so that
    logically-equal inputs always hash to the same value.
    """
    canonical = json.dumps(fingerprint, sort_keys=True)
    return hashlib.sha256(canonical.encode('utf-8')).hexdigest()
735 736
736 737
def find_and_reconcile_image(ec2resource, name, fingerprint):
    """Attempt to find an existing EC2 AMI with a name and fingerprint.

    If an image with the specified fingerprint is found, it is returned.
    Otherwise None is returned.

    Existing images for the specified name that don't have the specified
    fingerprint, or that are missing required metadata, are deleted.
    """
    # Scan every AMI sharing this name, pruning stale or malformed ones
    # and remembering the image (if any) that matches the fingerprint.
    candidates = ec2resource.images.filter(
        Filters=[{'Name': 'name', 'Values': [name]}])

    match = None

    for image in candidates:
        if image.tags is None:
            print('image %s for %s lacks required tags; removing' % (
                image.id, image.name))
            remove_ami(ec2resource, image)
            continue

        tags = {t['Key']: t['Value'] for t in image.tags}

        if tags.get('HGIMAGEFINGERPRINT') == fingerprint:
            match = image
        else:
            print('image %s for %s has wrong fingerprint; removing' % (
                image.id, image.name))
            remove_ami(ec2resource, image)

    return match
770 771
771 772
def create_ami_from_instance(ec2client, instance, name, description,
                             fingerprint):
    """Create an AMI from a running instance.

    The instance is stopped and waited on before imaging. The resulting
    AMI is tagged with ``fingerprint`` under ``HGIMAGEFINGERPRINT`` so
    ``find_and_reconcile_image()`` can locate it later.

    Returns the ``ec2resource.Image`` representing the created AMI.
    """
    instance.stop()

    ec2client.get_waiter('instance_stopped').wait(
        InstanceIds=[instance.id],
        WaiterConfig={
            # Poll every 5 seconds rather than the waiter default.
            'Delay': 5,
        })
    print('%s is stopped' % instance.id)

    image = instance.create_image(
        Name=name,
        Description=description,
    )

    image.create_tags(Tags=[
        {
            'Key': 'HGIMAGEFINGERPRINT',
            'Value': fingerprint,
        },
    ])

    print('waiting for image %s' % image.id)

    # Image creation is asynchronous; block until the AMI is usable.
    ec2client.get_waiter('image_available').wait(
        ImageIds=[image.id],
    )

    print('image %s available as %s' % (image.id, image.name))

    return image
808 809
809 810
def ensure_linux_dev_ami(c: AWSConnection, distro='debian10', prefix='hg-'):
    """Ensures a Linux development AMI is available and up-to-date.

    ``distro`` selects the base distribution image (``debian9``,
    ``debian10``, ``ubuntu18.04``, or ``ubuntu19.04``). An existing AMI is
    reused when its fingerprint — derived from the instance config, the
    bootstrap script, and the pip requirements files — matches; otherwise
    a fresh instance is launched, bootstrapped over SSH, and imaged.

    Returns an ``ec2.Image`` of either an existing AMI or a newly-built one.
    """
    ec2client = c.ec2client
    ec2resource = c.ec2resource

    name = '%s%s-%s' % (prefix, 'linux-dev', distro)

    # Resolve the base AMI and SSH login user for the requested distro.
    if distro == 'debian9':
        image = find_image(
            ec2resource,
            DEBIAN_ACCOUNT_ID,
            'debian-stretch-hvm-x86_64-gp2-2019-09-08-17994',
        )
        ssh_username = 'admin'
    elif distro == 'debian10':
        # Debian 10 AMIs are published from a different AWS account than
        # the Debian 9 ones.
        image = find_image(
            ec2resource,
            DEBIAN_ACCOUNT_ID_2,
            'debian-10-amd64-20190909-10',
        )
        ssh_username = 'admin'
    elif distro == 'ubuntu18.04':
        image = find_image(
            ec2resource,
            UBUNTU_ACCOUNT_ID,
            'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190918',
        )
        ssh_username = 'ubuntu'
    elif distro == 'ubuntu19.04':
        image = find_image(
            ec2resource,
            UBUNTU_ACCOUNT_ID,
            'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190918',
        )
        ssh_username = 'ubuntu'
    else:
        raise ValueError('unsupported Linux distro: %s' % distro)

    config = {
        'BlockDeviceMappings': [
            {
                'DeviceName': image.block_device_mappings[0]['DeviceName'],
                'Ebs': {
                    'DeleteOnTermination': True,
                    'VolumeSize': 10,
                    'VolumeType': 'gp2',
                },
            },
        ],
        'EbsOptimized': True,
        'ImageId': image.id,
        'InstanceInitiatedShutdownBehavior': 'stop',
        # 8 VCPUs for compiling Python.
        'InstanceType': 't3.2xlarge',
        'KeyName': '%sautomation' % prefix,
        'MaxCount': 1,
        'MinCount': 1,
        'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
    }

    requirements2_path = (pathlib.Path(__file__).parent.parent /
                          'linux-requirements-py2.txt')
    requirements3_path = (pathlib.Path(__file__).parent.parent /
                          'linux-requirements-py3.txt')
    with requirements2_path.open('r', encoding='utf-8') as fh:
        requirements2 = fh.read()
    with requirements3_path.open('r', encoding='utf-8') as fh:
        requirements3 = fh.read()

    # Compute a deterministic fingerprint to determine whether image needs to
    # be regenerated.
    fingerprint = resolve_fingerprint({
        'instance_config': config,
        'bootstrap_script': BOOTSTRAP_DEBIAN,
        'requirements_py2': requirements2,
        'requirements_py3': requirements3,
    })

    existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)

    if existing_image:
        return existing_image

    print('no suitable %s image found; creating one...' % name)

    with temporary_ec2_instances(ec2resource, config) as instances:
        wait_for_ip_addresses(instances)

        instance = instances[0]

        client = wait_for_ssh(
            instance.public_ip_address, 22,
            username=ssh_username,
            key_filename=str(c.key_pair_path_private('automation')))

        home = '/home/%s' % ssh_username

        with client:
            print('connecting to SSH server')
            sftp = client.open_sftp()

            print('uploading bootstrap files')
            with sftp.open('%s/bootstrap' % home, 'wb') as fh:
                fh.write(BOOTSTRAP_DEBIAN)
                fh.chmod(0o0700)

            with sftp.open('%s/requirements-py2.txt' % home, 'wb') as fh:
                fh.write(requirements2)
                fh.chmod(0o0700)

            with sftp.open('%s/requirements-py3.txt' % home, 'wb') as fh:
                fh.write(requirements3)
                fh.chmod(0o0700)

            print('executing bootstrap')
            chan, stdin, stdout = ssh_exec_command(client,
                                                   '%s/bootstrap' % home)
            stdin.close()

            for line in stdout:
                print(line, end='')

            res = chan.recv_exit_status()
            if res:
                raise Exception('non-0 exit from bootstrap: %d' % res)

            print('bootstrap completed; stopping %s to create %s' % (
                instance.id, name))

        # Image while still inside the temporary-instance context so the
        # instance isn't terminated before the AMI is captured.
        return create_ami_from_instance(ec2client, instance, name,
                                        'Mercurial Linux development environment',
                                        fingerprint)
938 946
939 947
@contextlib.contextmanager
def temporary_linux_dev_instances(c: AWSConnection, image, instance_type,
                                  prefix='hg-', ensure_extra_volume=False):
    """Create temporary Linux development EC2 instances.

    Context manager resolves to a list of ``ec2.Instance`` that were created
    and are running.

    ``ensure_extra_volume`` can be set to ``True`` to require that instances
    have a 2nd storage volume available other than the primary AMI volume.
    For instance types with instance storage, this does nothing special.
    But for instance types without instance storage, an additional EBS volume
    will be added to the instance.

    Instances have an ``ssh_client`` attribute containing a paramiko SSHClient
    instance bound to the instance.

    Instances have an ``ssh_private_key_path`` attributing containing the
    str path to the SSH private key to connect to the instance.
    """

    # Root volume reuses the AMI's own device name so the image boots
    # unmodified.
    block_device_mappings = [
        {
            'DeviceName': image.block_device_mappings[0]['DeviceName'],
            'Ebs': {
                'DeleteOnTermination': True,
                'VolumeSize': 12,
                'VolumeType': 'gp2',
            },
        }
    ]

    # This is not an exhaustive list of instance types having instance storage.
    # But
    if (ensure_extra_volume
        and not instance_type.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))):
        main_device = block_device_mappings[0]['DeviceName']

        # Derive the secondary device name following the primary's naming
        # scheme (Xen-style ``xvd*`` versus ``/dev/sd*``).
        if main_device == 'xvda':
            second_device = 'xvdb'
        elif main_device == '/dev/sda1':
            second_device = '/dev/sdb'
        else:
            raise ValueError('unhandled primary EBS device name: %s' %
                             main_device)

        block_device_mappings.append({
            'DeviceName': second_device,
            'Ebs': {
                'DeleteOnTermination': True,
                'VolumeSize': 8,
                'VolumeType': 'gp2',
            }
        })

    config = {
        'BlockDeviceMappings': block_device_mappings,
        'EbsOptimized': True,
        'ImageId': image.id,
        # Terminate (not stop) on shutdown: these instances are disposable.
        'InstanceInitiatedShutdownBehavior': 'terminate',
        'InstanceType': instance_type,
        'KeyName': '%sautomation' % prefix,
        'MaxCount': 1,
        'MinCount': 1,
        'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
    }

    with temporary_ec2_instances(c.ec2resource, config) as instances:
        # Instances need public IPs before we can SSH in.
        wait_for_ip_addresses(instances)

        ssh_private_key_path = str(c.key_pair_path_private('automation'))

        for instance in instances:
            client = wait_for_ssh(
                instance.public_ip_address, 22,
                username='hg',
                key_filename=ssh_private_key_path)

            # Stash the connection details on the instance object so callers
            # can reach it without re-authenticating.
            instance.ssh_client = client
            instance.ssh_private_key_path = ssh_private_key_path

        try:
            yield instances
        finally:
            # Always close SSH connections; instance termination itself is
            # handled by the temporary_ec2_instances() context manager.
            for instance in instances:
                instance.ssh_client.close()
1026 1034
1027 1035
def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-',
                           base_image_name=WINDOWS_BASE_IMAGE_NAME):
    """Ensure Windows Development AMI is available and up-to-date.

    If necessary, a modern AMI will be built by starting a temporary EC2
    instance and bootstrapping it.

    Obsolete AMIs will be deleted so there is only a single AMI having the
    desired name.

    Returns an ``ec2.Image`` of either an existing AMI or a newly-built
    one.
    """
    ec2client = c.ec2client
    ec2resource = c.ec2resource
    ssmclient = c.session.client('ssm')

    name = '%s%s' % (prefix, 'windows-dev')

    # Start from Amazon's stock Windows Server image.
    image = find_image(ec2resource, AMAZON_ACCOUNT_ID, base_image_name)

    config = {
        'BlockDeviceMappings': [
            {
                'DeviceName': '/dev/sda1',
                'Ebs': {
                    'DeleteOnTermination': True,
                    'VolumeSize': 32,
                    'VolumeType': 'gp2',
                },
            }
        ],
        'ImageId': image.id,
        'InstanceInitiatedShutdownBehavior': 'stop',
        'InstanceType': 't3.medium',
        'KeyName': '%sautomation' % prefix,
        'MaxCount': 1,
        'MinCount': 1,
        'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
    }

    # PowerShell commands (run later over WinRM) that enable and harden the
    # Windows OpenSSH server so follow-up automation can use SSH.
    commands = [
        # Need to start the service so sshd_config is generated.
        'Start-Service sshd',
        'Write-Output "modifying sshd_config"',
        r'$content = Get-Content C:\ProgramData\ssh\sshd_config',
        '$content = $content -replace "Match Group administrators","" -replace "AuthorizedKeysFile __PROGRAMDATA__/ssh/administrators_authorized_keys",""',
        r'$content | Set-Content C:\ProgramData\ssh\sshd_config',
        'Import-Module OpenSSHUtils',
        r'Repair-SshdConfigPermission C:\ProgramData\ssh\sshd_config -Confirm:$false',
        'Restart-Service sshd',
        # NOTE(review): message says "OpenSSL" but the capability installed
        # below is the OpenSSH client — likely a typo. Left as-is because the
        # command list feeds the AMI fingerprint; changing it would force an
        # image rebuild.
        'Write-Output "installing OpenSSL client"',
        'Add-WindowsCapability -Online -Name OpenSSH.Client~~~~0.0.1.0',
        'Set-Service -Name sshd -StartupType "Automatic"',
        'Write-Output "OpenSSH server running"',
    ]

    # Append the shared Windows dependency install script, line by line.
    with INSTALL_WINDOWS_DEPENDENCIES.open('r', encoding='utf-8') as fh:
        commands.extend(l.rstrip() for l in fh)

    # Disable Windows Defender when bootstrapping because it just slows
    # things down.
    commands.insert(0, 'Set-MpPreference -DisableRealtimeMonitoring $true')
    commands.append('Set-MpPreference -DisableRealtimeMonitoring $false')

    # Compute a deterministic fingerprint to determine whether image needs
    # to be regenerated.
    fingerprint = resolve_fingerprint({
        'instance_config': config,
        'user_data': WINDOWS_USER_DATA,
        'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
        'bootstrap_commands': commands,
        'base_image_name': base_image_name,
    })

    existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)

    if existing_image:
        return existing_image

    print('no suitable Windows development image found; creating one...')

    with create_temp_windows_ec2_instances(c, config) as instances:
        assert len(instances) == 1
        instance = instances[0]

        wait_for_ssm(ssmclient, [instance])

        # On first boot, install various Windows updates.
        # We would ideally use PowerShell Remoting for this. However, there are
        # trust issues that make it difficult to invoke Windows Update
        # remotely. So we use SSM, which has a mechanism for running Windows
        # Update.
        print('installing Windows features...')
        run_ssm_command(
            ssmclient,
            [instance],
            'AWS-RunPowerShellScript',
            {
                'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),
            },
        )

        # Reboot so all updates are fully applied.
        #
        # We don't use instance.reboot() here because it is asynchronous and
        # we don't know when exactly the instance has rebooted. It could take
        # a while to stop and we may start trying to interact with the instance
        # before it has rebooted.
        print('rebooting instance %s' % instance.id)
        instance.stop()
        ec2client.get_waiter('instance_stopped').wait(
            InstanceIds=[instance.id],
            WaiterConfig={
                'Delay': 5,
            })

        instance.start()
        wait_for_ip_addresses([instance])

        # There is a race condition here between the User Data PS script running
        # and us connecting to WinRM. This can manifest as
        # "AuthorizationManager check failed" failures during run_powershell().
        # TODO figure out a workaround.

        print('waiting for Windows Remote Management to come back...')
        client = wait_for_winrm(instance.public_ip_address, 'Administrator',
                                c.automation.default_password())
        print('established WinRM connection to %s' % instance.id)
        instance.winrm_client = client

        print('bootstrapping instance...')
        run_powershell(instance.winrm_client, '\n'.join(commands))

        print('bootstrap completed; stopping %s to create image' % instance.id)
        return create_ami_from_instance(ec2client, instance, name,
                                        'Mercurial Windows development environment',
                                        fingerprint)
1166 1174
1167 1175
@contextlib.contextmanager
def temporary_windows_dev_instances(c: AWSConnection, image, instance_type,
                                    prefix='hg-', disable_antivirus=False):
    """Launch throwaway Windows development EC2 instances.

    Context manager yields the list of ``EC2.Instance`` that were created.
    """
    # 32 GB root volume on the AMI's standard device name.
    root_volume = {
        'DeviceName': '/dev/sda1',
        'Ebs': {
            'DeleteOnTermination': True,
            'VolumeSize': 32,
            'VolumeType': 'gp2',
        },
    }

    launch_config = {
        'BlockDeviceMappings': [root_volume],
        'ImageId': image.id,
        'InstanceInitiatedShutdownBehavior': 'stop',
        'InstanceType': instance_type,
        'KeyName': '%sautomation' % prefix,
        'MaxCount': 1,
        'MinCount': 1,
        'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
    }

    with create_temp_windows_ec2_instances(c, launch_config) as instances:
        if disable_antivirus:
            # Real-time scanning significantly slows builds and tests.
            for inst in instances:
                run_powershell(
                    inst.winrm_client,
                    'Set-MpPreference -DisableRealtimeMonitoring $true')

        yield instances
@@ -1,460 +1,460 b''
1 1 # cli.py - Command line interface for automation
2 2 #
3 3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # no-check-code because Python 3 native.
9 9
10 10 import argparse
11 11 import concurrent.futures as futures
12 12 import os
13 13 import pathlib
14 14 import time
15 15
16 16 from . import (
17 17 aws,
18 18 HGAutomation,
19 19 linux,
20 20 windows,
21 21 )
22 22
23 23
24 24 SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent
25 25 DIST_PATH = SOURCE_ROOT / 'dist'
26 26
27 27
def bootstrap_linux_dev(hga: HGAutomation, aws_region, distros=None,
                        parallel=False):
    """Ensure Linux development AMIs exist for the requested distros."""
    c = hga.aws_connection(aws_region)

    # Comma-delimited override, otherwise every supported distro.
    wanted = distros.split(',') if distros else sorted(linux.DISTROS)

    # TODO There is a wonky interaction involving KeyboardInterrupt whereby
    # the context manager that is supposed to terminate the temporary EC2
    # instance doesn't run. Until we fix this, make parallel building opt-in
    # so we don't orphan instances.
    if not parallel:
        for distro in wanted:
            aws.ensure_linux_dev_ami(c, distro=distro)
        return

    with futures.ThreadPoolExecutor(len(wanted)) as executor:
        tasks = [executor.submit(aws.ensure_linux_dev_ami, c, distro=distro)
                 for distro in wanted]

        # Propagate any exception from the workers.
        for task in tasks:
            task.result()
54 54
def bootstrap_windows_dev(hga: HGAutomation, aws_region, base_image_name):
    """Ensure the Windows development AMI exists and report its ID."""
    conn = hga.aws_connection(aws_region)
    ami = aws.ensure_windows_dev_ami(conn, base_image_name=base_image_name)
    print('Windows development AMI available as %s' % ami.id)
59 59
60 60
def build_inno(hga: HGAutomation, aws_region, arch, revision, version,
               base_image_name):
    """Build Inno Setup installer(s) on a temporary Windows instance."""
    conn = hga.aws_connection(aws_region)
    ami = aws.ensure_windows_dev_ami(conn, base_image_name=base_image_name)
    DIST_PATH.mkdir(exist_ok=True)

    with aws.temporary_windows_dev_instances(conn, ami, 't3.medium') as insts:
        inst = insts[0]

        # Push the requested revision to the instance before building.
        windows.synchronize_hg(SOURCE_ROOT, revision, inst)

        for target_arch in arch:
            windows.build_inno_installer(inst.winrm_client, target_arch,
                                         DIST_PATH, version=version)
76 76
77 77
def build_wix(hga: HGAutomation, aws_region, arch, revision, version,
              base_image_name):
    """Build WiX MSI installer(s) on a temporary Windows instance."""
    conn = hga.aws_connection(aws_region)
    ami = aws.ensure_windows_dev_ami(conn, base_image_name=base_image_name)
    DIST_PATH.mkdir(exist_ok=True)

    with aws.temporary_windows_dev_instances(conn, ami, 't3.medium') as insts:
        inst = insts[0]

        windows.synchronize_hg(SOURCE_ROOT, revision, inst)

        for target_arch in arch:
            windows.build_wix_installer(inst.winrm_client, target_arch,
                                        DIST_PATH, version=version)
92 92
93 93
def build_windows_wheel(hga: HGAutomation, aws_region, arch, revision,
                        base_image_name):
    """Build Windows wheel(s) on a temporary Windows instance."""
    conn = hga.aws_connection(aws_region)
    ami = aws.ensure_windows_dev_ami(conn, base_image_name=base_image_name)
    DIST_PATH.mkdir(exist_ok=True)

    with aws.temporary_windows_dev_instances(conn, ami, 't3.medium') as insts:
        inst = insts[0]

        windows.synchronize_hg(SOURCE_ROOT, revision, inst)

        for target_arch in arch:
            windows.build_wheel(inst.winrm_client, target_arch, DIST_PATH)
107 107
108 108
def build_all_windows_packages(hga: HGAutomation, aws_region, revision,
                               version, base_image_name):
    """Build wheels plus Inno and WiX installers for both Windows arches."""
    conn = hga.aws_connection(aws_region)
    ami = aws.ensure_windows_dev_ami(conn, base_image_name=base_image_name)
    DIST_PATH.mkdir(exist_ok=True)

    with aws.temporary_windows_dev_instances(conn, ami, 't3.medium') as insts:
        inst = insts[0]
        winrm_client = inst.winrm_client

        windows.synchronize_hg(SOURCE_ROOT, revision, inst)

        for arch in ('x86', 'x64'):
            # Purge before each build so artifacts from one configuration
            # cannot leak into the next.
            windows.purge_hg(winrm_client)
            windows.build_wheel(winrm_client, arch, DIST_PATH)
            windows.purge_hg(winrm_client)
            windows.build_inno_installer(winrm_client, arch, DIST_PATH,
                                         version=version)
            windows.purge_hg(winrm_client)
            windows.build_wix_installer(winrm_client, arch, DIST_PATH,
                                        version=version)
131 131
132 132
def terminate_ec2_instances(hga: HGAutomation, aws_region):
    """Terminate every automation-managed EC2 instance in the region."""
    conn = hga.aws_connection(aws_region, ensure_ec2_state=False)
    aws.terminate_ec2_instances(conn.ec2resource)
136 136
137 137
def purge_ec2_resources(hga: HGAutomation, aws_region):
    """Delete all EC2 resources (images, key pairs, etc.) managed by us."""
    conn = hga.aws_connection(aws_region, ensure_ec2_state=False)
    aws.remove_resources(conn)
141 141
142 142
def run_tests_linux(hga: HGAutomation, aws_region, instance_type,
                    python_version, test_flags, distro, filesystem):
    """Run the Mercurial test harness on a temporary Linux instance."""
    conn = hga.aws_connection(aws_region)
    ami = aws.ensure_linux_dev_ami(conn, distro=distro)

    t_start = time.time()

    # Non-default filesystems (other than tmpfs) require a dedicated extra
    # volume to format.
    need_volume = filesystem not in ('default', 'tmpfs')

    with aws.temporary_linux_dev_instances(
            conn, ami, instance_type,
            ensure_extra_volume=need_volume) as insts:

        inst = insts[0]

        linux.prepare_exec_environment(inst.ssh_client,
                                       filesystem=filesystem)
        linux.synchronize_hg(SOURCE_ROOT, inst, '.')
        t_prepared = time.time()
        linux.run_tests(inst.ssh_client, python_version,
                        test_flags)
        t_done = time.time()

    t_setup = t_prepared - t_start
    t_all = t_done - t_start

    # Report how much wall time went to setup versus the tests themselves.
    print(
        'total time: %.1fs; setup: %.1fs; tests: %.1fs; setup overhead: %.1f%%'
        % (t_all, t_setup, t_done - t_prepared, t_setup / t_all * 100.0))
172 172
173 173
def run_tests_windows(hga: HGAutomation, aws_region, instance_type,
                      python_version, arch, test_flags, base_image_name):
    """Run the Mercurial test harness on a temporary Windows instance."""
    conn = hga.aws_connection(aws_region)
    ami = aws.ensure_windows_dev_ami(conn, base_image_name=base_image_name)

    # Antivirus is disabled because real-time scanning slows the suite badly.
    with aws.temporary_windows_dev_instances(conn, ami, instance_type,
                                             disable_antivirus=True) as insts:
        inst = insts[0]

        windows.synchronize_hg(SOURCE_ROOT, '.', inst)
        windows.run_tests(inst.winrm_client, python_version, arch,
                          test_flags)
186 186
187 187
def publish_windows_artifacts(hga: HGAutomation, aws_region, version: str,
                              pypi: bool, mercurial_scm_org: bool,
                              ssh_username: str):
    """Publish already-built Windows artifacts for ``version``.

    Uploads wheels/installers found under ``DIST_PATH`` to PyPI and/or
    www.mercurial-scm.org depending on the boolean flags.

    ``hga`` and ``aws_region`` exist for signature parity with the other
    command handlers dispatched by ``main()`` (which passes them
    positionally/by keyword); publishing itself runs locally.
    """
    # Renamed the first parameter from ``hg`` to ``hga`` for consistency with
    # every sibling handler; dispatch passes it positionally so this is safe.
    windows.publish_artifacts(DIST_PATH, version,
                              pypi=pypi, mercurial_scm_org=mercurial_scm_org,
                              ssh_username=ssh_username)
194 194
195 195
def get_parser():
    """Construct the argparse parser for all automation subcommands.

    Each subcommand binds its handler via ``set_defaults(func=...)``;
    ``main()`` dispatches on that attribute and forwards the remaining
    parsed arguments as keyword arguments.
    """
    parser = argparse.ArgumentParser()

    # Global options shared by every subcommand.
    parser.add_argument(
        '--state-path',
        default='~/.hgautomation',
        help='Path for local state files',
    )
    parser.add_argument(
        '--aws-region',
        help='AWS region to use',
        default='us-west-1',
    )

    subparsers = parser.add_subparsers()

    sp = subparsers.add_parser(
        'bootstrap-linux-dev',
        help='Bootstrap Linux development environments',
    )
    sp.add_argument(
        '--distros',
        help='Comma delimited list of distros to bootstrap',
    )
    sp.add_argument(
        '--parallel',
        action='store_true',
        help='Generate AMIs in parallel (not CTRL-c safe)'
    )
    sp.set_defaults(func=bootstrap_linux_dev)

    sp = subparsers.add_parser(
        'bootstrap-windows-dev',
        help='Bootstrap the Windows development environment',
    )
    sp.add_argument(
        '--base-image-name',
        help='AMI name of base image',
        default=aws.WINDOWS_BASE_IMAGE_NAME,
    )
    sp.set_defaults(func=bootstrap_windows_dev)

    sp = subparsers.add_parser(
        'build-all-windows-packages',
        help='Build all Windows packages',
    )
    sp.add_argument(
        '--revision',
        help='Mercurial revision to build',
        default='.',
    )
    sp.add_argument(
        '--version',
        help='Mercurial version string to use',
    )
    sp.add_argument(
        '--base-image-name',
        help='AMI name of base image',
        default=aws.WINDOWS_BASE_IMAGE_NAME,
    )
    sp.set_defaults(func=build_all_windows_packages)

    sp = subparsers.add_parser(
        'build-inno',
        help='Build Inno Setup installer(s)',
    )
    sp.add_argument(
        '--arch',
        help='Architecture to build for',
        choices={'x86', 'x64'},
        nargs='*',
        default=['x64'],
    )
    sp.add_argument(
        '--revision',
        help='Mercurial revision to build',
        default='.',
    )
    sp.add_argument(
        '--version',
        help='Mercurial version string to use in installer',
    )
    sp.add_argument(
        '--base-image-name',
        help='AMI name of base image',
        default=aws.WINDOWS_BASE_IMAGE_NAME,
    )
    sp.set_defaults(func=build_inno)

    sp = subparsers.add_parser(
        'build-windows-wheel',
        help='Build Windows wheel(s)',
    )
    sp.add_argument(
        '--arch',
        help='Architecture to build for',
        choices={'x86', 'x64'},
        nargs='*',
        default=['x64'],
    )
    sp.add_argument(
        '--revision',
        help='Mercurial revision to build',
        default='.',
    )
    sp.add_argument(
        '--base-image-name',
        help='AMI name of base image',
        default=aws.WINDOWS_BASE_IMAGE_NAME,
    )
    sp.set_defaults(func=build_windows_wheel)

    sp = subparsers.add_parser(
        'build-wix',
        help='Build WiX installer(s)'
    )
    sp.add_argument(
        '--arch',
        help='Architecture to build for',
        choices={'x86', 'x64'},
        nargs='*',
        default=['x64'],
    )
    sp.add_argument(
        '--revision',
        help='Mercurial revision to build',
        default='.',
    )
    sp.add_argument(
        '--version',
        help='Mercurial version string to use in installer',
    )
    sp.add_argument(
        '--base-image-name',
        help='AMI name of base image',
        default=aws.WINDOWS_BASE_IMAGE_NAME,
    )
    sp.set_defaults(func=build_wix)

    sp = subparsers.add_parser(
        'terminate-ec2-instances',
        help='Terminate all active EC2 instances managed by us',
    )
    sp.set_defaults(func=terminate_ec2_instances)

    sp = subparsers.add_parser(
        'purge-ec2-resources',
        help='Purge all EC2 resources managed by us',
    )
    sp.set_defaults(func=purge_ec2_resources)

    sp = subparsers.add_parser(
        'run-tests-linux',
        help='Run tests on Linux',
    )
    sp.add_argument(
        '--distro',
        help='Linux distribution to run tests on',
        choices=linux.DISTROS,
        # Debian Buster is the default distro for Linux test runs.
        default='debian10',
    )
    sp.add_argument(
        '--filesystem',
        help='Filesystem type to use',
        choices={'btrfs', 'default', 'ext3', 'ext4', 'jfs', 'tmpfs', 'xfs'},
        default='default',
    )
    sp.add_argument(
        '--instance-type',
        help='EC2 instance type to use',
        default='c5.9xlarge',
    )
    sp.add_argument(
        '--python-version',
        help='Python version to use',
        choices={'system2', 'system3', '2.7', '3.5', '3.6', '3.7', '3.8',
                 'pypy', 'pypy3.5', 'pypy3.6'},
        default='system2',
    )
    sp.add_argument(
        'test_flags',
        help='Extra command line flags to pass to run-tests.py',
        nargs='*',
    )
    sp.set_defaults(func=run_tests_linux)

    sp = subparsers.add_parser(
        'run-tests-windows',
        help='Run tests on Windows',
    )
    sp.add_argument(
        '--instance-type',
        help='EC2 instance type to use',
        default='t3.medium',
    )
    sp.add_argument(
        '--python-version',
        help='Python version to use',
        choices={'2.7', '3.5', '3.6', '3.7', '3.8'},
        default='2.7',
    )
    sp.add_argument(
        '--arch',
        help='Architecture to test',
        choices={'x86', 'x64'},
        default='x64',
    )
    sp.add_argument(
        '--test-flags',
        help='Extra command line flags to pass to run-tests.py',
    )
    sp.add_argument(
        '--base-image-name',
        help='AMI name of base image',
        default=aws.WINDOWS_BASE_IMAGE_NAME,
    )
    sp.set_defaults(func=run_tests_windows)

    sp = subparsers.add_parser(
        'publish-windows-artifacts',
        help='Publish built Windows artifacts (wheels, installers, etc)'
    )
    # ``--no-*`` flags flip defaults that are True; uploading happens unless
    # explicitly disabled.
    sp.add_argument(
        '--no-pypi',
        dest='pypi',
        action='store_false',
        default=True,
        help='Skip uploading to PyPI',
    )
    sp.add_argument(
        '--no-mercurial-scm-org',
        dest='mercurial_scm_org',
        action='store_false',
        default=True,
        help='Skip uploading to www.mercurial-scm.org',
    )
    sp.add_argument(
        '--ssh-username',
        help='SSH username for mercurial-scm.org',
    )
    sp.add_argument(
        'version',
        help='Mercurial version string to locate local packages',
    )
    sp.set_defaults(func=publish_windows_artifacts)

    return parser
443 443
444 444
def main():
    """Parse command line arguments and dispatch to the chosen subcommand."""
    parser = get_parser()
    args = parser.parse_args()

    state_dir = pathlib.Path(os.path.expanduser(args.state_path))
    automation = HGAutomation(state_dir)

    handler = getattr(args, 'func', None)
    if handler is None:
        # No subcommand was given; show usage instead of crashing.
        parser.print_help()
        return

    # Everything except the dispatch target and global state path maps onto
    # the handler's keyword arguments.
    kwargs = {name: value for name, value in vars(args).items()
              if name not in ('func', 'state_path')}

    handler(automation, **kwargs)
@@ -1,562 +1,567 b''
1 1 # linux.py - Linux specific automation functionality
2 2 #
3 3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # no-check-code because Python 3 native.
9 9
10 10 import os
11 11 import pathlib
12 12 import shlex
13 13 import subprocess
14 14 import tempfile
15 15
16 16 from .ssh import (
17 17 exec_command,
18 18 )
19 19
20 20
21 21 # Linux distributions that are supported.
22 22 DISTROS = {
23 23 'debian9',
24 'debian10',
24 25 'ubuntu18.04',
25 26 'ubuntu19.04',
26 27 }
27 28
28 29 INSTALL_PYTHONS = r'''
29 30 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
30 31 PYENV3_VERSIONS="3.5.7 3.6.9 3.7.4 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
31 32
32 33 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
33 34 pushd /hgdev/pyenv
34 35 git checkout 17f44b7cd6f58ea2fa68ec0371fb9e7a826b8be2
35 36 popd
36 37
37 38 export PYENV_ROOT="/hgdev/pyenv"
38 39 export PATH="$PYENV_ROOT/bin:$PATH"
39 40
40 41 # pip 19.2.3.
41 42 PIP_SHA256=57e3643ff19f018f8a00dfaa6b7e4620e3c1a7a2171fd218425366ec006b3bfe
42 43 wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py
43 44 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
44 45
45 46 VIRTUALENV_SHA256=f78d81b62d3147396ac33fc9d77579ddc42cc2a98dd9ea38886f616b33bc7fb2
46 47 VIRTUALENV_TARBALL=virtualenv-16.7.5.tar.gz
47 48 wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/66/f0/6867af06d2e2f511e4e1d7094ff663acdebc4f15d4a0cb0fed1007395124/${VIRTUALENV_TARBALL}
48 49 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
49 50
50 51 for v in ${PYENV2_VERSIONS}; do
51 52 pyenv install -v ${v}
52 53 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
53 54 ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
54 55 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
55 56 done
56 57
57 58 for v in ${PYENV3_VERSIONS}; do
58 59 pyenv install -v ${v}
59 60 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
60 61 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
61 62 done
62 63
63 64 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
64 65 '''.lstrip().replace('\r\n', '\n')
65 66
66 67
67 68 INSTALL_RUST = r'''
68 69 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
69 70 wget -O rustup-init --progress dot:mega https://static.rust-lang.org/rustup/archive/1.18.3/x86_64-unknown-linux-gnu/rustup-init
70 71 echo "${RUSTUP_INIT_SHA256} rustup-init" | sha256sum --check -
71 72
72 73 chmod +x rustup-init
73 74 sudo -H -u hg -g hg ./rustup-init -y
74 75 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.34.2
75 76 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
76 77 '''
77 78
78 79
79 80 BOOTSTRAP_VIRTUALENV = r'''
80 81 /usr/bin/virtualenv /hgdev/venv-bootstrap
81 82
82 83 HG_SHA256=35fc8ba5e0379c1b3affa2757e83fb0509e8ac314cbd9f1fd133cf265d16e49f
83 84 HG_TARBALL=mercurial-5.1.1.tar.gz
84 85
85 86 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
86 87 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
87 88
88 89 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
89 90 '''.lstrip().replace('\r\n', '\n')
90 91
91 92
92 93 BOOTSTRAP_DEBIAN = r'''
93 94 #!/bin/bash
94 95
95 96 set -ex
96 97
97 98 DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
98 99 DEBIAN_VERSION=`cat /etc/debian_version`
99 100 LSB_RELEASE=`lsb_release -cs`
100 101
101 102 sudo /usr/sbin/groupadd hg
102 103 sudo /usr/sbin/groupadd docker
103 104 sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
104 105 sudo mkdir /home/hg/.ssh
105 106 sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
106 107 sudo chown -R hg:hg /home/hg/.ssh
107 108 sudo chmod 700 /home/hg/.ssh
108 109 sudo chmod 600 /home/hg/.ssh/authorized_keys
109 110
110 111 cat << EOF | sudo tee /etc/sudoers.d/90-hg
111 112 hg ALL=(ALL) NOPASSWD:ALL
112 113 EOF
113 114
114 115 sudo apt-get update
115 116 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade
116 117
117 118 # Install packages necessary to set up Docker Apt repo.
118 119 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
119 120 apt-transport-https \
120 121 gnupg
121 122
122 123 cat > docker-apt-key << EOF
123 124 -----BEGIN PGP PUBLIC KEY BLOCK-----
124 125
125 126 mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
126 127 lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
127 128 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
128 129 L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
129 130 UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
130 131 cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
131 132 ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
132 133 vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
133 134 G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
134 135 XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
135 136 q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
136 137 tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
137 138 BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
138 139 v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
139 140 tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
140 141 jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
141 142 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
142 143 XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
143 144 FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
144 145 g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
145 146 ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
146 147 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
147 148 G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
148 149 FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
149 150 EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
150 151 M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
151 152 Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
152 153 w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
153 154 z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
154 155 eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
155 156 VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
156 157 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
157 158 zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
158 159 pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
159 160 ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
160 161 BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
161 162 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
162 163 YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
163 164 mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
164 165 KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
165 166 JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
166 167 cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
167 168 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
168 169 U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
169 170 VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
170 171 irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
171 172 SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
172 173 QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
173 174 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
174 175 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
175 176 dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
176 177 Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
177 178 H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
178 179 /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
179 180 M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
180 181 xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
181 182 jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
182 183 YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
183 184 =0YYh
184 185 -----END PGP PUBLIC KEY BLOCK-----
185 186 EOF
186 187
187 188 sudo apt-key add docker-apt-key
188 189
189 190 if [ "$LSB_RELEASE" = "stretch" ]; then
190 191 cat << EOF | sudo tee -a /etc/apt/sources.list
191 192 # Need backports for clang-format-6.0
192 193 deb http://deb.debian.org/debian stretch-backports main
194 EOF
195 fi
193 196
197 if [ "$LSB_RELEASE" = "stretch" -o "$LSB_RELEASE" = "buster" ]; then
198 cat << EOF | sudo tee -a /etc/apt/sources.list
194 199 # Sources are useful if we want to compile things locally.
195 deb-src http://deb.debian.org/debian stretch main
196 deb-src http://security.debian.org/debian-security stretch/updates main
197 deb-src http://deb.debian.org/debian stretch-updates main
198 deb-src http://deb.debian.org/debian stretch-backports main
200 deb-src http://deb.debian.org/debian $LSB_RELEASE main
201 deb-src http://security.debian.org/debian-security $LSB_RELEASE/updates main
202 deb-src http://deb.debian.org/debian $LSB_RELEASE-updates main
203 deb-src http://deb.debian.org/debian $LSB_RELEASE-backports main
199 204
200 deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
205 deb [arch=amd64] https://download.docker.com/linux/debian $LSB_RELEASE stable
201 206 EOF
202 207
203 208 elif [ "$DISTRO" = "Ubuntu" ]; then
204 209 cat << EOF | sudo tee -a /etc/apt/sources.list
205 210 deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
206 211 EOF
207 212
208 213 fi
209 214
210 215 sudo apt-get update
211 216
212 217 PACKAGES="\
213 218 awscli \
214 219 btrfs-progs \
215 220 build-essential \
216 221 bzr \
217 222 clang-format-6.0 \
218 223 cvs \
219 224 darcs \
220 225 debhelper \
221 226 devscripts \
222 227 docker-ce \
223 228 dpkg-dev \
224 229 dstat \
225 230 emacs \
226 231 gettext \
227 232 git \
228 233 htop \
229 234 iotop \
230 235 jfsutils \
231 236 libbz2-dev \
232 237 libexpat1-dev \
233 238 libffi-dev \
234 239 libgdbm-dev \
235 240 liblzma-dev \
236 241 libncurses5-dev \
237 242 libnss3-dev \
238 243 libreadline-dev \
239 244 libsqlite3-dev \
240 245 libssl-dev \
241 246 netbase \
242 247 ntfs-3g \
243 248 nvme-cli \
244 249 pyflakes \
245 250 pyflakes3 \
246 251 pylint \
247 252 pylint3 \
248 253 python-all-dev \
249 254 python-dev \
250 255 python-docutils \
251 256 python-fuzzywuzzy \
252 257 python-pygments \
253 258 python-subversion \
254 259 python-vcr \
255 260 python3-boto3 \
256 261 python3-dev \
257 262 python3-docutils \
258 263 python3-fuzzywuzzy \
259 264 python3-pygments \
260 265 python3-vcr \
261 266 rsync \
262 267 sqlite3 \
263 268 subversion \
264 269 tcl-dev \
265 270 tk-dev \
266 271 tla \
267 272 unzip \
268 273 uuid-dev \
269 274 vim \
270 275 virtualenv \
271 276 wget \
272 277 xfsprogs \
273 278 zip \
274 279 zlib1g-dev"
275 280
276 281 if [ "LSB_RELEASE" = "stretch" ]; then
277 282 PACKAGES="$PACKAGES linux-perf"
278 283 elif [ "$DISTRO" = "Ubuntu" ]; then
279 284 PACKAGES="$PACKAGES linux-tools-common"
280 285 fi
281 286
282 # Ubuntu 19.04 removes monotone.
283 if [ "$LSB_RELEASE" != "disco" ]; then
287 # Monotone only available in older releases.
288 if [ "$LSB_RELEASE" = "stretch" -o "$LSB_RELEASE" = "xenial" ]; then
284 289 PACKAGES="$PACKAGES monotone"
285 290 fi
286 291
287 292 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
288 293
289 294 # Create clang-format symlink so test harness finds it.
290 295 sudo update-alternatives --install /usr/bin/clang-format clang-format \
291 296 /usr/bin/clang-format-6.0 1000
292 297
293 298 sudo mkdir /hgdev
294 299 # Will be normalized to hg:hg later.
295 300 sudo chown `whoami` /hgdev
296 301
297 302 {install_rust}
298 303
299 304 cp requirements-py2.txt /hgdev/requirements-py2.txt
300 305 cp requirements-py3.txt /hgdev/requirements-py3.txt
301 306
302 307 # Disable the pip version check because it uses the network and can
303 308 # be annoying.
304 309 cat << EOF | sudo tee -a /etc/pip.conf
305 310 [global]
306 311 disable-pip-version-check = True
307 312 EOF
308 313
309 314 {install_pythons}
310 315 {bootstrap_virtualenv}
311 316
312 317 /hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src
313 318
314 319 # Mark the repo as non-publishing.
315 320 cat >> /hgdev/src/.hg/hgrc << EOF
316 321 [phases]
317 322 publish = false
318 323 EOF
319 324
320 325 sudo chown -R hg:hg /hgdev
321 326 '''.lstrip().format(
322 327 install_rust=INSTALL_RUST,
323 328 install_pythons=INSTALL_PYTHONS,
324 329 bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
325 330 ).replace('\r\n', '\n')
326 331
327 332
328 333 # Prepares /hgdev for operations.
329 334 PREPARE_HGDEV = '''
330 335 #!/bin/bash
331 336
332 337 set -e
333 338
334 339 FS=$1
335 340
336 341 ensure_device() {
337 342 if [ -z "${DEVICE}" ]; then
338 343 echo "could not find block device to format"
339 344 exit 1
340 345 fi
341 346 }
342 347
343 348 # Determine device to partition for extra filesystem.
344 349 # If only 1 volume is present, it will be the root volume and
345 350 # should be /dev/nvme0. If multiple volumes are present, the
346 351 # root volume could be nvme0 or nvme1. Use whichever one doesn't have
347 352 # a partition.
348 353 if [ -e /dev/nvme1n1 ]; then
349 354 if [ -e /dev/nvme0n1p1 ]; then
350 355 DEVICE=/dev/nvme1n1
351 356 else
352 357 DEVICE=/dev/nvme0n1
353 358 fi
354 359 else
355 360 DEVICE=
356 361 fi
357 362
358 363 sudo mkdir /hgwork
359 364
360 365 if [ "${FS}" != "default" -a "${FS}" != "tmpfs" ]; then
361 366 ensure_device
362 367 echo "creating ${FS} filesystem on ${DEVICE}"
363 368 fi
364 369
365 370 if [ "${FS}" = "default" ]; then
366 371 :
367 372
368 373 elif [ "${FS}" = "btrfs" ]; then
369 374 sudo mkfs.btrfs ${DEVICE}
370 375 sudo mount ${DEVICE} /hgwork
371 376
372 377 elif [ "${FS}" = "ext3" ]; then
373 378 # lazy_journal_init speeds up filesystem creation at the expense of
374 379 # integrity if things crash. We are an ephemeral instance, so we don't
375 380 # care about integrity.
376 381 sudo mkfs.ext3 -E lazy_journal_init=1 ${DEVICE}
377 382 sudo mount ${DEVICE} /hgwork
378 383
379 384 elif [ "${FS}" = "ext4" ]; then
380 385 sudo mkfs.ext4 -E lazy_journal_init=1 ${DEVICE}
381 386 sudo mount ${DEVICE} /hgwork
382 387
383 388 elif [ "${FS}" = "jfs" ]; then
384 389 sudo mkfs.jfs ${DEVICE}
385 390 sudo mount ${DEVICE} /hgwork
386 391
387 392 elif [ "${FS}" = "tmpfs" ]; then
388 393 echo "creating tmpfs volume in /hgwork"
389 394 sudo mount -t tmpfs -o size=1024M tmpfs /hgwork
390 395
391 396 elif [ "${FS}" = "xfs" ]; then
392 397 sudo mkfs.xfs ${DEVICE}
393 398 sudo mount ${DEVICE} /hgwork
394 399
395 400 else
396 401 echo "unsupported filesystem: ${FS}"
397 402 exit 1
398 403 fi
399 404
400 405 echo "/hgwork ready"
401 406
402 407 sudo chown hg:hg /hgwork
403 408 mkdir /hgwork/tmp
404 409 chown hg:hg /hgwork/tmp
405 410
406 411 rsync -a /hgdev/src /hgwork/
407 412 '''.lstrip().replace('\r\n', '\n')
408 413
409 414
410 415 HG_UPDATE_CLEAN = '''
411 416 set -ex
412 417
413 418 HG=/hgdev/venv-bootstrap/bin/hg
414 419
415 420 cd /hgwork/src
416 421 ${HG} --config extensions.purge= purge --all
417 422 ${HG} update -C $1
418 423 ${HG} log -r .
419 424 '''.lstrip().replace('\r\n', '\n')
420 425
421 426
422 427 def prepare_exec_environment(ssh_client, filesystem='default'):
423 428 """Prepare an EC2 instance to execute things.
424 429
425 430 The AMI has an ``/hgdev`` bootstrapped with various Python installs
426 431 and a clone of the Mercurial repo.
427 432
428 433 In EC2, EBS volumes launched from snapshots have wonky performance behavior.
429 434 Notably, blocks have to be copied on first access, which makes volume
430 435 I/O extremely slow on fresh volumes.
431 436
432 437 Furthermore, we may want to run operations, tests, etc on alternative
433 438 filesystems so we examine behavior on different filesystems.
434 439
435 440 This function is used to facilitate executing operations on alternate
436 441 volumes.
437 442 """
438 443 sftp = ssh_client.open_sftp()
439 444
440 445 with sftp.open('/hgdev/prepare-hgdev', 'wb') as fh:
441 446 fh.write(PREPARE_HGDEV)
442 447 fh.chmod(0o0777)
443 448
444 449 command = 'sudo /hgdev/prepare-hgdev %s' % filesystem
445 450 chan, stdin, stdout = exec_command(ssh_client, command)
446 451 stdin.close()
447 452
448 453 for line in stdout:
449 454 print(line, end='')
450 455
451 456 res = chan.recv_exit_status()
452 457
453 458 if res:
454 459 raise Exception('non-0 exit code updating working directory; %d'
455 460 % res)
456 461
457 462
458 463 def synchronize_hg(source_path: pathlib.Path, ec2_instance, revision: str=None):
459 464 """Synchronize a local Mercurial source path to remote EC2 instance."""
460 465
461 466 with tempfile.TemporaryDirectory() as temp_dir:
462 467 temp_dir = pathlib.Path(temp_dir)
463 468
464 469 ssh_dir = temp_dir / '.ssh'
465 470 ssh_dir.mkdir()
466 471 ssh_dir.chmod(0o0700)
467 472
468 473 public_ip = ec2_instance.public_ip_address
469 474
470 475 ssh_config = ssh_dir / 'config'
471 476
472 477 with ssh_config.open('w', encoding='utf-8') as fh:
473 478 fh.write('Host %s\n' % public_ip)
474 479 fh.write(' User hg\n')
475 480 fh.write(' StrictHostKeyChecking no\n')
476 481 fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
477 482 fh.write(' IdentityFile %s\n' % ec2_instance.ssh_private_key_path)
478 483
479 484 if not (source_path / '.hg').is_dir():
480 485 raise Exception('%s is not a Mercurial repository; synchronization '
481 486 'not yet supported' % source_path)
482 487
483 488 env = dict(os.environ)
484 489 env['HGPLAIN'] = '1'
485 490 env['HGENCODING'] = 'utf-8'
486 491
487 492 hg_bin = source_path / 'hg'
488 493
489 494 res = subprocess.run(
490 495 ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
491 496 cwd=str(source_path), env=env, check=True, capture_output=True)
492 497
493 498 full_revision = res.stdout.decode('ascii')
494 499
495 500 args = [
496 501 'python2.7', str(hg_bin),
497 502 '--config', 'ui.ssh=ssh -F %s' % ssh_config,
498 503 '--config', 'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
499 504 # Also ensure .hgtags changes are present so auto version
500 505 # calculation works.
501 506 'push', '-f', '-r', full_revision, '-r', 'file(.hgtags)',
502 507 'ssh://%s//hgwork/src' % public_ip,
503 508 ]
504 509
505 510 res = subprocess.run(args, cwd=str(source_path), env=env)
506 511
507 512 # Allow 1 (no-op) to not trigger error.
508 513 if res.returncode not in (0, 1):
509 514 res.check_returncode()
510 515
511 516 # TODO support synchronizing dirty working directory.
512 517
513 518 sftp = ec2_instance.ssh_client.open_sftp()
514 519
515 520 with sftp.open('/hgdev/hgup', 'wb') as fh:
516 521 fh.write(HG_UPDATE_CLEAN)
517 522 fh.chmod(0o0700)
518 523
519 524 chan, stdin, stdout = exec_command(
520 525 ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision)
521 526 stdin.close()
522 527
523 528 for line in stdout:
524 529 print(line, end='')
525 530
526 531 res = chan.recv_exit_status()
527 532
528 533 if res:
529 534 raise Exception('non-0 exit code updating working directory; %d'
530 535 % res)
531 536
532 537
533 538 def run_tests(ssh_client, python_version, test_flags=None):
534 539 """Run tests on a remote Linux machine via an SSH client."""
535 540 test_flags = test_flags or []
536 541
537 542 print('running tests')
538 543
539 544 if python_version == 'system2':
540 545 python = '/usr/bin/python2'
541 546 elif python_version == 'system3':
542 547 python = '/usr/bin/python3'
543 548 elif python_version.startswith('pypy'):
544 549 python = '/hgdev/pyenv/shims/%s' % python_version
545 550 else:
546 551 python = '/hgdev/pyenv/shims/python%s' % python_version
547 552
548 553 test_flags = ' '.join(shlex.quote(a) for a in test_flags)
549 554
550 555 command = (
551 556 '/bin/sh -c "export TMPDIR=/hgwork/tmp; '
552 557 'cd /hgwork/src/tests && %s run-tests.py %s"' % (
553 558 python, test_flags))
554 559
555 560 chan, stdin, stdout = exec_command(ssh_client, command)
556 561
557 562 stdin.close()
558 563
559 564 for line in stdout:
560 565 print(line, end='')
561 566
562 567 return chan.recv_exit_status()
General Comments 0
You need to be logged in to leave comments. Login now