automation: remove Ubuntu 18.10...
Gregory Szorc
r43283:3aa227fe default
@@ -1,1209 +1,1202 b''
1 1 # aws.py - Automation code for Amazon Web Services
2 2 #
3 3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # no-check-code because Python 3 native.
9 9
10 10 import contextlib
11 11 import copy
12 12 import hashlib
13 13 import json
14 14 import os
15 15 import pathlib
16 16 import subprocess
17 17 import time
18 18
19 19 import boto3
20 20 import botocore.exceptions
21 21
22 22 from .linux import (
23 23 BOOTSTRAP_DEBIAN,
24 24 )
25 25 from .ssh import (
26 26 exec_command as ssh_exec_command,
27 27 wait_for_ssh,
28 28 )
29 29 from .winrm import (
30 30 run_powershell,
31 31 wait_for_winrm,
32 32 )
33 33
34 34
35 35 SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent
36 36
37 37 INSTALL_WINDOWS_DEPENDENCIES = (SOURCE_ROOT / 'contrib' /
38 38 'install-windows-dependencies.ps1')
39 39
40 40
41 41 INSTANCE_TYPES_WITH_STORAGE = {
42 42 'c5d',
43 43 'd2',
44 44 'h1',
45 45 'i3',
46 46 'm5ad',
47 47 'm5d',
48 48 'r5d',
49 49 'r5ad',
50 50 'x1',
51 51 'z1d',
52 52 }
53 53
54 54
55 55 AMAZON_ACCOUNT_ID = '801119661308'
56 56 DEBIAN_ACCOUNT_ID = '379101102735'
57 57 UBUNTU_ACCOUNT_ID = '099720109477'
58 58
59 59
60 60 WINDOWS_BASE_IMAGE_NAME = 'Windows_Server-2019-English-Full-Base-2019.07.12'
61 61
62 62
63 63 KEY_PAIRS = {
64 64 'automation',
65 65 }
66 66
67 67
68 68 SECURITY_GROUPS = {
69 69 'linux-dev-1': {
70 70 'description': 'Mercurial Linux instances that perform build/test automation',
71 71 'ingress': [
72 72 {
73 73 'FromPort': 22,
74 74 'ToPort': 22,
75 75 'IpProtocol': 'tcp',
76 76 'IpRanges': [
77 77 {
78 78 'CidrIp': '0.0.0.0/0',
79 79 'Description': 'SSH from entire Internet',
80 80 },
81 81 ],
82 82 },
83 83 ],
84 84 },
85 85 'windows-dev-1': {
86 86 'description': 'Mercurial Windows instances that perform build automation',
87 87 'ingress': [
88 88 {
89 89 'FromPort': 22,
90 90 'ToPort': 22,
91 91 'IpProtocol': 'tcp',
92 92 'IpRanges': [
93 93 {
94 94 'CidrIp': '0.0.0.0/0',
95 95 'Description': 'SSH from entire Internet',
96 96 },
97 97 ],
98 98 },
99 99 {
100 100 'FromPort': 3389,
101 101 'ToPort': 3389,
102 102 'IpProtocol': 'tcp',
103 103 'IpRanges': [
104 104 {
105 105 'CidrIp': '0.0.0.0/0',
106 106 'Description': 'RDP from entire Internet',
107 107 },
108 108 ],
109 109
110 110 },
111 111 {
112 112 'FromPort': 5985,
113 113 'ToPort': 5986,
114 114 'IpProtocol': 'tcp',
115 115 'IpRanges': [
116 116 {
117 117 'CidrIp': '0.0.0.0/0',
118 118 'Description': 'PowerShell Remoting (Windows Remote Management)',
119 119 },
120 120 ],
121 121 }
122 122 ],
123 123 },
124 124 }
125 125
126 126
127 127 IAM_ROLES = {
128 128 'ephemeral-ec2-role-1': {
129 129 'description': 'Mercurial temporary EC2 instances',
130 130 'policy_arns': [
131 131 'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM',
132 132 ],
133 133 },
134 134 }
135 135
136 136
137 137 ASSUME_ROLE_POLICY_DOCUMENT = '''
138 138 {
139 139 "Version": "2012-10-17",
140 140 "Statement": [
141 141 {
142 142 "Effect": "Allow",
143 143 "Principal": {
144 144 "Service": "ec2.amazonaws.com"
145 145 },
146 146 "Action": "sts:AssumeRole"
147 147 }
148 148 ]
149 149 }
150 150 '''.strip()
151 151
152 152
153 153 IAM_INSTANCE_PROFILES = {
154 154 'ephemeral-ec2-1': {
155 155 'roles': [
156 156 'ephemeral-ec2-role-1',
157 157 ],
158 158 }
159 159 }
160 160
161 161
162 162 # User Data for Windows EC2 instance. Mainly used to set the password
163 163 # and configure WinRM.
164 164 # Inspired by the User Data script used by Packer
165 165 # (from https://www.packer.io/intro/getting-started/build-image.html).
166 166 WINDOWS_USER_DATA = r'''
167 167 <powershell>
168 168
169 169 # TODO enable this once we figure out what is failing.
170 170 #$ErrorActionPreference = "stop"
171 171
172 172 # Set administrator password
173 173 net user Administrator "%s"
174 174 wmic useraccount where "name='Administrator'" set PasswordExpires=FALSE
175 175
176 176 # First, make sure WinRM can't be connected to
177 177 netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new enable=yes action=block
178 178
179 179 # Delete any existing WinRM listeners
180 180 winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null
181 181 winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null
182 182
183 183 # Create a new WinRM listener and configure
184 184 winrm create winrm/config/listener?Address=*+Transport=HTTP
185 185 winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="0"}'
186 186 winrm set winrm/config '@{MaxTimeoutms="7200000"}'
187 187 winrm set winrm/config/service '@{AllowUnencrypted="true"}'
188 188 winrm set winrm/config/service '@{MaxConcurrentOperationsPerUser="12000"}'
189 189 winrm set winrm/config/service/auth '@{Basic="true"}'
190 190 winrm set winrm/config/client/auth '@{Basic="true"}'
191 191
192 192 # Configure UAC to allow privilege elevation in remote shells
193 193 $Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System'
194 194 $Setting = 'LocalAccountTokenFilterPolicy'
195 195 Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force
196 196
197 197 # Configure and restart the WinRM Service; Enable the required firewall exception
198 198 Stop-Service -Name WinRM
199 199 Set-Service -Name WinRM -StartupType Automatic
200 200 netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any
201 201 Start-Service -Name WinRM
202 202
203 203 # Disable firewall on private network interfaces so prompts don't appear.
204 204 Set-NetFirewallProfile -Name private -Enabled false
205 205 </powershell>
206 206 '''.lstrip()
207 207
208 208
209 209 WINDOWS_BOOTSTRAP_POWERSHELL = '''
210 210 Write-Output "installing PowerShell dependencies"
211 211 Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force
212 212 Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
213 213 Install-Module -Name OpenSSHUtils -RequiredVersion 0.0.2.0
214 214
215 215 Write-Output "installing OpenSSH server"
216 216 Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0
217 217 # Various tools will attempt to use older versions of .NET. So we enable
218 218 # the feature that provides them so it doesn't have to be auto-enabled
219 219 # later.
220 220 Write-Output "enabling .NET Framework feature"
221 221 Install-WindowsFeature -Name Net-Framework-Core
222 222 '''
223 223
224 224
225 225 class AWSConnection:
226 226 """Manages the state of a connection with AWS."""
227 227
228 228 def __init__(self, automation, region: str, ensure_ec2_state: bool=True):
229 229 self.automation = automation
230 230 self.local_state_path = automation.state_path
231 231
232 232 self.prefix = 'hg-'
233 233
234 234 self.session = boto3.session.Session(region_name=region)
235 235 self.ec2client = self.session.client('ec2')
236 236 self.ec2resource = self.session.resource('ec2')
237 237 self.iamclient = self.session.client('iam')
238 238 self.iamresource = self.session.resource('iam')
239 239 self.security_groups = {}
240 240
241 241 if ensure_ec2_state:
242 242 ensure_key_pairs(automation.state_path, self.ec2resource)
243 243 self.security_groups = ensure_security_groups(self.ec2resource)
244 244 ensure_iam_state(self.iamclient, self.iamresource)
245 245
246 246 def key_pair_path_private(self, name):
247 247 """Path to a key pair private key file."""
248 248 return self.local_state_path / 'keys' / ('keypair-%s' % name)
249 249
250 250 def key_pair_path_public(self, name):
251 251 return self.local_state_path / 'keys' / ('keypair-%s.pub' % name)
252 252
253 253
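# A hedged construction sketch (not part of the original module): the
# ``automation`` argument normally comes from the hgautomation harness, so a
# hypothetical stand-in exposing the two members this module touches is used
# here. Passing ensure_ec2_state=False keeps the example free of AWS side
# effects (no key pairs, security groups, or IAM state are created).
class FakeAutomation:  # hypothetical stand-in for the harness object
    state_path = pathlib.Path.home() / '.hgautomation'

    def default_password(self):
        return 'hunter2'  # placeholder; only consulted for Windows instances

c = AWSConnection(FakeAutomation(), region='us-west-2', ensure_ec2_state=False)
print(c.key_pair_path_private('automation'))
# -> <home>/.hgautomation/keys/keypair-automation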
254 254 def rsa_key_fingerprint(p: pathlib.Path):
255 255 """Compute the fingerprint of an RSA private key."""
256 256
257 257 # TODO use rsa package.
258 258 res = subprocess.run(
259 259 ['openssl', 'pkcs8', '-in', str(p), '-nocrypt', '-topk8',
260 260 '-outform', 'DER'],
261 261 capture_output=True,
262 262 check=True)
263 263
264 264 sha1 = hashlib.sha1(res.stdout).hexdigest()
265 265 return ':'.join(a + b for a, b in zip(sha1[::2], sha1[1::2]))
266 266
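# A hedged pure-Python alternative for the TODO above (an assumption, not the
# project's chosen approach), using the ``cryptography`` package instead of
# shelling out to openssl. It re-encodes the PEM private key as PKCS#8 DER and
# fingerprints it the same way AWS reports fingerprints for created key pairs.
def rsa_key_fingerprint_cryptography(p: pathlib.Path):
    from cryptography.hazmat.primitives import serialization

    with p.open('rb') as fh:
        key = serialization.load_pem_private_key(fh.read(), password=None)

    der = key.private_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption())

    sha1 = hashlib.sha1(der).hexdigest()
    return ':'.join(a + b for a, b in zip(sha1[::2], sha1[1::2]))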
267 267
268 268 def ensure_key_pairs(state_path: pathlib.Path, ec2resource, prefix='hg-'):
269 269 remote_existing = {}
270 270
271 271 for kpi in ec2resource.key_pairs.all():
272 272 if kpi.name.startswith(prefix):
273 273 remote_existing[kpi.name[len(prefix):]] = kpi.key_fingerprint
274 274
275 275 # Validate that we have these keys locally.
276 276 key_path = state_path / 'keys'
277 277 key_path.mkdir(exist_ok=True, mode=0o700)
278 278
279 279 def remove_remote(name):
280 280 print('deleting key pair %s' % name)
281 281 key = ec2resource.KeyPair(name)
282 282 key.delete()
283 283
284 284 def remove_local(name):
285 285 pub_full = key_path / ('keypair-%s.pub' % name)
286 286 priv_full = key_path / ('keypair-%s' % name)
287 287
288 288 print('removing %s' % pub_full)
289 289 pub_full.unlink()
290 290 print('removing %s' % priv_full)
291 291 priv_full.unlink()
292 292
293 293 local_existing = {}
294 294
295 295 for f in sorted(os.listdir(key_path)):
296 296 if not f.startswith('keypair-') or not f.endswith('.pub'):
297 297 continue
298 298
299 299 name = f[len('keypair-'):-len('.pub')]
300 300
301 301 pub_full = key_path / f
302 302 priv_full = key_path / ('keypair-%s' % name)
303 303
304 304 with open(pub_full, 'r', encoding='ascii') as fh:
305 305 data = fh.read()
306 306
307 307 if not data.startswith('ssh-rsa '):
308 308 print('unexpected format for key pair file: %s; removing' %
309 309 pub_full)
310 310 pub_full.unlink()
311 311 priv_full.unlink()
312 312 continue
313 313
314 314 local_existing[name] = rsa_key_fingerprint(priv_full)
315 315
316 316 for name in sorted(set(remote_existing) | set(local_existing)):
317 317 if name not in local_existing:
318 318 actual = '%s%s' % (prefix, name)
319 319 print('remote key %s does not exist locally' % name)
320 320 remove_remote(actual)
321 321 del remote_existing[name]
322 322
323 323 elif name not in remote_existing:
324 324 print('local key %s does not exist remotely' % name)
325 325 remove_local(name)
326 326 del local_existing[name]
327 327
328 328 elif remote_existing[name] != local_existing[name]:
329 329 print('key fingerprint mismatch for %s; '
330 330 'removing from local and remote' % name)
331 331 remove_local(name)
332 332 remove_remote('%s%s' % (prefix, name))
333 333 del local_existing[name]
334 334 del remote_existing[name]
335 335
336 336 missing = KEY_PAIRS - set(remote_existing)
337 337
338 338 for name in sorted(missing):
339 339 actual = '%s%s' % (prefix, name)
340 340 print('creating key pair %s' % actual)
341 341
342 342 priv_full = key_path / ('keypair-%s' % name)
343 343 pub_full = key_path / ('keypair-%s.pub' % name)
344 344
345 345 kp = ec2resource.create_key_pair(KeyName=actual)
346 346
347 347 with priv_full.open('w', encoding='ascii') as fh:
348 348 fh.write(kp.key_material)
349 349 fh.write('\n')
350 350
351 351 priv_full.chmod(0o0600)
352 352
353 353 # SSH public key can be extracted via `ssh-keygen`.
354 354 with pub_full.open('w', encoding='ascii') as fh:
355 355 subprocess.run(
356 356 ['ssh-keygen', '-y', '-f', str(priv_full)],
357 357 stdout=fh,
358 358 check=True)
359 359
360 360 pub_full.chmod(0o0600)
361 361
362 362
363 363 def delete_instance_profile(profile):
364 364 for role in profile.roles:
365 365 print('removing role %s from instance profile %s' % (role.name,
366 366 profile.name))
367 367 profile.remove_role(RoleName=role.name)
368 368
369 369 print('deleting instance profile %s' % profile.name)
370 370 profile.delete()
371 371
372 372
373 373 def ensure_iam_state(iamclient, iamresource, prefix='hg-'):
374 374 """Ensure IAM state is in sync with our canonical definition."""
375 375
376 376 remote_profiles = {}
377 377
378 378 for profile in iamresource.instance_profiles.all():
379 379 if profile.name.startswith(prefix):
380 380 remote_profiles[profile.name[len(prefix):]] = profile
381 381
382 382 for name in sorted(set(remote_profiles) - set(IAM_INSTANCE_PROFILES)):
383 383 delete_instance_profile(remote_profiles[name])
384 384 del remote_profiles[name]
385 385
386 386 remote_roles = {}
387 387
388 388 for role in iamresource.roles.all():
389 389 if role.name.startswith(prefix):
390 390 remote_roles[role.name[len(prefix):]] = role
391 391
392 392 for name in sorted(set(remote_roles) - set(IAM_ROLES)):
393 393 role = remote_roles[name]
394 394
395 395 print('removing role %s' % role.name)
396 396 role.delete()
397 397 del remote_roles[name]
398 398
399 399 # We've purged remote state that doesn't belong. Create missing
400 400 # instance profiles and roles.
401 401 for name in sorted(set(IAM_INSTANCE_PROFILES) - set(remote_profiles)):
402 402 actual = '%s%s' % (prefix, name)
403 403 print('creating IAM instance profile %s' % actual)
404 404
405 405 profile = iamresource.create_instance_profile(
406 406 InstanceProfileName=actual)
407 407 remote_profiles[name] = profile
408 408
409 409 waiter = iamclient.get_waiter('instance_profile_exists')
410 410 waiter.wait(InstanceProfileName=actual)
411 411 print('IAM instance profile %s is available' % actual)
412 412
413 413 for name in sorted(set(IAM_ROLES) - set(remote_roles)):
414 414 entry = IAM_ROLES[name]
415 415
416 416 actual = '%s%s' % (prefix, name)
417 417 print('creating IAM role %s' % actual)
418 418
419 419 role = iamresource.create_role(
420 420 RoleName=actual,
421 421 Description=entry['description'],
422 422 AssumeRolePolicyDocument=ASSUME_ROLE_POLICY_DOCUMENT,
423 423 )
424 424
425 425 waiter = iamclient.get_waiter('role_exists')
426 426 waiter.wait(RoleName=actual)
427 427 print('IAM role %s is available' % actual)
428 428
429 429 remote_roles[name] = role
430 430
431 431 for arn in entry['policy_arns']:
432 432 print('attaching policy %s to %s' % (arn, role.name))
433 433 role.attach_policy(PolicyArn=arn)
434 434
435 435 # Now reconcile state of profiles.
436 436 for name, meta in sorted(IAM_INSTANCE_PROFILES.items()):
437 437 profile = remote_profiles[name]
438 438 wanted = {'%s%s' % (prefix, role) for role in meta['roles']}
439 439 have = {role.name for role in profile.roles}
440 440
441 441 for role in sorted(have - wanted):
442 442 print('removing role %s from %s' % (role, profile.name))
443 443 profile.remove_role(RoleName=role)
444 444
445 445 for role in sorted(wanted - have):
446 446 print('adding role %s to %s' % (role, profile.name))
447 447 profile.add_role(RoleName=role)
448 448
449 449
450 450 def find_image(ec2resource, owner_id, name):
451 451 """Find an AMI by its owner ID and name."""
452 452
453 453 images = ec2resource.images.filter(
454 454 Filters=[
455 455 {
456 456 'Name': 'owner-id',
457 457 'Values': [owner_id],
458 458 },
459 459 {
460 460 'Name': 'state',
461 461 'Values': ['available'],
462 462 },
463 463 {
464 464 'Name': 'image-type',
465 465 'Values': ['machine'],
466 466 },
467 467 {
468 468 'Name': 'name',
469 469 'Values': [name],
470 470 },
471 471 ])
472 472
473 473 for image in images:
474 474 return image
475 475
476 476 raise Exception('unable to find image for %s' % name)
477 477
478 478
479 479 def ensure_security_groups(ec2resource, prefix='hg-'):
480 480 """Ensure all necessary Mercurial security groups are present.
481 481
482 482 All security groups are prefixed with ``hg-`` by default. Any security
483 483 groups that have this prefix but aren't in our list are deleted.
484 484 """
485 485 existing = {}
486 486
487 487 for group in ec2resource.security_groups.all():
488 488 if group.group_name.startswith(prefix):
489 489 existing[group.group_name[len(prefix):]] = group
490 490
491 491 purge = set(existing) - set(SECURITY_GROUPS)
492 492
493 493 for name in sorted(purge):
494 494 group = existing[name]
495 495 print('removing legacy security group: %s' % group.group_name)
496 496 group.delete()
497 497
498 498 security_groups = {}
499 499
500 500 for name, group in sorted(SECURITY_GROUPS.items()):
501 501 if name in existing:
502 502 security_groups[name] = existing[name]
503 503 continue
504 504
505 505 actual = '%s%s' % (prefix, name)
506 506 print('adding security group %s' % actual)
507 507
508 508 group_res = ec2resource.create_security_group(
509 509 Description=group['description'],
510 510 GroupName=actual,
511 511 )
512 512
513 513 group_res.authorize_ingress(
514 514 IpPermissions=group['ingress'],
515 515 )
516 516
517 517 security_groups[name] = group_res
518 518
519 519 return security_groups
520 520
521 521
522 522 def terminate_ec2_instances(ec2resource, prefix='hg-'):
523 523 """Terminate all EC2 instances managed by us."""
524 524 waiting = []
525 525
526 526 for instance in ec2resource.instances.all():
527 527 if instance.state['Name'] == 'terminated':
528 528 continue
529 529
530 530 for tag in instance.tags or []:
531 531 if tag['Key'] == 'Name' and tag['Value'].startswith(prefix):
532 532 print('terminating %s' % instance.id)
533 533 instance.terminate()
534 534 waiting.append(instance)
535 535
536 536 for instance in waiting:
537 537 instance.wait_until_terminated()
538 538
539 539
540 540 def remove_resources(c, prefix='hg-'):
541 541 """Purge all of our resources in this EC2 region."""
542 542 ec2resource = c.ec2resource
543 543 iamresource = c.iamresource
544 544
545 545 terminate_ec2_instances(ec2resource, prefix=prefix)
546 546
547 547 for image in ec2resource.images.filter(Owners=['self']):
548 548 if image.name.startswith(prefix):
549 549 remove_ami(ec2resource, image)
550 550
551 551 for group in ec2resource.security_groups.all():
552 552 if group.group_name.startswith(prefix):
553 553 print('removing security group %s' % group.group_name)
554 554 group.delete()
555 555
556 556 for profile in iamresource.instance_profiles.all():
557 557 if profile.name.startswith(prefix):
558 558 delete_instance_profile(profile)
559 559
560 560 for role in iamresource.roles.all():
561 561 if role.name.startswith(prefix):
562 562 for p in role.attached_policies.all():
563 563 print('detaching policy %s from %s' % (p.arn, role.name))
564 564 role.detach_policy(PolicyArn=p.arn)
565 565
566 566 print('removing role %s' % role.name)
567 567 role.delete()
568 568
569 569
570 570 def wait_for_ip_addresses(instances):
571 571 """Wait for the public IP addresses of an iterable of instances."""
572 572 for instance in instances:
573 573 while True:
574 574 if not instance.public_ip_address:
575 575 time.sleep(2)
576 576 instance.reload()
577 577 continue
578 578
579 579 print('public IP address for %s: %s' % (
580 580 instance.id, instance.public_ip_address))
581 581 break
582 582
583 583
584 584 def remove_ami(ec2resource, image):
585 585 """Remove an AMI and its underlying snapshots."""
586 586 snapshots = []
587 587
588 588 for device in image.block_device_mappings:
589 589 if 'Ebs' in device:
590 590 snapshots.append(ec2resource.Snapshot(device['Ebs']['SnapshotId']))
591 591
592 592 print('deregistering %s' % image.id)
593 593 image.deregister()
594 594
595 595 for snapshot in snapshots:
596 596 print('deleting snapshot %s' % snapshot.id)
597 597 snapshot.delete()
598 598
599 599
600 600 def wait_for_ssm(ssmclient, instances):
601 601 """Wait for SSM to come online for an iterable of instance IDs."""
602 602 while True:
603 603 res = ssmclient.describe_instance_information(
604 604 Filters=[
605 605 {
606 606 'Key': 'InstanceIds',
607 607 'Values': [i.id for i in instances],
608 608 },
609 609 ],
610 610 )
611 611
612 612 available = len(res['InstanceInformationList'])
613 613 wanted = len(instances)
614 614
615 615 print('%d/%d instances available in SSM' % (available, wanted))
616 616
617 617 if available == wanted:
618 618 return
619 619
620 620 time.sleep(2)
621 621
622 622
623 623 def run_ssm_command(ssmclient, instances, document_name, parameters):
624 624 """Run a PowerShell script on an EC2 instance."""
625 625
626 626 res = ssmclient.send_command(
627 627 InstanceIds=[i.id for i in instances],
628 628 DocumentName=document_name,
629 629 Parameters=parameters,
630 630 CloudWatchOutputConfig={
631 631 'CloudWatchOutputEnabled': True,
632 632 },
633 633 )
634 634
635 635 command_id = res['Command']['CommandId']
636 636
637 637 for instance in instances:
638 638 while True:
639 639 try:
640 640 res = ssmclient.get_command_invocation(
641 641 CommandId=command_id,
642 642 InstanceId=instance.id,
643 643 )
644 644 except botocore.exceptions.ClientError as e:
645 645 if e.response['Error']['Code'] == 'InvocationDoesNotExist':
646 646 print('could not find SSM command invocation; waiting')
647 647 time.sleep(1)
648 648 continue
649 649 else:
650 650 raise
651 651
652 652 if res['Status'] == 'Success':
653 653 break
654 654 elif res['Status'] in ('Pending', 'InProgress', 'Delayed'):
655 655 time.sleep(2)
656 656 else:
657 657 raise Exception('command failed on %s: %s' % (
658 658 instance.id, res['Status']))
659 659
660 660
661 661 @contextlib.contextmanager
662 662 def temporary_ec2_instances(ec2resource, config):
663 663 """Create temporary EC2 instances.
664 664
665 665 This is a proxy to ``ec2resource.create_instances(**config)`` that takes care of
666 666 managing the lifecycle of the instances.
667 667
668 668 When the context manager exits, the instances are terminated.
669 669
670 670 The context manager evaluates to the list of data structures
671 671 describing each created instance. The instances may not be available
672 672 for work immediately: it is up to the caller to wait for the instance
673 673 to start responding.
674 674 """
675 675
676 676 ids = None
677 677
678 678 try:
679 679 res = ec2resource.create_instances(**config)
680 680
681 681 ids = [i.id for i in res]
682 682 print('started instances: %s' % ' '.join(ids))
683 683
684 684 yield res
685 685 finally:
686 686 if ids:
687 687 print('terminating instances: %s' % ' '.join(ids))
688 688 for instance in res:
689 689 instance.terminate()
690 690 print('terminated %d instances' % len(ids))
691 691
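# A hedged usage sketch (not part of the original module): ``c`` is an
# AWSConnection and the AMI ID is a made-up placeholder. The config keys simply
# mirror ec2resource.create_instances() arguments; instances are terminated
# automatically when the block exits.
config = {
    'ImageId': 'ami-0123456789abcdef0',  # hypothetical AMI
    'InstanceType': 't3.medium',
    'KeyName': 'hg-automation',
    'MaxCount': 1,
    'MinCount': 1,
}

with temporary_ec2_instances(c.ec2resource, config) as instances:
    wait_for_ip_addresses(instances)
    for instance in instances:
        print('%s is reachable at %s' % (instance.id,
                                         instance.public_ip_address))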
692 692
693 693 @contextlib.contextmanager
694 694 def create_temp_windows_ec2_instances(c: AWSConnection, config):
695 695 """Create temporary Windows EC2 instances.
696 696
697 697 This is a higher-level wrapper around ``temporary_ec2_instances()`` that
698 698 configures the Windows instance for Windows Remote Management. The emitted
699 699 instances will have a ``winrm_client`` attribute containing a
700 700 ``pypsrp.client.Client`` instance bound to the instance.
701 701 """
702 702 if 'IamInstanceProfile' in config:
703 703 raise ValueError('IamInstanceProfile cannot be provided in config')
704 704 if 'UserData' in config:
705 705 raise ValueError('UserData cannot be provided in config')
706 706
707 707 password = c.automation.default_password()
708 708
709 709 config = copy.deepcopy(config)
710 710 config['IamInstanceProfile'] = {
711 711 'Name': 'hg-ephemeral-ec2-1',
712 712 }
713 713 config.setdefault('TagSpecifications', []).append({
714 714 'ResourceType': 'instance',
715 715 'Tags': [{'Key': 'Name', 'Value': 'hg-temp-windows'}],
716 716 })
717 717 config['UserData'] = WINDOWS_USER_DATA % password
718 718
719 719 with temporary_ec2_instances(c.ec2resource, config) as instances:
720 720 wait_for_ip_addresses(instances)
721 721
722 722 print('waiting for Windows Remote Management service...')
723 723
724 724 for instance in instances:
725 725 client = wait_for_winrm(instance.public_ip_address, 'Administrator', password)
726 726 print('established WinRM connection to %s' % instance.id)
727 727 instance.winrm_client = client
728 728
729 729 yield instances
730 730
731 731
732 732 def resolve_fingerprint(fingerprint):
733 733 fingerprint = json.dumps(fingerprint, sort_keys=True)
734 734 return hashlib.sha256(fingerprint.encode('utf-8')).hexdigest()
735 735
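# A brief illustration: resolve_fingerprint() hashes the JSON-serialized input,
# so any change to the instance config, bootstrap scripts, or requirements
# yields a different digest and triggers an AMI rebuild in the
# ensure_*_dev_ami() functions below.
a = resolve_fingerprint({'instance_config': {'InstanceType': 't3.large'}})
b = resolve_fingerprint({'instance_config': {'InstanceType': 't3.xlarge'}})
assert a != b
assert len(a) == 64  # hex-encoded SHA-256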
736 736
737 737 def find_and_reconcile_image(ec2resource, name, fingerprint):
738 738 """Attempt to find an existing EC2 AMI with a name and fingerprint.
739 739
740 740 If an image with the specified fingerprint is found, it is returned.
741 741 Otherwise None is returned.
742 742
743 743 Existing images for the specified name that don't have the specified
744 744 fingerprint or are missing required metadata are deleted.
745 745 """
746 746 # Find existing AMIs with this name and delete the ones that are invalid.
747 747 # Store a reference to a good image so it can be returned once the
748 748 # image state is reconciled.
749 749 images = ec2resource.images.filter(
750 750 Filters=[{'Name': 'name', 'Values': [name]}])
751 751
752 752 existing_image = None
753 753
754 754 for image in images:
755 755 if image.tags is None:
756 756 print('image %s for %s lacks required tags; removing' % (
757 757 image.id, image.name))
758 758 remove_ami(ec2resource, image)
759 759 else:
760 760 tags = {t['Key']: t['Value'] for t in image.tags}
761 761
762 762 if tags.get('HGIMAGEFINGERPRINT') == fingerprint:
763 763 existing_image = image
764 764 else:
765 765 print('image %s for %s has wrong fingerprint; removing' % (
766 766 image.id, image.name))
767 767 remove_ami(ec2resource, image)
768 768
769 769 return existing_image
770 770
771 771
772 772 def create_ami_from_instance(ec2client, instance, name, description,
773 773 fingerprint):
774 774 """Create an AMI from a running instance.
775 775
776 776 Returns the ``ec2resource.Image`` representing the created AMI.
777 777 """
778 778 instance.stop()
779 779
780 780 ec2client.get_waiter('instance_stopped').wait(
781 781 InstanceIds=[instance.id],
782 782 WaiterConfig={
783 783 'Delay': 5,
784 784 })
785 785 print('%s is stopped' % instance.id)
786 786
787 787 image = instance.create_image(
788 788 Name=name,
789 789 Description=description,
790 790 )
791 791
792 792 image.create_tags(Tags=[
793 793 {
794 794 'Key': 'HGIMAGEFINGERPRINT',
795 795 'Value': fingerprint,
796 796 },
797 797 ])
798 798
799 799 print('waiting for image %s' % image.id)
800 800
801 801 ec2client.get_waiter('image_available').wait(
802 802 ImageIds=[image.id],
803 803 )
804 804
805 805 print('image %s available as %s' % (image.id, image.name))
806 806
807 807 return image
808 808
809 809
810 810 def ensure_linux_dev_ami(c: AWSConnection, distro='debian9', prefix='hg-'):
811 811 """Ensures a Linux development AMI is available and up-to-date.
812 812
813 813 Returns an ``ec2.Image`` of either an existing AMI or a newly-built one.
814 814 """
815 815 ec2client = c.ec2client
816 816 ec2resource = c.ec2resource
817 817
818 818 name = '%s%s-%s' % (prefix, 'linux-dev', distro)
819 819
820 820 if distro == 'debian9':
821 821 image = find_image(
822 822 ec2resource,
823 823 DEBIAN_ACCOUNT_ID,
824 824 'debian-stretch-hvm-x86_64-gp2-2019-02-19-26620',
825 825 )
826 826 ssh_username = 'admin'
827 827 elif distro == 'ubuntu18.04':
828 828 image = find_image(
829 829 ec2resource,
830 830 UBUNTU_ACCOUNT_ID,
831 831 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190403',
832 832 )
833 833 ssh_username = 'ubuntu'
834 elif distro == 'ubuntu18.10':
835 image = find_image(
836 ec2resource,
837 UBUNTU_ACCOUNT_ID,
838 'ubuntu/images/hvm-ssd/ubuntu-cosmic-18.10-amd64-server-20190402',
839 )
840 ssh_username = 'ubuntu'
841 834 elif distro == 'ubuntu19.04':
842 835 image = find_image(
843 836 ec2resource,
844 837 UBUNTU_ACCOUNT_ID,
845 838 'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190417',
846 839 )
847 840 ssh_username = 'ubuntu'
848 841 else:
849 842 raise ValueError('unsupported Linux distro: %s' % distro)
850 843
851 844 config = {
852 845 'BlockDeviceMappings': [
853 846 {
854 847 'DeviceName': image.block_device_mappings[0]['DeviceName'],
855 848 'Ebs': {
856 849 'DeleteOnTermination': True,
857 850 'VolumeSize': 8,
858 851 'VolumeType': 'gp2',
859 852 },
860 853 },
861 854 ],
862 855 'EbsOptimized': True,
863 856 'ImageId': image.id,
864 857 'InstanceInitiatedShutdownBehavior': 'stop',
865 858 # 8 VCPUs for compiling Python.
866 859 'InstanceType': 't3.2xlarge',
867 860 'KeyName': '%sautomation' % prefix,
868 861 'MaxCount': 1,
869 862 'MinCount': 1,
870 863 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
871 864 }
872 865
873 866 requirements2_path = (pathlib.Path(__file__).parent.parent /
874 867 'linux-requirements-py2.txt')
875 868 requirements3_path = (pathlib.Path(__file__).parent.parent /
876 869 'linux-requirements-py3.txt')
877 870 with requirements2_path.open('r', encoding='utf-8') as fh:
878 871 requirements2 = fh.read()
879 872 with requirements3_path.open('r', encoding='utf-8') as fh:
880 873 requirements3 = fh.read()
881 874
882 875 # Compute a deterministic fingerprint to determine whether image needs to
883 876 # be regenerated.
884 877 fingerprint = resolve_fingerprint({
885 878 'instance_config': config,
886 879 'bootstrap_script': BOOTSTRAP_DEBIAN,
887 880 'requirements_py2': requirements2,
888 881 'requirements_py3': requirements3,
889 882 })
890 883
891 884 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
892 885
893 886 if existing_image:
894 887 return existing_image
895 888
896 889 print('no suitable %s image found; creating one...' % name)
897 890
898 891 with temporary_ec2_instances(ec2resource, config) as instances:
899 892 wait_for_ip_addresses(instances)
900 893
901 894 instance = instances[0]
902 895
903 896 client = wait_for_ssh(
904 897 instance.public_ip_address, 22,
905 898 username=ssh_username,
906 899 key_filename=str(c.key_pair_path_private('automation')))
907 900
908 901 home = '/home/%s' % ssh_username
909 902
910 903 with client:
911 904 print('connecting to SSH server')
912 905 sftp = client.open_sftp()
913 906
914 907 print('uploading bootstrap files')
915 908 with sftp.open('%s/bootstrap' % home, 'wb') as fh:
916 909 fh.write(BOOTSTRAP_DEBIAN)
917 910 fh.chmod(0o0700)
918 911
919 912 with sftp.open('%s/requirements-py2.txt' % home, 'wb') as fh:
920 913 fh.write(requirements2)
921 914 fh.chmod(0o0700)
922 915
923 916 with sftp.open('%s/requirements-py3.txt' % home, 'wb') as fh:
924 917 fh.write(requirements3)
925 918 fh.chmod(0o0700)
926 919
927 920 print('executing bootstrap')
928 921 chan, stdin, stdout = ssh_exec_command(client,
929 922 '%s/bootstrap' % home)
930 923 stdin.close()
931 924
932 925 for line in stdout:
933 926 print(line, end='')
934 927
935 928 res = chan.recv_exit_status()
936 929 if res:
937 930 raise Exception('non-0 exit from bootstrap: %d' % res)
938 931
939 932 print('bootstrap completed; stopping %s to create %s' % (
940 933 instance.id, name))
941 934
942 935 return create_ami_from_instance(ec2client, instance, name,
943 936 'Mercurial Linux development environment',
944 937 fingerprint)
945 938
946 939
947 940 @contextlib.contextmanager
948 941 def temporary_linux_dev_instances(c: AWSConnection, image, instance_type,
949 942 prefix='hg-', ensure_extra_volume=False):
950 943 """Create temporary Linux development EC2 instances.
951 944
952 945 Context manager resolves to a list of ``ec2.Instance`` that were created
953 946 and are running.
954 947
955 948 ``ensure_extra_volume`` can be set to ``True`` to require that instances
956 949 have a 2nd storage volume available other than the primary AMI volume.
957 950 For instance types with instance storage, this does nothing special.
958 951 But for instance types without instance storage, an additional EBS volume
959 952 will be added to the instance.
960 953
961 954 Instances have an ``ssh_client`` attribute containing a paramiko SSHClient
962 955 instance bound to the instance.
963 956
964 957 Instances have an ``ssh_private_key_path`` attribute containing the
965 958 str path to the SSH private key to connect to the instance.
966 959 """
967 960
968 961 block_device_mappings = [
969 962 {
970 963 'DeviceName': image.block_device_mappings[0]['DeviceName'],
971 964 'Ebs': {
972 965 'DeleteOnTermination': True,
973 966 'VolumeSize': 12,
974 967 'VolumeType': 'gp2',
975 968 },
976 969 }
977 970 ]
978 971
979 972 # This is not an exhaustive list of instance types having instance storage.
980 973 # But it covers the instance types we currently use.
981 974 if (ensure_extra_volume
982 975 and not instance_type.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))):
983 976 main_device = block_device_mappings[0]['DeviceName']
984 977
985 978 if main_device == 'xvda':
986 979 second_device = 'xvdb'
987 980 elif main_device == '/dev/sda1':
988 981 second_device = '/dev/sdb'
989 982 else:
990 983 raise ValueError('unhandled primary EBS device name: %s' %
991 984 main_device)
992 985
993 986 block_device_mappings.append({
994 987 'DeviceName': second_device,
995 988 'Ebs': {
996 989 'DeleteOnTermination': True,
997 990 'VolumeSize': 8,
998 991 'VolumeType': 'gp2',
999 992 }
1000 993 })
1001 994
1002 995 config = {
1003 996 'BlockDeviceMappings': block_device_mappings,
1004 997 'EbsOptimized': True,
1005 998 'ImageId': image.id,
1006 999 'InstanceInitiatedShutdownBehavior': 'terminate',
1007 1000 'InstanceType': instance_type,
1008 1001 'KeyName': '%sautomation' % prefix,
1009 1002 'MaxCount': 1,
1010 1003 'MinCount': 1,
1011 1004 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
1012 1005 }
1013 1006
1014 1007 with temporary_ec2_instances(c.ec2resource, config) as instances:
1015 1008 wait_for_ip_addresses(instances)
1016 1009
1017 1010 ssh_private_key_path = str(c.key_pair_path_private('automation'))
1018 1011
1019 1012 for instance in instances:
1020 1013 client = wait_for_ssh(
1021 1014 instance.public_ip_address, 22,
1022 1015 username='hg',
1023 1016 key_filename=ssh_private_key_path)
1024 1017
1025 1018 instance.ssh_client = client
1026 1019 instance.ssh_private_key_path = ssh_private_key_path
1027 1020
1028 1021 try:
1029 1022 yield instances
1030 1023 finally:
1031 1024 for instance in instances:
1032 1025 instance.ssh_client.close()
1033 1026
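# A hedged usage sketch tying the Linux helpers together (not part of the
# original module): ``c`` is an AWSConnection and the instance type is
# illustrative. c5.2xlarge has no instance storage, so ensure_extra_volume=True
# causes a second EBS volume to be attached.
image = ensure_linux_dev_ami(c, distro='debian9')

with temporary_linux_dev_instances(c, image, 'c5.2xlarge',
                                   ensure_extra_volume=True) as insts:
    for instance in insts:
        print('%s ready; SSH key at %s' % (
            instance.id, instance.ssh_private_key_path))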
1034 1027
1035 1028 def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-',
1036 1029 base_image_name=WINDOWS_BASE_IMAGE_NAME):
1037 1030 """Ensure Windows Development AMI is available and up-to-date.
1038 1031
1039 1032 If necessary, a modern AMI will be built by starting a temporary EC2
1040 1033 instance and bootstrapping it.
1041 1034
1042 1035 Obsolete AMIs will be deleted so there is only a single AMI having the
1043 1036 desired name.
1044 1037
1045 1038 Returns an ``ec2.Image`` of either an existing AMI or a newly-built
1046 1039 one.
1047 1040 """
1048 1041 ec2client = c.ec2client
1049 1042 ec2resource = c.ec2resource
1050 1043 ssmclient = c.session.client('ssm')
1051 1044
1052 1045 name = '%s%s' % (prefix, 'windows-dev')
1053 1046
1054 1047 image = find_image(ec2resource, AMAZON_ACCOUNT_ID, base_image_name)
1055 1048
1056 1049 config = {
1057 1050 'BlockDeviceMappings': [
1058 1051 {
1059 1052 'DeviceName': '/dev/sda1',
1060 1053 'Ebs': {
1061 1054 'DeleteOnTermination': True,
1062 1055 'VolumeSize': 32,
1063 1056 'VolumeType': 'gp2',
1064 1057 },
1065 1058 }
1066 1059 ],
1067 1060 'ImageId': image.id,
1068 1061 'InstanceInitiatedShutdownBehavior': 'stop',
1069 1062 'InstanceType': 't3.medium',
1070 1063 'KeyName': '%sautomation' % prefix,
1071 1064 'MaxCount': 1,
1072 1065 'MinCount': 1,
1073 1066 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1074 1067 }
1075 1068
1076 1069 commands = [
1077 1070 # Need to start the service so sshd_config is generated.
1078 1071 'Start-Service sshd',
1079 1072 'Write-Output "modifying sshd_config"',
1080 1073 r'$content = Get-Content C:\ProgramData\ssh\sshd_config',
1081 1074 '$content = $content -replace "Match Group administrators","" -replace "AuthorizedKeysFile __PROGRAMDATA__/ssh/administrators_authorized_keys",""',
1082 1075 r'$content | Set-Content C:\ProgramData\ssh\sshd_config',
1083 1076 'Import-Module OpenSSHUtils',
1084 1077 r'Repair-SshdConfigPermission C:\ProgramData\ssh\sshd_config -Confirm:$false',
1085 1078 'Restart-Service sshd',
1086 1079 'Write-Output "installing OpenSSH client"',
1087 1080 'Add-WindowsCapability -Online -Name OpenSSH.Client~~~~0.0.1.0',
1088 1081 'Set-Service -Name sshd -StartupType "Automatic"',
1089 1082 'Write-Output "OpenSSH server running"',
1090 1083 ]
1091 1084
1092 1085 with INSTALL_WINDOWS_DEPENDENCIES.open('r', encoding='utf-8') as fh:
1093 1086 commands.extend(l.rstrip() for l in fh)
1094 1087
1095 1088 # Disable Windows Defender when bootstrapping because it just slows
1096 1089 # things down.
1097 1090 commands.insert(0, 'Set-MpPreference -DisableRealtimeMonitoring $true')
1098 1091 commands.append('Set-MpPreference -DisableRealtimeMonitoring $false')
1099 1092
1100 1093 # Compute a deterministic fingerprint to determine whether image needs
1101 1094 # to be regenerated.
1102 1095 fingerprint = resolve_fingerprint({
1103 1096 'instance_config': config,
1104 1097 'user_data': WINDOWS_USER_DATA,
1105 1098 'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
1106 1099 'bootstrap_commands': commands,
1107 1100 'base_image_name': base_image_name,
1108 1101 })
1109 1102
1110 1103 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
1111 1104
1112 1105 if existing_image:
1113 1106 return existing_image
1114 1107
1115 1108 print('no suitable Windows development image found; creating one...')
1116 1109
1117 1110 with create_temp_windows_ec2_instances(c, config) as instances:
1118 1111 assert len(instances) == 1
1119 1112 instance = instances[0]
1120 1113
1121 1114 wait_for_ssm(ssmclient, [instance])
1122 1115
1123 1116 # On first boot, install various Windows updates.
1124 1117 # We would ideally use PowerShell Remoting for this. However, there are
1125 1118 # trust issues that make it difficult to invoke Windows Update
1126 1119 # remotely. So we use SSM, which has a mechanism for running Windows
1127 1120 # Update.
1128 1121 print('installing Windows features...')
1129 1122 run_ssm_command(
1130 1123 ssmclient,
1131 1124 [instance],
1132 1125 'AWS-RunPowerShellScript',
1133 1126 {
1134 1127 'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),
1135 1128 },
1136 1129 )
1137 1130
1138 1131 # Reboot so all updates are fully applied.
1139 1132 #
1140 1133 # We don't use instance.reboot() here because it is asynchronous and
1141 1134 # we don't know when exactly the instance has rebooted. It could take
1142 1135 # a while to stop and we may start trying to interact with the instance
1143 1136 # before it has rebooted.
1144 1137 print('rebooting instance %s' % instance.id)
1145 1138 instance.stop()
1146 1139 ec2client.get_waiter('instance_stopped').wait(
1147 1140 InstanceIds=[instance.id],
1148 1141 WaiterConfig={
1149 1142 'Delay': 5,
1150 1143 })
1151 1144
1152 1145 instance.start()
1153 1146 wait_for_ip_addresses([instance])
1154 1147
1155 1148 # There is a race condition here between the User Data PS script running
1156 1149 # and us connecting to WinRM. This can manifest as
1157 1150 # "AuthorizationManager check failed" failures during run_powershell().
1158 1151 # TODO figure out a workaround.
1159 1152
1160 1153 print('waiting for Windows Remote Management to come back...')
1161 1154 client = wait_for_winrm(instance.public_ip_address, 'Administrator',
1162 1155 c.automation.default_password())
1163 1156 print('established WinRM connection to %s' % instance.id)
1164 1157 instance.winrm_client = client
1165 1158
1166 1159 print('bootstrapping instance...')
1167 1160 run_powershell(instance.winrm_client, '\n'.join(commands))
1168 1161
1169 1162 print('bootstrap completed; stopping %s to create image' % instance.id)
1170 1163 return create_ami_from_instance(ec2client, instance, name,
1171 1164 'Mercurial Windows development environment',
1172 1165 fingerprint)
1173 1166
1174 1167
1175 1168 @contextlib.contextmanager
1176 1169 def temporary_windows_dev_instances(c: AWSConnection, image, instance_type,
1177 1170 prefix='hg-', disable_antivirus=False):
1178 1171 """Create a temporary Windows development EC2 instance.
1179 1172
1180 1173 Context manager resolves to the list of ``EC2.Instance`` that were created.
1181 1174 """
1182 1175 config = {
1183 1176 'BlockDeviceMappings': [
1184 1177 {
1185 1178 'DeviceName': '/dev/sda1',
1186 1179 'Ebs': {
1187 1180 'DeleteOnTermination': True,
1188 1181 'VolumeSize': 32,
1189 1182 'VolumeType': 'gp2',
1190 1183 },
1191 1184 }
1192 1185 ],
1193 1186 'ImageId': image.id,
1194 1187 'InstanceInitiatedShutdownBehavior': 'stop',
1195 1188 'InstanceType': instance_type,
1196 1189 'KeyName': '%sautomation' % prefix,
1197 1190 'MaxCount': 1,
1198 1191 'MinCount': 1,
1199 1192 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1200 1193 }
1201 1194
1202 1195 with create_temp_windows_ec2_instances(c, config) as instances:
1203 1196 if disable_antivirus:
1204 1197 for instance in instances:
1205 1198 run_powershell(
1206 1199 instance.winrm_client,
1207 1200 'Set-MpPreference -DisableRealtimeMonitoring $true')
1208 1201
1209 1202 yield instances
@@ -1,561 +1,560 b''
1 1 # linux.py - Linux specific automation functionality
2 2 #
3 3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # no-check-code because Python 3 native.
9 9
10 10 import os
11 11 import pathlib
12 12 import shlex
13 13 import subprocess
14 14 import tempfile
15 15
16 16 from .ssh import (
17 17 exec_command,
18 18 )
19 19
20 20
21 21 # Linux distributions that are supported.
22 22 DISTROS = {
23 23 'debian9',
24 24 'ubuntu18.04',
25 'ubuntu18.10',
26 25 'ubuntu19.04',
27 26 }
28 27
29 28 INSTALL_PYTHONS = r'''
30 29 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
31 30 PYENV3_VERSIONS="3.5.7 3.6.9 3.7.4 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
32 31
33 32 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
34 33 pushd /hgdev/pyenv
35 34 git checkout 17f44b7cd6f58ea2fa68ec0371fb9e7a826b8be2
36 35 popd
37 36
38 37 export PYENV_ROOT="/hgdev/pyenv"
39 38 export PATH="$PYENV_ROOT/bin:$PATH"
40 39
41 40 # pip 19.0.3.
42 41 PIP_SHA256=efe99298f3fbb1f56201ce6b81d2658067d2f7d7dfc2d412e0d3cacc9a397c61
43 42 wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/fee32c376da1ff6496a798986d7939cd51e1644f/get-pip.py
44 43 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
45 44
46 45 VIRTUALENV_SHA256=984d7e607b0a5d1329425dd8845bd971b957424b5ba664729fab51ab8c11bc39
47 46 VIRTUALENV_TARBALL=virtualenv-16.4.3.tar.gz
48 47 wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/37/db/89d6b043b22052109da35416abc3c397655e4bd3cff031446ba02b9654fa/${VIRTUALENV_TARBALL}
49 48 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
50 49
51 50 for v in ${PYENV2_VERSIONS}; do
52 51 pyenv install -v ${v}
53 52 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
54 53 ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
55 54 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
56 55 done
57 56
58 57 for v in ${PYENV3_VERSIONS}; do
59 58 pyenv install -v ${v}
60 59 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
61 60 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
62 61 done
63 62
64 63 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
65 64 '''.lstrip().replace('\r\n', '\n')
66 65
67 66
68 67 INSTALL_RUST = r'''
69 68 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
70 69 wget -O rustup-init --progress dot:mega https://static.rust-lang.org/rustup/archive/1.18.3/x86_64-unknown-linux-gnu/rustup-init
71 70 echo "${RUSTUP_INIT_SHA256} rustup-init" | sha256sum --check -
72 71
73 72 chmod +x rustup-init
74 73 sudo -H -u hg -g hg ./rustup-init -y
75 74 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.34.2
76 75 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
77 76 '''
78 77
79 78
80 79 BOOTSTRAP_VIRTUALENV = r'''
81 80 /usr/bin/virtualenv /hgdev/venv-bootstrap
82 81
83 82 HG_SHA256=1bdd21bb87d1e05fb5cd395d488d0e0cc2f2f90ce0fd248e31a03595da5ccb47
84 83 HG_TARBALL=mercurial-4.9.1.tar.gz
85 84
86 85 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
87 86 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
88 87
89 88 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
90 89 '''.lstrip().replace('\r\n', '\n')
91 90
92 91
93 92 BOOTSTRAP_DEBIAN = r'''
94 93 #!/bin/bash
95 94
96 95 set -ex
97 96
98 97 DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
99 98 DEBIAN_VERSION=`cat /etc/debian_version`
100 99 LSB_RELEASE=`lsb_release -cs`
101 100
102 101 sudo /usr/sbin/groupadd hg
103 102 sudo /usr/sbin/groupadd docker
104 103 sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
105 104 sudo mkdir /home/hg/.ssh
106 105 sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
107 106 sudo chown -R hg:hg /home/hg/.ssh
108 107 sudo chmod 700 /home/hg/.ssh
109 108 sudo chmod 600 /home/hg/.ssh/authorized_keys
110 109
111 110 cat << EOF | sudo tee /etc/sudoers.d/90-hg
112 111 hg ALL=(ALL) NOPASSWD:ALL
113 112 EOF
114 113
115 114 sudo apt-get update
116 115 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade
117 116
118 117 # Install packages necessary to set up Docker Apt repo.
119 118 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
120 119 apt-transport-https \
121 120 gnupg
122 121
123 122 cat > docker-apt-key << EOF
124 123 -----BEGIN PGP PUBLIC KEY BLOCK-----
125 124
126 125 mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
127 126 lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
128 127 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
129 128 L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
130 129 UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
131 130 cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
132 131 ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
133 132 vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
134 133 G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
135 134 XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
136 135 q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
137 136 tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
138 137 BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
139 138 v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
140 139 tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
141 140 jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
142 141 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
143 142 XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
144 143 FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
145 144 g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
146 145 ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
147 146 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
148 147 G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
149 148 FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
150 149 EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
151 150 M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
152 151 Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
153 152 w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
154 153 z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
155 154 eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
156 155 VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
157 156 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
158 157 zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
159 158 pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
160 159 ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
161 160 BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
162 161 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
163 162 YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
164 163 mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
165 164 KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
166 165 JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
167 166 cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
168 167 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
169 168 U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
170 169 VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
171 170 irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
172 171 SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
173 172 QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
174 173 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
175 174 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
176 175 dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
177 176 Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
178 177 H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
179 178 /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
180 179 M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
181 180 xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
182 181 jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
183 182 YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
184 183 =0YYh
185 184 -----END PGP PUBLIC KEY BLOCK-----
186 185 EOF
187 186
188 187 sudo apt-key add docker-apt-key
189 188
190 189 if [ "$LSB_RELEASE" = "stretch" ]; then
191 190 cat << EOF | sudo tee -a /etc/apt/sources.list
192 191 # Need backports for clang-format-6.0
193 192 deb http://deb.debian.org/debian stretch-backports main
194 193
195 194 # Sources are useful if we want to compile things locally.
196 195 deb-src http://deb.debian.org/debian stretch main
197 196 deb-src http://security.debian.org/debian-security stretch/updates main
198 197 deb-src http://deb.debian.org/debian stretch-updates main
199 198 deb-src http://deb.debian.org/debian stretch-backports main
200 199
201 200 deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
202 201 EOF
203 202
204 203 elif [ "$DISTRO" = "Ubuntu" ]; then
205 204 cat << EOF | sudo tee -a /etc/apt/sources.list
206 205 deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
207 206 EOF
208 207
209 208 fi
210 209
211 210 sudo apt-get update
212 211
213 212 PACKAGES="\
214 213 btrfs-progs \
215 214 build-essential \
216 215 bzr \
217 216 clang-format-6.0 \
218 217 cvs \
219 218 darcs \
220 219 debhelper \
221 220 devscripts \
222 221 docker-ce \
223 222 dpkg-dev \
224 223 dstat \
225 224 emacs \
226 225 gettext \
227 226 git \
228 227 htop \
229 228 iotop \
230 229 jfsutils \
231 230 libbz2-dev \
232 231 libexpat1-dev \
233 232 libffi-dev \
234 233 libgdbm-dev \
235 234 liblzma-dev \
236 235 libncurses5-dev \
237 236 libnss3-dev \
238 237 libreadline-dev \
239 238 libsqlite3-dev \
240 239 libssl-dev \
241 240 netbase \
242 241 ntfs-3g \
243 242 nvme-cli \
244 243 pyflakes \
245 244 pyflakes3 \
246 245 pylint \
247 246 pylint3 \
248 247 python-all-dev \
249 248 python-dev \
250 249 python-docutils \
251 250 python-fuzzywuzzy \
252 251 python-pygments \
253 252 python-subversion \
254 253 python-vcr \
255 254 python3-dev \
256 255 python3-docutils \
257 256 python3-fuzzywuzzy \
258 257 python3-pygments \
259 258 python3-vcr \
260 259 rsync \
261 260 sqlite3 \
262 261 subversion \
263 262 tcl-dev \
264 263 tk-dev \
265 264 tla \
266 265 unzip \
267 266 uuid-dev \
268 267 vim \
269 268 virtualenv \
270 269 wget \
271 270 xfsprogs \
272 271 zip \
273 272 zlib1g-dev"
274 273
275 274 if [ "LSB_RELEASE" = "stretch" ]; then
276 275 PACKAGES="$PACKAGES linux-perf"
277 276 elif [ "$DISTRO" = "Ubuntu" ]; then
278 277 PACKAGES="$PACKAGES linux-tools-common"
279 278 fi
280 279
281 280 # Ubuntu 19.04 removes monotone.
282 281 if [ "$LSB_RELEASE" != "disco" ]; then
283 282 PACKAGES="$PACKAGES monotone"
284 283 fi
285 284
286 285 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
287 286
288 287 # Create clang-format symlink so test harness finds it.
289 288 sudo update-alternatives --install /usr/bin/clang-format clang-format \
290 289 /usr/bin/clang-format-6.0 1000
291 290
292 291 sudo mkdir /hgdev
293 292 # Will be normalized to hg:hg later.
294 293 sudo chown `whoami` /hgdev
295 294
296 295 {install_rust}
297 296
298 297 cp requirements-py2.txt /hgdev/requirements-py2.txt
299 298 cp requirements-py3.txt /hgdev/requirements-py3.txt
300 299
301 300 # Disable the pip version check because it uses the network and can
302 301 # be annoying.
303 302 cat << EOF | sudo tee -a /etc/pip.conf
304 303 [global]
305 304 disable-pip-version-check = True
306 305 EOF
307 306
308 307 {install_pythons}
309 308 {bootstrap_virtualenv}
310 309
311 310 /hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src
312 311
313 312 # Mark the repo as non-publishing.
314 313 cat >> /hgdev/src/.hg/hgrc << EOF
315 314 [phases]
316 315 publish = false
317 316 EOF
318 317
319 318 sudo chown -R hg:hg /hgdev
320 319 '''.lstrip().format(
321 320 install_rust=INSTALL_RUST,
322 321 install_pythons=INSTALL_PYTHONS,
323 322 bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
324 323 ).replace('\r\n', '\n')
325 324
326 325
327 326 # Prepares /hgdev for operations.
328 327 PREPARE_HGDEV = '''
329 328 #!/bin/bash
330 329
331 330 set -e
332 331
333 332 FS=$1
334 333
335 334 ensure_device() {
336 335 if [ -z "${DEVICE}" ]; then
337 336 echo "could not find block device to format"
338 337 exit 1
339 338 fi
340 339 }
341 340
342 341 # Determine device to partition for extra filesystem.
343 342 # If only 1 volume is present, it will be the root volume and
344 343 # should be /dev/nvme0. If multiple volumes are present, the
345 344 # root volume could be nvme0 or nvme1. Use whichever one doesn't have
346 345 # a partition.
347 346 if [ -e /dev/nvme1n1 ]; then
348 347 if [ -e /dev/nvme0n1p1 ]; then
349 348 DEVICE=/dev/nvme1n1
350 349 else
351 350 DEVICE=/dev/nvme0n1
352 351 fi
353 352 else
354 353 DEVICE=
355 354 fi
356 355
357 356 sudo mkdir /hgwork
358 357
359 358 if [ "${FS}" != "default" -a "${FS}" != "tmpfs" ]; then
360 359 ensure_device
361 360 echo "creating ${FS} filesystem on ${DEVICE}"
362 361 fi
363 362
364 363 if [ "${FS}" = "default" ]; then
365 364 :
366 365
367 366 elif [ "${FS}" = "btrfs" ]; then
368 367 sudo mkfs.btrfs ${DEVICE}
369 368 sudo mount ${DEVICE} /hgwork
370 369
371 370 elif [ "${FS}" = "ext3" ]; then
372 371 # lazy_journal_init speeds up filesystem creation at the expense of
373 372 # integrity if things crash. We are an ephemeral instance, so we don't
374 373 # care about integrity.
375 374 sudo mkfs.ext3 -E lazy_journal_init=1 ${DEVICE}
376 375 sudo mount ${DEVICE} /hgwork
377 376
378 377 elif [ "${FS}" = "ext4" ]; then
379 378 sudo mkfs.ext4 -E lazy_journal_init=1 ${DEVICE}
380 379 sudo mount ${DEVICE} /hgwork
381 380
382 381 elif [ "${FS}" = "jfs" ]; then
383 382 sudo mkfs.jfs ${DEVICE}
384 383 sudo mount ${DEVICE} /hgwork
385 384
386 385 elif [ "${FS}" = "tmpfs" ]; then
387 386 echo "creating tmpfs volume in /hgwork"
388 387 sudo mount -t tmpfs -o size=1024M tmpfs /hgwork
389 388
390 389 elif [ "${FS}" = "xfs" ]; then
391 390 sudo mkfs.xfs ${DEVICE}
392 391 sudo mount ${DEVICE} /hgwork
393 392
394 393 else
395 394 echo "unsupported filesystem: ${FS}"
396 395 exit 1
397 396 fi
398 397
399 398 echo "/hgwork ready"
400 399
401 400 sudo chown hg:hg /hgwork
402 401 mkdir /hgwork/tmp
403 402 chown hg:hg /hgwork/tmp
404 403
405 404 rsync -a /hgdev/src /hgwork/
406 405 '''.lstrip().replace('\r\n', '\n')
407 406
408 407
409 408 HG_UPDATE_CLEAN = '''
410 409 set -ex
411 410
412 411 HG=/hgdev/venv-bootstrap/bin/hg
413 412
414 413 cd /hgwork/src
415 414 ${HG} --config extensions.purge= purge --all
416 415 ${HG} update -C $1
417 416 ${HG} log -r .
418 417 '''.lstrip().replace('\r\n', '\n')
419 418
420 419
421 420 def prepare_exec_environment(ssh_client, filesystem='default'):
422 421 """Prepare an EC2 instance to execute things.
423 422
424 423 The AMI has an ``/hgdev`` bootstrapped with various Python installs
425 424 and a clone of the Mercurial repo.
426 425
427 426 In EC2, EBS volumes launched from snapshots have wonky performance behavior.
428 427 Notably, blocks have to be copied on first access, which makes volume
429 428 I/O extremely slow on fresh volumes.
430 429
431 430 Furthermore, we may want to run operations, tests, etc. on alternative
432 431 filesystems so we can examine behavior across filesystems.
433 432
434 433 This function is used to facilitate executing operations on alternate
435 434 volumes.
436 435 """
437 436 sftp = ssh_client.open_sftp()
438 437
439 438 with sftp.open('/hgdev/prepare-hgdev', 'wb') as fh:
440 439 fh.write(PREPARE_HGDEV)
441 440 fh.chmod(0o0777)
442 441
443 442 command = 'sudo /hgdev/prepare-hgdev %s' % filesystem
444 443 chan, stdin, stdout = exec_command(ssh_client, command)
445 444 stdin.close()
446 445
447 446 for line in stdout:
448 447 print(line, end='')
449 448
450 449 res = chan.recv_exit_status()
451 450
452 451 if res:
453 452 raise Exception('non-0 exit code preparing execution environment; %d'
454 453 % res)
455 454
456 455
457 456 def synchronize_hg(source_path: pathlib.Path, ec2_instance, revision: str=None):
458 457 """Synchronize a local Mercurial source path to remote EC2 instance."""
459 458
460 459 with tempfile.TemporaryDirectory() as temp_dir:
461 460 temp_dir = pathlib.Path(temp_dir)
462 461
463 462 ssh_dir = temp_dir / '.ssh'
464 463 ssh_dir.mkdir()
465 464 ssh_dir.chmod(0o0700)
466 465
467 466 public_ip = ec2_instance.public_ip_address
468 467
469 468 ssh_config = ssh_dir / 'config'
470 469
471 470 with ssh_config.open('w', encoding='utf-8') as fh:
472 471 fh.write('Host %s\n' % public_ip)
473 472 fh.write(' User hg\n')
474 473 fh.write(' StrictHostKeyChecking no\n')
475 474 fh.write(' UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
476 475 fh.write(' IdentityFile %s\n' % ec2_instance.ssh_private_key_path)
477 476
478 477 if not (source_path / '.hg').is_dir():
479 478 raise Exception('%s is not a Mercurial repository; synchronization '
480 479 'not yet supported' % source_path)
481 480
482 481 env = dict(os.environ)
483 482 env['HGPLAIN'] = '1'
484 483 env['HGENCODING'] = 'utf-8'
485 484
486 485 hg_bin = source_path / 'hg'
487 486
488 487 res = subprocess.run(
489 488 ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
490 489 cwd=str(source_path), env=env, check=True, capture_output=True)
491 490
492 491 full_revision = res.stdout.decode('ascii')
493 492
494 493 args = [
495 494 'python2.7', str(hg_bin),
496 495 '--config', 'ui.ssh=ssh -F %s' % ssh_config,
497 496 '--config', 'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
498 497 # Also ensure .hgtags changes are present so auto version
499 498 # calculation works.
500 499 'push', '-f', '-r', full_revision, '-r', 'file(.hgtags)',
501 500 'ssh://%s//hgwork/src' % public_ip,
502 501 ]
503 502
504 503 res = subprocess.run(args, cwd=str(source_path), env=env)
505 504
506 505 # Allow 1 (no-op) to not trigger error.
507 506 if res.returncode not in (0, 1):
508 507 res.check_returncode()
509 508
510 509 # TODO support synchronizing dirty working directory.
511 510
512 511 sftp = ec2_instance.ssh_client.open_sftp()
513 512
514 513 with sftp.open('/hgdev/hgup', 'wb') as fh:
515 514 fh.write(HG_UPDATE_CLEAN)
516 515 fh.chmod(0o0700)
517 516
518 517 chan, stdin, stdout = exec_command(
519 518 ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision)
520 519 stdin.close()
521 520
522 521 for line in stdout:
523 522 print(line, end='')
524 523
525 524 res = chan.recv_exit_status()
526 525
527 526 if res:
528 527 raise Exception('non-0 exit code updating working directory; %d'
529 528 % res)
530 529
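# A hedged usage sketch (not part of the original module): source_path is a
# local Mercurial clone (the path is a placeholder) and ``instance`` was
# produced by temporary_linux_dev_instances() in aws.py, so it carries the
# ssh_client and ssh_private_key_path attributes used above.
synchronize_hg(pathlib.Path('/path/to/hg-repo'), instance, revision='.')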
531 530
532 531 def run_tests(ssh_client, python_version, test_flags=None):
533 532 """Run tests on a remote Linux machine via an SSH client."""
534 533 test_flags = test_flags or []
535 534
536 535 print('running tests')
537 536
538 537 if python_version == 'system2':
539 538 python = '/usr/bin/python2'
540 539 elif python_version == 'system3':
541 540 python = '/usr/bin/python3'
542 541 elif python_version.startswith('pypy'):
543 542 python = '/hgdev/pyenv/shims/%s' % python_version
544 543 else:
545 544 python = '/hgdev/pyenv/shims/python%s' % python_version
546 545
547 546 test_flags = ' '.join(shlex.quote(a) for a in test_flags)
548 547
549 548 command = (
550 549 '/bin/sh -c "export TMPDIR=/hgwork/tmp; '
551 550 'cd /hgwork/src/tests && %s run-tests.py %s"' % (
552 551 python, test_flags))
553 552
554 553 chan, stdin, stdout = exec_command(ssh_client, command)
555 554
556 555 stdin.close()
557 556
558 557 for line in stdout:
559 558 print(line, end='')
560 559
561 560 return chan.recv_exit_status()
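# A hedged usage sketch (not part of the original module): ssh_client is a
# paramiko SSHClient such as the instance.ssh_client attribute set up by
# temporary_linux_dev_instances() in aws.py; the flags are ordinary
# run-tests.py arguments.
rc = run_tests(instance.ssh_client, '3.7',
               test_flags=['-j', '2', 'test-commit.t'])
if rc:
    raise SystemExit('tests exited with code %d' % rc)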