##// END OF EJS Templates
automation: remove Ubuntu 18.10...
Gregory Szorc -
r43283:3aa227fe default
parent child Browse files
Show More
@@ -1,1209 +1,1202 b''
1 # aws.py - Automation code for Amazon Web Services
1 # aws.py - Automation code for Amazon Web Services
2 #
2 #
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # no-check-code because Python 3 native.
8 # no-check-code because Python 3 native.
9
9
10 import contextlib
10 import contextlib
11 import copy
11 import copy
12 import hashlib
12 import hashlib
13 import json
13 import json
14 import os
14 import os
15 import pathlib
15 import pathlib
16 import subprocess
16 import subprocess
17 import time
17 import time
18
18
19 import boto3
19 import boto3
20 import botocore.exceptions
20 import botocore.exceptions
21
21
22 from .linux import (
22 from .linux import (
23 BOOTSTRAP_DEBIAN,
23 BOOTSTRAP_DEBIAN,
24 )
24 )
25 from .ssh import (
25 from .ssh import (
26 exec_command as ssh_exec_command,
26 exec_command as ssh_exec_command,
27 wait_for_ssh,
27 wait_for_ssh,
28 )
28 )
29 from .winrm import (
29 from .winrm import (
30 run_powershell,
30 run_powershell,
31 wait_for_winrm,
31 wait_for_winrm,
32 )
32 )
33
33
34
34
# Root of the Mercurial source checkout. This file lives at
# contrib/automation/hgautomation/aws.py, hence four .parent hops.
SOURCE_ROOT = pathlib.Path(os.path.abspath(__file__)).parent.parent.parent.parent

# PowerShell script executed on Windows instances to install build
# dependencies.
INSTALL_WINDOWS_DEPENDENCIES = (SOURCE_ROOT / 'contrib' /
                                'install-windows-dependencies.ps1')


# EC2 instance type families that come with local instance storage
# (checked by prefix against the requested instance type).
INSTANCE_TYPES_WITH_STORAGE = {
    'c5d',
    'd2',
    'h1',
    'i3',
    'm5ad',
    'm5d',
    'r5d',
    'r5ad',
    'x1',
    'z1d',
}


# AWS account IDs owning the base AMIs we look up via find_image().
AMAZON_ACCOUNT_ID = '801119661308'
DEBIAN_ACCOUNT_ID = '379101102735'
UBUNTU_ACCOUNT_ID = '099720109477'


# Name of the Windows Server AMI used as the base for Windows instances.
WINDOWS_BASE_IMAGE_NAME = 'Windows_Server-2019-English-Full-Base-2019.07.12'


# Unprefixed names of EC2 key pairs we manage (actual names carry the
# 'hg-' prefix).
KEY_PAIRS = {
    'automation',
}


# Canonical definition of the security groups we manage. Keys are
# unprefixed group names; values hold the description and ingress rules
# passed to authorize_ingress().
SECURITY_GROUPS = {
    'linux-dev-1': {
        'description': 'Mercurial Linux instances that perform build/test automation',
        'ingress': [
            {
                'FromPort': 22,
                'ToPort': 22,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'SSH from entire Internet',
                    },
                ],
            },
        ],
    },
    'windows-dev-1': {
        'description': 'Mercurial Windows instances that perform build automation',
        'ingress': [
            {
                'FromPort': 22,
                'ToPort': 22,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'SSH from entire Internet',
                    },
                ],
            },
            {
                'FromPort': 3389,
                'ToPort': 3389,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'RDP from entire Internet',
                    },
                ],

            },
            {
                'FromPort': 5985,
                'ToPort': 5986,
                'IpProtocol': 'tcp',
                'IpRanges': [
                    {
                        'CidrIp': '0.0.0.0/0',
                        'Description': 'PowerShell Remoting (Windows Remote Management)',
                    },
                ],
            }
        ],
    },
}


# Canonical definition of the IAM roles we manage. Values hold the role
# description and the managed policy ARNs attached to the role.
IAM_ROLES = {
    'ephemeral-ec2-role-1': {
        'description': 'Mercurial temporary EC2 instances',
        'policy_arns': [
            'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM',
        ],
    },
}


# Trust policy attached to every role we create; allows EC2 to assume
# the role.
ASSUME_ROLE_POLICY_DOCUMENT = '''
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
'''.strip()


# Canonical definition of IAM instance profiles and the roles attached
# to each.
IAM_INSTANCE_PROFILES = {
    'ephemeral-ec2-1': {
        'roles': [
            'ephemeral-ec2-role-1',
        ],
    }
}


# User Data for Windows EC2 instance. Mainly used to set the password
# and configure WinRM.
# Inspired by the User Data script used by Packer
# (from https://www.packer.io/intro/getting-started/build-image.html).
# NOTE: contains a '%s' placeholder that is filled with the
# Administrator password before being passed to EC2.
WINDOWS_USER_DATA = r'''
<powershell>

# TODO enable this once we figure out what is failing.
#$ErrorActionPreference = "stop"

# Set administrator password
net user Administrator "%s"
wmic useraccount where "name='Administrator'" set PasswordExpires=FALSE

# First, make sure WinRM can't be connected to
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new enable=yes action=block

# Delete any existing WinRM listeners
winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null
winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null

# Create a new WinRM listener and configure
winrm create winrm/config/listener?Address=*+Transport=HTTP
winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="0"}'
winrm set winrm/config '@{MaxTimeoutms="7200000"}'
winrm set winrm/config/service '@{AllowUnencrypted="true"}'
winrm set winrm/config/service '@{MaxConcurrentOperationsPerUser="12000"}'
winrm set winrm/config/service/auth '@{Basic="true"}'
winrm set winrm/config/client/auth '@{Basic="true"}'

# Configure UAC to allow privilege elevation in remote shells
$Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System'
$Setting = 'LocalAccountTokenFilterPolicy'
Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force

# Configure and restart the WinRM Service; Enable the required firewall exception
Stop-Service -Name WinRM
Set-Service -Name WinRM -StartupType Automatic
netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any
Start-Service -Name WinRM

# Disable firewall on private network interfaces so prompts don't appear.
Set-NetFirewallProfile -Name private -Enabled false
</powershell>
'''.lstrip()


# PowerShell commands run over WinRM to bootstrap a fresh Windows
# instance (SSH server, NuGet, .NET Framework feature).
WINDOWS_BOOTSTRAP_POWERSHELL = '''
Write-Output "installing PowerShell dependencies"
Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force
Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
Install-Module -Name OpenSSHUtils -RequiredVersion 0.0.2.0

Write-Output "installing OpenSSL server"
Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0
# Various tools will attempt to use older versions of .NET. So we enable
# the feature that provides them so it doesn't have to be auto-enabled
# later.
Write-Output "enabling .NET Framework feature"
Install-WindowsFeature -Name Net-Framework-Core
'''
223
223
224
224
class AWSConnection:
    """Manages the state of a connection with AWS.

    Construction creates boto3 EC2/IAM clients and resources for the
    given region. Unless ``ensure_ec2_state`` is False, it also
    reconciles key pairs, security groups, and IAM entities against our
    canonical definitions.
    """

    def __init__(self, automation, region: str, ensure_ec2_state: bool=True):
        self.automation = automation
        self.local_state_path = automation.state_path
        self.prefix = 'hg-'

        session = boto3.session.Session(region_name=region)
        self.session = session
        self.ec2client = session.client('ec2')
        self.ec2resource = session.resource('ec2')
        self.iamclient = session.client('iam')
        self.iamresource = session.resource('iam')

        # Filled in by ensure_security_groups() when state syncing runs.
        self.security_groups = {}

        if ensure_ec2_state:
            ensure_key_pairs(automation.state_path, self.ec2resource)
            self.security_groups = ensure_security_groups(self.ec2resource)
            ensure_iam_state(self.iamclient, self.iamresource)

    def key_pair_path_private(self, name):
        """Path to a key pair private key file."""
        return self.local_state_path / 'keys' / ('keypair-%s' % name)

    def key_pair_path_public(self, name):
        """Path to a key pair public key file."""
        return self.local_state_path / 'keys' / ('keypair-%s.pub' % name)
252
252
253
253
def rsa_key_fingerprint(p: pathlib.Path):
    """Compute the fingerprint of an RSA private key.

    The key at *p* is converted to PKCS#8 DER via ``openssl`` and the
    SHA-1 of that encoding is returned as colon-separated hex pairs.
    """
    # TODO use rsa package.
    der = subprocess.run(
        ['openssl', 'pkcs8', '-in', str(p), '-nocrypt', '-topk8',
         '-outform', 'DER'],
        capture_output=True,
        check=True,
    ).stdout

    digest = hashlib.sha1(der).hexdigest()
    # Render as aa:bb:cc:... by slicing the hex digest into byte pairs.
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
266
266
267
267
def ensure_key_pairs(state_path: pathlib.Path, ec2resource, prefix='hg-'):
    """Reconcile EC2 key pairs with the locally stored key files.

    Remote key pairs carrying *prefix* are compared (by RSA fingerprint)
    against the ``keypair-<name>``/``keypair-<name>.pub`` files under
    ``state_path/keys``. Keys existing only on one side, or whose
    fingerprints disagree, are deleted from both sides; key pairs named
    in ``KEY_PAIRS`` that end up missing remotely are (re)created and
    written locally.
    """
    # Map of unprefixed key pair name -> fingerprint reported by EC2.
    remote_existing = {}

    for kpi in ec2resource.key_pairs.all():
        if kpi.name.startswith(prefix):
            remote_existing[kpi.name[len(prefix):]] = kpi.key_fingerprint

    # Validate that we have these keys locally.
    key_path = state_path / 'keys'
    key_path.mkdir(exist_ok=True, mode=0o700)

    def remove_remote(name):
        # Delete the key pair from EC2 (name includes the prefix).
        print('deleting key pair %s' % name)
        key = ec2resource.KeyPair(name)
        key.delete()

    def remove_local(name):
        # Delete both halves of the local key pair (name is unprefixed).
        pub_full = key_path / ('keypair-%s.pub' % name)
        priv_full = key_path / ('keypair-%s' % name)

        print('removing %s' % pub_full)
        pub_full.unlink()
        print('removing %s' % priv_full)
        priv_full.unlink()

    # Map of unprefixed key pair name -> fingerprint of the local key.
    local_existing = {}

    for f in sorted(os.listdir(key_path)):
        # Only .pub files drive discovery; the private key is derived.
        if not f.startswith('keypair-') or not f.endswith('.pub'):
            continue

        name = f[len('keypair-'):-len('.pub')]

        pub_full = key_path / f
        priv_full = key_path / ('keypair-%s' % name)

        with open(pub_full, 'r', encoding='ascii') as fh:
            data = fh.read()

        # Anything that isn't an RSA public key is treated as corrupt
        # state and purged along with its private half.
        if not data.startswith('ssh-rsa '):
            print('unexpected format for key pair file: %s; removing' %
                  pub_full)
            pub_full.unlink()
            priv_full.unlink()
            continue

        local_existing[name] = rsa_key_fingerprint(priv_full)

    # Reconcile the union of known names: one-sided or mismatched keys
    # are removed so they can be recreated cleanly below.
    for name in sorted(set(remote_existing) | set(local_existing)):
        if name not in local_existing:
            actual = '%s%s' % (prefix, name)
            print('remote key %s does not exist locally' % name)
            remove_remote(actual)
            del remote_existing[name]

        elif name not in remote_existing:
            print('local key %s does not exist remotely' % name)
            remove_local(name)
            del local_existing[name]

        elif remote_existing[name] != local_existing[name]:
            print('key fingerprint mismatch for %s; '
                  'removing from local and remote' % name)
            remove_local(name)
            remove_remote('%s%s' % (prefix, name))
            del local_existing[name]
            del remote_existing[name]

    missing = KEY_PAIRS - set(remote_existing)

    for name in sorted(missing):
        actual = '%s%s' % (prefix, name)
        print('creating key pair %s' % actual)

        priv_full = key_path / ('keypair-%s' % name)
        pub_full = key_path / ('keypair-%s.pub' % name)

        # EC2 generates the key pair and returns the private key material.
        kp = ec2resource.create_key_pair(KeyName=actual)

        with priv_full.open('w', encoding='ascii') as fh:
            fh.write(kp.key_material)
            fh.write('\n')

        priv_full.chmod(0o0600)

        # SSH public key can be extracted via `ssh-keygen`.
        with pub_full.open('w', encoding='ascii') as fh:
            subprocess.run(
                ['ssh-keygen', '-y', '-f', str(priv_full)],
                stdout=fh,
                check=True)

        pub_full.chmod(0o0600)
361
361
362
362
def delete_instance_profile(profile):
    """Detach all roles from an IAM instance profile, then delete it."""
    # Roles must be detached before the profile itself can be deleted.
    for attached in list(profile.roles):
        print('removing role %s from instance profile %s' % (attached.name,
                                                             profile.name))
        profile.remove_role(RoleName=attached.name)

    print('deleting instance profile %s' % profile.name)
    profile.delete()
371
371
372
372
def ensure_iam_state(iamclient, iamresource, prefix='hg-'):
    """Ensure IAM state is in sync with our canonical definition."""

    # Map of unprefixed profile name -> existing instance profile.
    remote_profiles = {}

    for profile in iamresource.instance_profiles.all():
        if profile.name.startswith(prefix):
            remote_profiles[profile.name[len(prefix):]] = profile

    # Delete prefixed profiles we no longer define.
    for name in sorted(set(remote_profiles) - set(IAM_INSTANCE_PROFILES)):
        delete_instance_profile(remote_profiles[name])
        del remote_profiles[name]

    # Map of unprefixed role name -> existing role.
    remote_roles = {}

    for role in iamresource.roles.all():
        if role.name.startswith(prefix):
            remote_roles[role.name[len(prefix):]] = role

    # Delete prefixed roles we no longer define.
    for name in sorted(set(remote_roles) - set(IAM_ROLES)):
        role = remote_roles[name]

        print('removing role %s' % role.name)
        role.delete()
        del remote_roles[name]

    # We've purged remote state that doesn't belong. Create missing
    # instance profiles and roles.
    for name in sorted(set(IAM_INSTANCE_PROFILES) - set(remote_profiles)):
        actual = '%s%s' % (prefix, name)
        print('creating IAM instance profile %s' % actual)

        profile = iamresource.create_instance_profile(
            InstanceProfileName=actual)
        remote_profiles[name] = profile

        # Creation is asynchronous; block until the profile exists.
        waiter = iamclient.get_waiter('instance_profile_exists')
        waiter.wait(InstanceProfileName=actual)
        print('IAM instance profile %s is available' % actual)

    for name in sorted(set(IAM_ROLES) - set(remote_roles)):
        entry = IAM_ROLES[name]

        actual = '%s%s' % (prefix, name)
        print('creating IAM role %s' % actual)

        role = iamresource.create_role(
            RoleName=actual,
            Description=entry['description'],
            AssumeRolePolicyDocument=ASSUME_ROLE_POLICY_DOCUMENT,
        )

        # Creation is asynchronous; block until the role exists.
        waiter = iamclient.get_waiter('role_exists')
        waiter.wait(RoleName=actual)
        print('IAM role %s is available' % actual)

        remote_roles[name] = role

        for arn in entry['policy_arns']:
            print('attaching policy %s to %s' % (arn, role.name))
            role.attach_policy(PolicyArn=arn)

    # Now reconcile state of profiles.
    for name, meta in sorted(IAM_INSTANCE_PROFILES.items()):
        profile = remote_profiles[name]
        # Compare the (prefixed) role names the profile should carry
        # against those it actually carries.
        wanted = {'%s%s' % (prefix, role) for role in meta['roles']}
        have = {role.name for role in profile.roles}

        for role in sorted(have - wanted):
            print('removing role %s from %s' % (role, profile.name))
            profile.remove_role(RoleName=role)

        for role in sorted(wanted - have):
            print('adding role %s to %s' % (role, profile.name))
            profile.add_role(RoleName=role)
448
448
449
449
def find_image(ec2resource, owner_id, name):
    """Find an AMI by its owner ID and name."""
    filters = [
        {'Name': 'owner-id', 'Values': [owner_id]},
        {'Name': 'state', 'Values': ['available']},
        {'Name': 'image-type', 'Values': ['machine']},
        {'Name': 'name', 'Values': [name]},
    ]

    # Return the first matching image, if any.
    for image in ec2resource.images.filter(Filters=filters):
        return image

    raise Exception('unable to find image for %s' % name)
477
477
478
478
def ensure_security_groups(ec2resource, prefix='hg-'):
    """Ensure all necessary Mercurial security groups are present.

    All security groups are prefixed with ``hg-`` by default. Any security
    groups having this prefix but aren't in our list are deleted.
    """
    # Unprefixed name -> existing security group carrying our prefix.
    existing = {
        group.group_name[len(prefix):]: group
        for group in ec2resource.security_groups.all()
        if group.group_name.startswith(prefix)
    }

    # Delete prefixed groups that are no longer in our definitions.
    for name in sorted(set(existing) - set(SECURITY_GROUPS)):
        group = existing[name]
        print('removing legacy security group: %s' % group.group_name)
        group.delete()

    security_groups = {}

    for name in sorted(SECURITY_GROUPS):
        # Reuse groups that already exist; create the rest.
        if name in existing:
            security_groups[name] = existing[name]
            continue

        spec = SECURITY_GROUPS[name]
        actual = '%s%s' % (prefix, name)
        print('adding security group %s' % actual)

        group_res = ec2resource.create_security_group(
            Description=spec['description'],
            GroupName=actual,
        )

        group_res.authorize_ingress(
            IpPermissions=spec['ingress'],
        )

        security_groups[name] = group_res

    return security_groups
520
520
521
521
def terminate_ec2_instances(ec2resource, prefix='hg-'):
    """Terminate all EC2 instances managed by us."""
    terminated = []

    for instance in ec2resource.instances.all():
        # Nothing to do for instances already gone.
        if instance.state['Name'] == 'terminated':
            continue

        # Ours are identified via a Name tag carrying our prefix.
        for tag in instance.tags or []:
            if tag['Key'] != 'Name' or not tag['Value'].startswith(prefix):
                continue
            print('terminating %s' % instance.id)
            instance.terminate()
            terminated.append(instance)

    # Block until every requested termination completes.
    for instance in terminated:
        instance.wait_until_terminated()
538
538
539
539
def remove_resources(c, prefix='hg-'):
    """Purge all of our resources in this EC2 region."""
    ec2resource = c.ec2resource
    iamresource = c.iamresource

    # EC2 instances.
    terminate_ec2_instances(ec2resource, prefix=prefix)

    # AMIs we own, together with their snapshots.
    for image in ec2resource.images.filter(Owners=['self']):
        if not image.name.startswith(prefix):
            continue
        remove_ami(ec2resource, image)

    # Security groups.
    for group in ec2resource.security_groups.all():
        if not group.group_name.startswith(prefix):
            continue
        print('removing security group %s' % group.group_name)
        group.delete()

    # IAM instance profiles (roles are detached by the helper).
    for profile in iamresource.instance_profiles.all():
        if profile.name.startswith(prefix):
            delete_instance_profile(profile)

    # IAM roles: detach attached policies, then delete.
    for role in iamresource.roles.all():
        if not role.name.startswith(prefix):
            continue

        for policy in role.attached_policies.all():
            print('detaching policy %s from %s' % (policy.arn, role.name))
            role.detach_policy(PolicyArn=policy.arn)

        print('removing role %s' % role.name)
        role.delete()
568
568
569
569
570 def wait_for_ip_addresses(instances):
570 def wait_for_ip_addresses(instances):
571 """Wait for the public IP addresses of an iterable of instances."""
571 """Wait for the public IP addresses of an iterable of instances."""
572 for instance in instances:
572 for instance in instances:
573 while True:
573 while True:
574 if not instance.public_ip_address:
574 if not instance.public_ip_address:
575 time.sleep(2)
575 time.sleep(2)
576 instance.reload()
576 instance.reload()
577 continue
577 continue
578
578
579 print('public IP address for %s: %s' % (
579 print('public IP address for %s: %s' % (
580 instance.id, instance.public_ip_address))
580 instance.id, instance.public_ip_address))
581 break
581 break
582
582
583
583
584 def remove_ami(ec2resource, image):
584 def remove_ami(ec2resource, image):
585 """Remove an AMI and its underlying snapshots."""
585 """Remove an AMI and its underlying snapshots."""
586 snapshots = []
586 snapshots = []
587
587
588 for device in image.block_device_mappings:
588 for device in image.block_device_mappings:
589 if 'Ebs' in device:
589 if 'Ebs' in device:
590 snapshots.append(ec2resource.Snapshot(device['Ebs']['SnapshotId']))
590 snapshots.append(ec2resource.Snapshot(device['Ebs']['SnapshotId']))
591
591
592 print('deregistering %s' % image.id)
592 print('deregistering %s' % image.id)
593 image.deregister()
593 image.deregister()
594
594
595 for snapshot in snapshots:
595 for snapshot in snapshots:
596 print('deleting snapshot %s' % snapshot.id)
596 print('deleting snapshot %s' % snapshot.id)
597 snapshot.delete()
597 snapshot.delete()
598
598
599
599
600 def wait_for_ssm(ssmclient, instances):
600 def wait_for_ssm(ssmclient, instances):
601 """Wait for SSM to come online for an iterable of instance IDs."""
601 """Wait for SSM to come online for an iterable of instance IDs."""
602 while True:
602 while True:
603 res = ssmclient.describe_instance_information(
603 res = ssmclient.describe_instance_information(
604 Filters=[
604 Filters=[
605 {
605 {
606 'Key': 'InstanceIds',
606 'Key': 'InstanceIds',
607 'Values': [i.id for i in instances],
607 'Values': [i.id for i in instances],
608 },
608 },
609 ],
609 ],
610 )
610 )
611
611
612 available = len(res['InstanceInformationList'])
612 available = len(res['InstanceInformationList'])
613 wanted = len(instances)
613 wanted = len(instances)
614
614
615 print('%d/%d instances available in SSM' % (available, wanted))
615 print('%d/%d instances available in SSM' % (available, wanted))
616
616
617 if available == wanted:
617 if available == wanted:
618 return
618 return
619
619
620 time.sleep(2)
620 time.sleep(2)
621
621
622
622
623 def run_ssm_command(ssmclient, instances, document_name, parameters):
623 def run_ssm_command(ssmclient, instances, document_name, parameters):
624 """Run a PowerShell script on an EC2 instance."""
624 """Run a PowerShell script on an EC2 instance."""
625
625
626 res = ssmclient.send_command(
626 res = ssmclient.send_command(
627 InstanceIds=[i.id for i in instances],
627 InstanceIds=[i.id for i in instances],
628 DocumentName=document_name,
628 DocumentName=document_name,
629 Parameters=parameters,
629 Parameters=parameters,
630 CloudWatchOutputConfig={
630 CloudWatchOutputConfig={
631 'CloudWatchOutputEnabled': True,
631 'CloudWatchOutputEnabled': True,
632 },
632 },
633 )
633 )
634
634
635 command_id = res['Command']['CommandId']
635 command_id = res['Command']['CommandId']
636
636
637 for instance in instances:
637 for instance in instances:
638 while True:
638 while True:
639 try:
639 try:
640 res = ssmclient.get_command_invocation(
640 res = ssmclient.get_command_invocation(
641 CommandId=command_id,
641 CommandId=command_id,
642 InstanceId=instance.id,
642 InstanceId=instance.id,
643 )
643 )
644 except botocore.exceptions.ClientError as e:
644 except botocore.exceptions.ClientError as e:
645 if e.response['Error']['Code'] == 'InvocationDoesNotExist':
645 if e.response['Error']['Code'] == 'InvocationDoesNotExist':
646 print('could not find SSM command invocation; waiting')
646 print('could not find SSM command invocation; waiting')
647 time.sleep(1)
647 time.sleep(1)
648 continue
648 continue
649 else:
649 else:
650 raise
650 raise
651
651
652 if res['Status'] == 'Success':
652 if res['Status'] == 'Success':
653 break
653 break
654 elif res['Status'] in ('Pending', 'InProgress', 'Delayed'):
654 elif res['Status'] in ('Pending', 'InProgress', 'Delayed'):
655 time.sleep(2)
655 time.sleep(2)
656 else:
656 else:
657 raise Exception('command failed on %s: %s' % (
657 raise Exception('command failed on %s: %s' % (
658 instance.id, res['Status']))
658 instance.id, res['Status']))
659
659
660
660
661 @contextlib.contextmanager
661 @contextlib.contextmanager
662 def temporary_ec2_instances(ec2resource, config):
662 def temporary_ec2_instances(ec2resource, config):
663 """Create temporary EC2 instances.
663 """Create temporary EC2 instances.
664
664
665 This is a proxy to ``ec2client.run_instances(**config)`` that takes care of
665 This is a proxy to ``ec2client.run_instances(**config)`` that takes care of
666 managing the lifecycle of the instances.
666 managing the lifecycle of the instances.
667
667
668 When the context manager exits, the instances are terminated.
668 When the context manager exits, the instances are terminated.
669
669
670 The context manager evaluates to the list of data structures
670 The context manager evaluates to the list of data structures
671 describing each created instance. The instances may not be available
671 describing each created instance. The instances may not be available
672 for work immediately: it is up to the caller to wait for the instance
672 for work immediately: it is up to the caller to wait for the instance
673 to start responding.
673 to start responding.
674 """
674 """
675
675
676 ids = None
676 ids = None
677
677
678 try:
678 try:
679 res = ec2resource.create_instances(**config)
679 res = ec2resource.create_instances(**config)
680
680
681 ids = [i.id for i in res]
681 ids = [i.id for i in res]
682 print('started instances: %s' % ' '.join(ids))
682 print('started instances: %s' % ' '.join(ids))
683
683
684 yield res
684 yield res
685 finally:
685 finally:
686 if ids:
686 if ids:
687 print('terminating instances: %s' % ' '.join(ids))
687 print('terminating instances: %s' % ' '.join(ids))
688 for instance in res:
688 for instance in res:
689 instance.terminate()
689 instance.terminate()
690 print('terminated %d instances' % len(ids))
690 print('terminated %d instances' % len(ids))
691
691
692
692
693 @contextlib.contextmanager
693 @contextlib.contextmanager
694 def create_temp_windows_ec2_instances(c: AWSConnection, config):
694 def create_temp_windows_ec2_instances(c: AWSConnection, config):
695 """Create temporary Windows EC2 instances.
695 """Create temporary Windows EC2 instances.
696
696
697 This is a higher-level wrapper around ``create_temp_ec2_instances()`` that
697 This is a higher-level wrapper around ``create_temp_ec2_instances()`` that
698 configures the Windows instance for Windows Remote Management. The emitted
698 configures the Windows instance for Windows Remote Management. The emitted
699 instances will have a ``winrm_client`` attribute containing a
699 instances will have a ``winrm_client`` attribute containing a
700 ``pypsrp.client.Client`` instance bound to the instance.
700 ``pypsrp.client.Client`` instance bound to the instance.
701 """
701 """
702 if 'IamInstanceProfile' in config:
702 if 'IamInstanceProfile' in config:
703 raise ValueError('IamInstanceProfile cannot be provided in config')
703 raise ValueError('IamInstanceProfile cannot be provided in config')
704 if 'UserData' in config:
704 if 'UserData' in config:
705 raise ValueError('UserData cannot be provided in config')
705 raise ValueError('UserData cannot be provided in config')
706
706
707 password = c.automation.default_password()
707 password = c.automation.default_password()
708
708
709 config = copy.deepcopy(config)
709 config = copy.deepcopy(config)
710 config['IamInstanceProfile'] = {
710 config['IamInstanceProfile'] = {
711 'Name': 'hg-ephemeral-ec2-1',
711 'Name': 'hg-ephemeral-ec2-1',
712 }
712 }
713 config.setdefault('TagSpecifications', []).append({
713 config.setdefault('TagSpecifications', []).append({
714 'ResourceType': 'instance',
714 'ResourceType': 'instance',
715 'Tags': [{'Key': 'Name', 'Value': 'hg-temp-windows'}],
715 'Tags': [{'Key': 'Name', 'Value': 'hg-temp-windows'}],
716 })
716 })
717 config['UserData'] = WINDOWS_USER_DATA % password
717 config['UserData'] = WINDOWS_USER_DATA % password
718
718
719 with temporary_ec2_instances(c.ec2resource, config) as instances:
719 with temporary_ec2_instances(c.ec2resource, config) as instances:
720 wait_for_ip_addresses(instances)
720 wait_for_ip_addresses(instances)
721
721
722 print('waiting for Windows Remote Management service...')
722 print('waiting for Windows Remote Management service...')
723
723
724 for instance in instances:
724 for instance in instances:
725 client = wait_for_winrm(instance.public_ip_address, 'Administrator', password)
725 client = wait_for_winrm(instance.public_ip_address, 'Administrator', password)
726 print('established WinRM connection to %s' % instance.id)
726 print('established WinRM connection to %s' % instance.id)
727 instance.winrm_client = client
727 instance.winrm_client = client
728
728
729 yield instances
729 yield instances
730
730
731
731
732 def resolve_fingerprint(fingerprint):
732 def resolve_fingerprint(fingerprint):
733 fingerprint = json.dumps(fingerprint, sort_keys=True)
733 fingerprint = json.dumps(fingerprint, sort_keys=True)
734 return hashlib.sha256(fingerprint.encode('utf-8')).hexdigest()
734 return hashlib.sha256(fingerprint.encode('utf-8')).hexdigest()
735
735
736
736
737 def find_and_reconcile_image(ec2resource, name, fingerprint):
737 def find_and_reconcile_image(ec2resource, name, fingerprint):
738 """Attempt to find an existing EC2 AMI with a name and fingerprint.
738 """Attempt to find an existing EC2 AMI with a name and fingerprint.
739
739
740 If an image with the specified fingerprint is found, it is returned.
740 If an image with the specified fingerprint is found, it is returned.
741 Otherwise None is returned.
741 Otherwise None is returned.
742
742
743 Existing images for the specified name that don't have the specified
743 Existing images for the specified name that don't have the specified
744 fingerprint or are missing required metadata or deleted.
744 fingerprint or are missing required metadata or deleted.
745 """
745 """
746 # Find existing AMIs with this name and delete the ones that are invalid.
746 # Find existing AMIs with this name and delete the ones that are invalid.
747 # Store a reference to a good image so it can be returned one the
747 # Store a reference to a good image so it can be returned one the
748 # image state is reconciled.
748 # image state is reconciled.
749 images = ec2resource.images.filter(
749 images = ec2resource.images.filter(
750 Filters=[{'Name': 'name', 'Values': [name]}])
750 Filters=[{'Name': 'name', 'Values': [name]}])
751
751
752 existing_image = None
752 existing_image = None
753
753
754 for image in images:
754 for image in images:
755 if image.tags is None:
755 if image.tags is None:
756 print('image %s for %s lacks required tags; removing' % (
756 print('image %s for %s lacks required tags; removing' % (
757 image.id, image.name))
757 image.id, image.name))
758 remove_ami(ec2resource, image)
758 remove_ami(ec2resource, image)
759 else:
759 else:
760 tags = {t['Key']: t['Value'] for t in image.tags}
760 tags = {t['Key']: t['Value'] for t in image.tags}
761
761
762 if tags.get('HGIMAGEFINGERPRINT') == fingerprint:
762 if tags.get('HGIMAGEFINGERPRINT') == fingerprint:
763 existing_image = image
763 existing_image = image
764 else:
764 else:
765 print('image %s for %s has wrong fingerprint; removing' % (
765 print('image %s for %s has wrong fingerprint; removing' % (
766 image.id, image.name))
766 image.id, image.name))
767 remove_ami(ec2resource, image)
767 remove_ami(ec2resource, image)
768
768
769 return existing_image
769 return existing_image
770
770
771
771
772 def create_ami_from_instance(ec2client, instance, name, description,
772 def create_ami_from_instance(ec2client, instance, name, description,
773 fingerprint):
773 fingerprint):
774 """Create an AMI from a running instance.
774 """Create an AMI from a running instance.
775
775
776 Returns the ``ec2resource.Image`` representing the created AMI.
776 Returns the ``ec2resource.Image`` representing the created AMI.
777 """
777 """
778 instance.stop()
778 instance.stop()
779
779
780 ec2client.get_waiter('instance_stopped').wait(
780 ec2client.get_waiter('instance_stopped').wait(
781 InstanceIds=[instance.id],
781 InstanceIds=[instance.id],
782 WaiterConfig={
782 WaiterConfig={
783 'Delay': 5,
783 'Delay': 5,
784 })
784 })
785 print('%s is stopped' % instance.id)
785 print('%s is stopped' % instance.id)
786
786
787 image = instance.create_image(
787 image = instance.create_image(
788 Name=name,
788 Name=name,
789 Description=description,
789 Description=description,
790 )
790 )
791
791
792 image.create_tags(Tags=[
792 image.create_tags(Tags=[
793 {
793 {
794 'Key': 'HGIMAGEFINGERPRINT',
794 'Key': 'HGIMAGEFINGERPRINT',
795 'Value': fingerprint,
795 'Value': fingerprint,
796 },
796 },
797 ])
797 ])
798
798
799 print('waiting for image %s' % image.id)
799 print('waiting for image %s' % image.id)
800
800
801 ec2client.get_waiter('image_available').wait(
801 ec2client.get_waiter('image_available').wait(
802 ImageIds=[image.id],
802 ImageIds=[image.id],
803 )
803 )
804
804
805 print('image %s available as %s' % (image.id, image.name))
805 print('image %s available as %s' % (image.id, image.name))
806
806
807 return image
807 return image
808
808
809
809
810 def ensure_linux_dev_ami(c: AWSConnection, distro='debian9', prefix='hg-'):
810 def ensure_linux_dev_ami(c: AWSConnection, distro='debian9', prefix='hg-'):
811 """Ensures a Linux development AMI is available and up-to-date.
811 """Ensures a Linux development AMI is available and up-to-date.
812
812
813 Returns an ``ec2.Image`` of either an existing AMI or a newly-built one.
813 Returns an ``ec2.Image`` of either an existing AMI or a newly-built one.
814 """
814 """
815 ec2client = c.ec2client
815 ec2client = c.ec2client
816 ec2resource = c.ec2resource
816 ec2resource = c.ec2resource
817
817
818 name = '%s%s-%s' % (prefix, 'linux-dev', distro)
818 name = '%s%s-%s' % (prefix, 'linux-dev', distro)
819
819
820 if distro == 'debian9':
820 if distro == 'debian9':
821 image = find_image(
821 image = find_image(
822 ec2resource,
822 ec2resource,
823 DEBIAN_ACCOUNT_ID,
823 DEBIAN_ACCOUNT_ID,
824 'debian-stretch-hvm-x86_64-gp2-2019-02-19-26620',
824 'debian-stretch-hvm-x86_64-gp2-2019-02-19-26620',
825 )
825 )
826 ssh_username = 'admin'
826 ssh_username = 'admin'
827 elif distro == 'ubuntu18.04':
827 elif distro == 'ubuntu18.04':
828 image = find_image(
828 image = find_image(
829 ec2resource,
829 ec2resource,
830 UBUNTU_ACCOUNT_ID,
830 UBUNTU_ACCOUNT_ID,
831 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190403',
831 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190403',
832 )
832 )
833 ssh_username = 'ubuntu'
833 ssh_username = 'ubuntu'
834 elif distro == 'ubuntu18.10':
835 image = find_image(
836 ec2resource,
837 UBUNTU_ACCOUNT_ID,
838 'ubuntu/images/hvm-ssd/ubuntu-cosmic-18.10-amd64-server-20190402',
839 )
840 ssh_username = 'ubuntu'
841 elif distro == 'ubuntu19.04':
834 elif distro == 'ubuntu19.04':
842 image = find_image(
835 image = find_image(
843 ec2resource,
836 ec2resource,
844 UBUNTU_ACCOUNT_ID,
837 UBUNTU_ACCOUNT_ID,
845 'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190417',
838 'ubuntu/images/hvm-ssd/ubuntu-disco-19.04-amd64-server-20190417',
846 )
839 )
847 ssh_username = 'ubuntu'
840 ssh_username = 'ubuntu'
848 else:
841 else:
849 raise ValueError('unsupported Linux distro: %s' % distro)
842 raise ValueError('unsupported Linux distro: %s' % distro)
850
843
851 config = {
844 config = {
852 'BlockDeviceMappings': [
845 'BlockDeviceMappings': [
853 {
846 {
854 'DeviceName': image.block_device_mappings[0]['DeviceName'],
847 'DeviceName': image.block_device_mappings[0]['DeviceName'],
855 'Ebs': {
848 'Ebs': {
856 'DeleteOnTermination': True,
849 'DeleteOnTermination': True,
857 'VolumeSize': 8,
850 'VolumeSize': 8,
858 'VolumeType': 'gp2',
851 'VolumeType': 'gp2',
859 },
852 },
860 },
853 },
861 ],
854 ],
862 'EbsOptimized': True,
855 'EbsOptimized': True,
863 'ImageId': image.id,
856 'ImageId': image.id,
864 'InstanceInitiatedShutdownBehavior': 'stop',
857 'InstanceInitiatedShutdownBehavior': 'stop',
865 # 8 VCPUs for compiling Python.
858 # 8 VCPUs for compiling Python.
866 'InstanceType': 't3.2xlarge',
859 'InstanceType': 't3.2xlarge',
867 'KeyName': '%sautomation' % prefix,
860 'KeyName': '%sautomation' % prefix,
868 'MaxCount': 1,
861 'MaxCount': 1,
869 'MinCount': 1,
862 'MinCount': 1,
870 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
863 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
871 }
864 }
872
865
873 requirements2_path = (pathlib.Path(__file__).parent.parent /
866 requirements2_path = (pathlib.Path(__file__).parent.parent /
874 'linux-requirements-py2.txt')
867 'linux-requirements-py2.txt')
875 requirements3_path = (pathlib.Path(__file__).parent.parent /
868 requirements3_path = (pathlib.Path(__file__).parent.parent /
876 'linux-requirements-py3.txt')
869 'linux-requirements-py3.txt')
877 with requirements2_path.open('r', encoding='utf-8') as fh:
870 with requirements2_path.open('r', encoding='utf-8') as fh:
878 requirements2 = fh.read()
871 requirements2 = fh.read()
879 with requirements3_path.open('r', encoding='utf-8') as fh:
872 with requirements3_path.open('r', encoding='utf-8') as fh:
880 requirements3 = fh.read()
873 requirements3 = fh.read()
881
874
882 # Compute a deterministic fingerprint to determine whether image needs to
875 # Compute a deterministic fingerprint to determine whether image needs to
883 # be regenerated.
876 # be regenerated.
884 fingerprint = resolve_fingerprint({
877 fingerprint = resolve_fingerprint({
885 'instance_config': config,
878 'instance_config': config,
886 'bootstrap_script': BOOTSTRAP_DEBIAN,
879 'bootstrap_script': BOOTSTRAP_DEBIAN,
887 'requirements_py2': requirements2,
880 'requirements_py2': requirements2,
888 'requirements_py3': requirements3,
881 'requirements_py3': requirements3,
889 })
882 })
890
883
891 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
884 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
892
885
893 if existing_image:
886 if existing_image:
894 return existing_image
887 return existing_image
895
888
896 print('no suitable %s image found; creating one...' % name)
889 print('no suitable %s image found; creating one...' % name)
897
890
898 with temporary_ec2_instances(ec2resource, config) as instances:
891 with temporary_ec2_instances(ec2resource, config) as instances:
899 wait_for_ip_addresses(instances)
892 wait_for_ip_addresses(instances)
900
893
901 instance = instances[0]
894 instance = instances[0]
902
895
903 client = wait_for_ssh(
896 client = wait_for_ssh(
904 instance.public_ip_address, 22,
897 instance.public_ip_address, 22,
905 username=ssh_username,
898 username=ssh_username,
906 key_filename=str(c.key_pair_path_private('automation')))
899 key_filename=str(c.key_pair_path_private('automation')))
907
900
908 home = '/home/%s' % ssh_username
901 home = '/home/%s' % ssh_username
909
902
910 with client:
903 with client:
911 print('connecting to SSH server')
904 print('connecting to SSH server')
912 sftp = client.open_sftp()
905 sftp = client.open_sftp()
913
906
914 print('uploading bootstrap files')
907 print('uploading bootstrap files')
915 with sftp.open('%s/bootstrap' % home, 'wb') as fh:
908 with sftp.open('%s/bootstrap' % home, 'wb') as fh:
916 fh.write(BOOTSTRAP_DEBIAN)
909 fh.write(BOOTSTRAP_DEBIAN)
917 fh.chmod(0o0700)
910 fh.chmod(0o0700)
918
911
919 with sftp.open('%s/requirements-py2.txt' % home, 'wb') as fh:
912 with sftp.open('%s/requirements-py2.txt' % home, 'wb') as fh:
920 fh.write(requirements2)
913 fh.write(requirements2)
921 fh.chmod(0o0700)
914 fh.chmod(0o0700)
922
915
923 with sftp.open('%s/requirements-py3.txt' % home, 'wb') as fh:
916 with sftp.open('%s/requirements-py3.txt' % home, 'wb') as fh:
924 fh.write(requirements3)
917 fh.write(requirements3)
925 fh.chmod(0o0700)
918 fh.chmod(0o0700)
926
919
927 print('executing bootstrap')
920 print('executing bootstrap')
928 chan, stdin, stdout = ssh_exec_command(client,
921 chan, stdin, stdout = ssh_exec_command(client,
929 '%s/bootstrap' % home)
922 '%s/bootstrap' % home)
930 stdin.close()
923 stdin.close()
931
924
932 for line in stdout:
925 for line in stdout:
933 print(line, end='')
926 print(line, end='')
934
927
935 res = chan.recv_exit_status()
928 res = chan.recv_exit_status()
936 if res:
929 if res:
937 raise Exception('non-0 exit from bootstrap: %d' % res)
930 raise Exception('non-0 exit from bootstrap: %d' % res)
938
931
939 print('bootstrap completed; stopping %s to create %s' % (
932 print('bootstrap completed; stopping %s to create %s' % (
940 instance.id, name))
933 instance.id, name))
941
934
942 return create_ami_from_instance(ec2client, instance, name,
935 return create_ami_from_instance(ec2client, instance, name,
943 'Mercurial Linux development environment',
936 'Mercurial Linux development environment',
944 fingerprint)
937 fingerprint)
945
938
946
939
947 @contextlib.contextmanager
940 @contextlib.contextmanager
948 def temporary_linux_dev_instances(c: AWSConnection, image, instance_type,
941 def temporary_linux_dev_instances(c: AWSConnection, image, instance_type,
949 prefix='hg-', ensure_extra_volume=False):
942 prefix='hg-', ensure_extra_volume=False):
950 """Create temporary Linux development EC2 instances.
943 """Create temporary Linux development EC2 instances.
951
944
952 Context manager resolves to a list of ``ec2.Instance`` that were created
945 Context manager resolves to a list of ``ec2.Instance`` that were created
953 and are running.
946 and are running.
954
947
955 ``ensure_extra_volume`` can be set to ``True`` to require that instances
948 ``ensure_extra_volume`` can be set to ``True`` to require that instances
956 have a 2nd storage volume available other than the primary AMI volume.
949 have a 2nd storage volume available other than the primary AMI volume.
957 For instance types with instance storage, this does nothing special.
950 For instance types with instance storage, this does nothing special.
958 But for instance types without instance storage, an additional EBS volume
951 But for instance types without instance storage, an additional EBS volume
959 will be added to the instance.
952 will be added to the instance.
960
953
961 Instances have an ``ssh_client`` attribute containing a paramiko SSHClient
954 Instances have an ``ssh_client`` attribute containing a paramiko SSHClient
962 instance bound to the instance.
955 instance bound to the instance.
963
956
964 Instances have an ``ssh_private_key_path`` attributing containing the
957 Instances have an ``ssh_private_key_path`` attributing containing the
965 str path to the SSH private key to connect to the instance.
958 str path to the SSH private key to connect to the instance.
966 """
959 """
967
960
968 block_device_mappings = [
961 block_device_mappings = [
969 {
962 {
970 'DeviceName': image.block_device_mappings[0]['DeviceName'],
963 'DeviceName': image.block_device_mappings[0]['DeviceName'],
971 'Ebs': {
964 'Ebs': {
972 'DeleteOnTermination': True,
965 'DeleteOnTermination': True,
973 'VolumeSize': 12,
966 'VolumeSize': 12,
974 'VolumeType': 'gp2',
967 'VolumeType': 'gp2',
975 },
968 },
976 }
969 }
977 ]
970 ]
978
971
979 # This is not an exhaustive list of instance types having instance storage.
972 # This is not an exhaustive list of instance types having instance storage.
980 # But
973 # But
981 if (ensure_extra_volume
974 if (ensure_extra_volume
982 and not instance_type.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))):
975 and not instance_type.startswith(tuple(INSTANCE_TYPES_WITH_STORAGE))):
983 main_device = block_device_mappings[0]['DeviceName']
976 main_device = block_device_mappings[0]['DeviceName']
984
977
985 if main_device == 'xvda':
978 if main_device == 'xvda':
986 second_device = 'xvdb'
979 second_device = 'xvdb'
987 elif main_device == '/dev/sda1':
980 elif main_device == '/dev/sda1':
988 second_device = '/dev/sdb'
981 second_device = '/dev/sdb'
989 else:
982 else:
990 raise ValueError('unhandled primary EBS device name: %s' %
983 raise ValueError('unhandled primary EBS device name: %s' %
991 main_device)
984 main_device)
992
985
993 block_device_mappings.append({
986 block_device_mappings.append({
994 'DeviceName': second_device,
987 'DeviceName': second_device,
995 'Ebs': {
988 'Ebs': {
996 'DeleteOnTermination': True,
989 'DeleteOnTermination': True,
997 'VolumeSize': 8,
990 'VolumeSize': 8,
998 'VolumeType': 'gp2',
991 'VolumeType': 'gp2',
999 }
992 }
1000 })
993 })
1001
994
1002 config = {
995 config = {
1003 'BlockDeviceMappings': block_device_mappings,
996 'BlockDeviceMappings': block_device_mappings,
1004 'EbsOptimized': True,
997 'EbsOptimized': True,
1005 'ImageId': image.id,
998 'ImageId': image.id,
1006 'InstanceInitiatedShutdownBehavior': 'terminate',
999 'InstanceInitiatedShutdownBehavior': 'terminate',
1007 'InstanceType': instance_type,
1000 'InstanceType': instance_type,
1008 'KeyName': '%sautomation' % prefix,
1001 'KeyName': '%sautomation' % prefix,
1009 'MaxCount': 1,
1002 'MaxCount': 1,
1010 'MinCount': 1,
1003 'MinCount': 1,
1011 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
1004 'SecurityGroupIds': [c.security_groups['linux-dev-1'].id],
1012 }
1005 }
1013
1006
1014 with temporary_ec2_instances(c.ec2resource, config) as instances:
1007 with temporary_ec2_instances(c.ec2resource, config) as instances:
1015 wait_for_ip_addresses(instances)
1008 wait_for_ip_addresses(instances)
1016
1009
1017 ssh_private_key_path = str(c.key_pair_path_private('automation'))
1010 ssh_private_key_path = str(c.key_pair_path_private('automation'))
1018
1011
1019 for instance in instances:
1012 for instance in instances:
1020 client = wait_for_ssh(
1013 client = wait_for_ssh(
1021 instance.public_ip_address, 22,
1014 instance.public_ip_address, 22,
1022 username='hg',
1015 username='hg',
1023 key_filename=ssh_private_key_path)
1016 key_filename=ssh_private_key_path)
1024
1017
1025 instance.ssh_client = client
1018 instance.ssh_client = client
1026 instance.ssh_private_key_path = ssh_private_key_path
1019 instance.ssh_private_key_path = ssh_private_key_path
1027
1020
1028 try:
1021 try:
1029 yield instances
1022 yield instances
1030 finally:
1023 finally:
1031 for instance in instances:
1024 for instance in instances:
1032 instance.ssh_client.close()
1025 instance.ssh_client.close()
1033
1026
1034
1027
1035 def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-',
1028 def ensure_windows_dev_ami(c: AWSConnection, prefix='hg-',
1036 base_image_name=WINDOWS_BASE_IMAGE_NAME):
1029 base_image_name=WINDOWS_BASE_IMAGE_NAME):
1037 """Ensure Windows Development AMI is available and up-to-date.
1030 """Ensure Windows Development AMI is available and up-to-date.
1038
1031
1039 If necessary, a modern AMI will be built by starting a temporary EC2
1032 If necessary, a modern AMI will be built by starting a temporary EC2
1040 instance and bootstrapping it.
1033 instance and bootstrapping it.
1041
1034
1042 Obsolete AMIs will be deleted so there is only a single AMI having the
1035 Obsolete AMIs will be deleted so there is only a single AMI having the
1043 desired name.
1036 desired name.
1044
1037
1045 Returns an ``ec2.Image`` of either an existing AMI or a newly-built
1038 Returns an ``ec2.Image`` of either an existing AMI or a newly-built
1046 one.
1039 one.
1047 """
1040 """
1048 ec2client = c.ec2client
1041 ec2client = c.ec2client
1049 ec2resource = c.ec2resource
1042 ec2resource = c.ec2resource
1050 ssmclient = c.session.client('ssm')
1043 ssmclient = c.session.client('ssm')
1051
1044
1052 name = '%s%s' % (prefix, 'windows-dev')
1045 name = '%s%s' % (prefix, 'windows-dev')
1053
1046
1054 image = find_image(ec2resource, AMAZON_ACCOUNT_ID, base_image_name)
1047 image = find_image(ec2resource, AMAZON_ACCOUNT_ID, base_image_name)
1055
1048
1056 config = {
1049 config = {
1057 'BlockDeviceMappings': [
1050 'BlockDeviceMappings': [
1058 {
1051 {
1059 'DeviceName': '/dev/sda1',
1052 'DeviceName': '/dev/sda1',
1060 'Ebs': {
1053 'Ebs': {
1061 'DeleteOnTermination': True,
1054 'DeleteOnTermination': True,
1062 'VolumeSize': 32,
1055 'VolumeSize': 32,
1063 'VolumeType': 'gp2',
1056 'VolumeType': 'gp2',
1064 },
1057 },
1065 }
1058 }
1066 ],
1059 ],
1067 'ImageId': image.id,
1060 'ImageId': image.id,
1068 'InstanceInitiatedShutdownBehavior': 'stop',
1061 'InstanceInitiatedShutdownBehavior': 'stop',
1069 'InstanceType': 't3.medium',
1062 'InstanceType': 't3.medium',
1070 'KeyName': '%sautomation' % prefix,
1063 'KeyName': '%sautomation' % prefix,
1071 'MaxCount': 1,
1064 'MaxCount': 1,
1072 'MinCount': 1,
1065 'MinCount': 1,
1073 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1066 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1074 }
1067 }
1075
1068
1076 commands = [
1069 commands = [
1077 # Need to start the service so sshd_config is generated.
1070 # Need to start the service so sshd_config is generated.
1078 'Start-Service sshd',
1071 'Start-Service sshd',
1079 'Write-Output "modifying sshd_config"',
1072 'Write-Output "modifying sshd_config"',
1080 r'$content = Get-Content C:\ProgramData\ssh\sshd_config',
1073 r'$content = Get-Content C:\ProgramData\ssh\sshd_config',
1081 '$content = $content -replace "Match Group administrators","" -replace "AuthorizedKeysFile __PROGRAMDATA__/ssh/administrators_authorized_keys",""',
1074 '$content = $content -replace "Match Group administrators","" -replace "AuthorizedKeysFile __PROGRAMDATA__/ssh/administrators_authorized_keys",""',
1082 r'$content | Set-Content C:\ProgramData\ssh\sshd_config',
1075 r'$content | Set-Content C:\ProgramData\ssh\sshd_config',
1083 'Import-Module OpenSSHUtils',
1076 'Import-Module OpenSSHUtils',
1084 r'Repair-SshdConfigPermission C:\ProgramData\ssh\sshd_config -Confirm:$false',
1077 r'Repair-SshdConfigPermission C:\ProgramData\ssh\sshd_config -Confirm:$false',
1085 'Restart-Service sshd',
1078 'Restart-Service sshd',
1086 'Write-Output "installing OpenSSL client"',
1079 'Write-Output "installing OpenSSL client"',
1087 'Add-WindowsCapability -Online -Name OpenSSH.Client~~~~0.0.1.0',
1080 'Add-WindowsCapability -Online -Name OpenSSH.Client~~~~0.0.1.0',
1088 'Set-Service -Name sshd -StartupType "Automatic"',
1081 'Set-Service -Name sshd -StartupType "Automatic"',
1089 'Write-Output "OpenSSH server running"',
1082 'Write-Output "OpenSSH server running"',
1090 ]
1083 ]
1091
1084
1092 with INSTALL_WINDOWS_DEPENDENCIES.open('r', encoding='utf-8') as fh:
1085 with INSTALL_WINDOWS_DEPENDENCIES.open('r', encoding='utf-8') as fh:
1093 commands.extend(l.rstrip() for l in fh)
1086 commands.extend(l.rstrip() for l in fh)
1094
1087
1095 # Disable Windows Defender when bootstrapping because it just slows
1088 # Disable Windows Defender when bootstrapping because it just slows
1096 # things down.
1089 # things down.
1097 commands.insert(0, 'Set-MpPreference -DisableRealtimeMonitoring $true')
1090 commands.insert(0, 'Set-MpPreference -DisableRealtimeMonitoring $true')
1098 commands.append('Set-MpPreference -DisableRealtimeMonitoring $false')
1091 commands.append('Set-MpPreference -DisableRealtimeMonitoring $false')
1099
1092
1100 # Compute a deterministic fingerprint to determine whether image needs
1093 # Compute a deterministic fingerprint to determine whether image needs
1101 # to be regenerated.
1094 # to be regenerated.
1102 fingerprint = resolve_fingerprint({
1095 fingerprint = resolve_fingerprint({
1103 'instance_config': config,
1096 'instance_config': config,
1104 'user_data': WINDOWS_USER_DATA,
1097 'user_data': WINDOWS_USER_DATA,
1105 'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
1098 'initial_bootstrap': WINDOWS_BOOTSTRAP_POWERSHELL,
1106 'bootstrap_commands': commands,
1099 'bootstrap_commands': commands,
1107 'base_image_name': base_image_name,
1100 'base_image_name': base_image_name,
1108 })
1101 })
1109
1102
1110 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
1103 existing_image = find_and_reconcile_image(ec2resource, name, fingerprint)
1111
1104
1112 if existing_image:
1105 if existing_image:
1113 return existing_image
1106 return existing_image
1114
1107
1115 print('no suitable Windows development image found; creating one...')
1108 print('no suitable Windows development image found; creating one...')
1116
1109
1117 with create_temp_windows_ec2_instances(c, config) as instances:
1110 with create_temp_windows_ec2_instances(c, config) as instances:
1118 assert len(instances) == 1
1111 assert len(instances) == 1
1119 instance = instances[0]
1112 instance = instances[0]
1120
1113
1121 wait_for_ssm(ssmclient, [instance])
1114 wait_for_ssm(ssmclient, [instance])
1122
1115
1123 # On first boot, install various Windows updates.
1116 # On first boot, install various Windows updates.
1124 # We would ideally use PowerShell Remoting for this. However, there are
1117 # We would ideally use PowerShell Remoting for this. However, there are
1125 # trust issues that make it difficult to invoke Windows Update
1118 # trust issues that make it difficult to invoke Windows Update
1126 # remotely. So we use SSM, which has a mechanism for running Windows
1119 # remotely. So we use SSM, which has a mechanism for running Windows
1127 # Update.
1120 # Update.
1128 print('installing Windows features...')
1121 print('installing Windows features...')
1129 run_ssm_command(
1122 run_ssm_command(
1130 ssmclient,
1123 ssmclient,
1131 [instance],
1124 [instance],
1132 'AWS-RunPowerShellScript',
1125 'AWS-RunPowerShellScript',
1133 {
1126 {
1134 'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),
1127 'commands': WINDOWS_BOOTSTRAP_POWERSHELL.split('\n'),
1135 },
1128 },
1136 )
1129 )
1137
1130
1138 # Reboot so all updates are fully applied.
1131 # Reboot so all updates are fully applied.
1139 #
1132 #
1140 # We don't use instance.reboot() here because it is asynchronous and
1133 # We don't use instance.reboot() here because it is asynchronous and
1141 # we don't know when exactly the instance has rebooted. It could take
1134 # we don't know when exactly the instance has rebooted. It could take
1142 # a while to stop and we may start trying to interact with the instance
1135 # a while to stop and we may start trying to interact with the instance
1143 # before it has rebooted.
1136 # before it has rebooted.
1144 print('rebooting instance %s' % instance.id)
1137 print('rebooting instance %s' % instance.id)
1145 instance.stop()
1138 instance.stop()
1146 ec2client.get_waiter('instance_stopped').wait(
1139 ec2client.get_waiter('instance_stopped').wait(
1147 InstanceIds=[instance.id],
1140 InstanceIds=[instance.id],
1148 WaiterConfig={
1141 WaiterConfig={
1149 'Delay': 5,
1142 'Delay': 5,
1150 })
1143 })
1151
1144
1152 instance.start()
1145 instance.start()
1153 wait_for_ip_addresses([instance])
1146 wait_for_ip_addresses([instance])
1154
1147
1155 # There is a race condition here between the User Data PS script running
1148 # There is a race condition here between the User Data PS script running
1156 # and us connecting to WinRM. This can manifest as
1149 # and us connecting to WinRM. This can manifest as
1157 # "AuthorizationManager check failed" failures during run_powershell().
1150 # "AuthorizationManager check failed" failures during run_powershell().
1158 # TODO figure out a workaround.
1151 # TODO figure out a workaround.
1159
1152
1160 print('waiting for Windows Remote Management to come back...')
1153 print('waiting for Windows Remote Management to come back...')
1161 client = wait_for_winrm(instance.public_ip_address, 'Administrator',
1154 client = wait_for_winrm(instance.public_ip_address, 'Administrator',
1162 c.automation.default_password())
1155 c.automation.default_password())
1163 print('established WinRM connection to %s' % instance.id)
1156 print('established WinRM connection to %s' % instance.id)
1164 instance.winrm_client = client
1157 instance.winrm_client = client
1165
1158
1166 print('bootstrapping instance...')
1159 print('bootstrapping instance...')
1167 run_powershell(instance.winrm_client, '\n'.join(commands))
1160 run_powershell(instance.winrm_client, '\n'.join(commands))
1168
1161
1169 print('bootstrap completed; stopping %s to create image' % instance.id)
1162 print('bootstrap completed; stopping %s to create image' % instance.id)
1170 return create_ami_from_instance(ec2client, instance, name,
1163 return create_ami_from_instance(ec2client, instance, name,
1171 'Mercurial Windows development environment',
1164 'Mercurial Windows development environment',
1172 fingerprint)
1165 fingerprint)
1173
1166
1174
1167
1175 @contextlib.contextmanager
1168 @contextlib.contextmanager
1176 def temporary_windows_dev_instances(c: AWSConnection, image, instance_type,
1169 def temporary_windows_dev_instances(c: AWSConnection, image, instance_type,
1177 prefix='hg-', disable_antivirus=False):
1170 prefix='hg-', disable_antivirus=False):
1178 """Create a temporary Windows development EC2 instance.
1171 """Create a temporary Windows development EC2 instance.
1179
1172
1180 Context manager resolves to the list of ``EC2.Instance`` that were created.
1173 Context manager resolves to the list of ``EC2.Instance`` that were created.
1181 """
1174 """
1182 config = {
1175 config = {
1183 'BlockDeviceMappings': [
1176 'BlockDeviceMappings': [
1184 {
1177 {
1185 'DeviceName': '/dev/sda1',
1178 'DeviceName': '/dev/sda1',
1186 'Ebs': {
1179 'Ebs': {
1187 'DeleteOnTermination': True,
1180 'DeleteOnTermination': True,
1188 'VolumeSize': 32,
1181 'VolumeSize': 32,
1189 'VolumeType': 'gp2',
1182 'VolumeType': 'gp2',
1190 },
1183 },
1191 }
1184 }
1192 ],
1185 ],
1193 'ImageId': image.id,
1186 'ImageId': image.id,
1194 'InstanceInitiatedShutdownBehavior': 'stop',
1187 'InstanceInitiatedShutdownBehavior': 'stop',
1195 'InstanceType': instance_type,
1188 'InstanceType': instance_type,
1196 'KeyName': '%sautomation' % prefix,
1189 'KeyName': '%sautomation' % prefix,
1197 'MaxCount': 1,
1190 'MaxCount': 1,
1198 'MinCount': 1,
1191 'MinCount': 1,
1199 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1192 'SecurityGroupIds': [c.security_groups['windows-dev-1'].id],
1200 }
1193 }
1201
1194
1202 with create_temp_windows_ec2_instances(c, config) as instances:
1195 with create_temp_windows_ec2_instances(c, config) as instances:
1203 if disable_antivirus:
1196 if disable_antivirus:
1204 for instance in instances:
1197 for instance in instances:
1205 run_powershell(
1198 run_powershell(
1206 instance.winrm_client,
1199 instance.winrm_client,
1207 'Set-MpPreference -DisableRealtimeMonitoring $true')
1200 'Set-MpPreference -DisableRealtimeMonitoring $true')
1208
1201
1209 yield instances
1202 yield instances
@@ -1,561 +1,560 b''
1 # linux.py - Linux specific automation functionality
1 # linux.py - Linux specific automation functionality
2 #
2 #
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # no-check-code because Python 3 native.
8 # no-check-code because Python 3 native.
9
9
10 import os
10 import os
11 import pathlib
11 import pathlib
12 import shlex
12 import shlex
13 import subprocess
13 import subprocess
14 import tempfile
14 import tempfile
15
15
16 from .ssh import (
16 from .ssh import (
17 exec_command,
17 exec_command,
18 )
18 )
19
19
20
20
21 # Linux distributions that are supported.
21 # Linux distributions that are supported.
22 DISTROS = {
22 DISTROS = {
23 'debian9',
23 'debian9',
24 'ubuntu18.04',
24 'ubuntu18.04',
25 'ubuntu18.10',
26 'ubuntu19.04',
25 'ubuntu19.04',
27 }
26 }
28
27
29 INSTALL_PYTHONS = r'''
28 INSTALL_PYTHONS = r'''
30 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
29 PYENV2_VERSIONS="2.7.16 pypy2.7-7.1.1"
31 PYENV3_VERSIONS="3.5.7 3.6.9 3.7.4 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
30 PYENV3_VERSIONS="3.5.7 3.6.9 3.7.4 3.8-dev pypy3.5-7.0.0 pypy3.6-7.1.1"
32
31
33 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
32 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
34 pushd /hgdev/pyenv
33 pushd /hgdev/pyenv
35 git checkout 17f44b7cd6f58ea2fa68ec0371fb9e7a826b8be2
34 git checkout 17f44b7cd6f58ea2fa68ec0371fb9e7a826b8be2
36 popd
35 popd
37
36
38 export PYENV_ROOT="/hgdev/pyenv"
37 export PYENV_ROOT="/hgdev/pyenv"
39 export PATH="$PYENV_ROOT/bin:$PATH"
38 export PATH="$PYENV_ROOT/bin:$PATH"
40
39
41 # pip 19.0.3.
40 # pip 19.0.3.
42 PIP_SHA256=efe99298f3fbb1f56201ce6b81d2658067d2f7d7dfc2d412e0d3cacc9a397c61
41 PIP_SHA256=efe99298f3fbb1f56201ce6b81d2658067d2f7d7dfc2d412e0d3cacc9a397c61
43 wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/fee32c376da1ff6496a798986d7939cd51e1644f/get-pip.py
42 wget -O get-pip.py --progress dot:mega https://github.com/pypa/get-pip/raw/fee32c376da1ff6496a798986d7939cd51e1644f/get-pip.py
44 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
43 echo "${PIP_SHA256} get-pip.py" | sha256sum --check -
45
44
46 VIRTUALENV_SHA256=984d7e607b0a5d1329425dd8845bd971b957424b5ba664729fab51ab8c11bc39
45 VIRTUALENV_SHA256=984d7e607b0a5d1329425dd8845bd971b957424b5ba664729fab51ab8c11bc39
47 VIRTUALENV_TARBALL=virtualenv-16.4.3.tar.gz
46 VIRTUALENV_TARBALL=virtualenv-16.4.3.tar.gz
48 wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/37/db/89d6b043b22052109da35416abc3c397655e4bd3cff031446ba02b9654fa/${VIRTUALENV_TARBALL}
47 wget -O ${VIRTUALENV_TARBALL} --progress dot:mega https://files.pythonhosted.org/packages/37/db/89d6b043b22052109da35416abc3c397655e4bd3cff031446ba02b9654fa/${VIRTUALENV_TARBALL}
49 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
48 echo "${VIRTUALENV_SHA256} ${VIRTUALENV_TARBALL}" | sha256sum --check -
50
49
51 for v in ${PYENV2_VERSIONS}; do
50 for v in ${PYENV2_VERSIONS}; do
52 pyenv install -v ${v}
51 pyenv install -v ${v}
53 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
52 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
54 ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
53 ${PYENV_ROOT}/versions/${v}/bin/pip install ${VIRTUALENV_TARBALL}
55 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
54 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py2.txt
56 done
55 done
57
56
58 for v in ${PYENV3_VERSIONS}; do
57 for v in ${PYENV3_VERSIONS}; do
59 pyenv install -v ${v}
58 pyenv install -v ${v}
60 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
59 ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
61 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
60 ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
62 done
61 done
63
62
64 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
63 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
65 '''.lstrip().replace('\r\n', '\n')
64 '''.lstrip().replace('\r\n', '\n')
66
65
67
66
68 INSTALL_RUST = r'''
67 INSTALL_RUST = r'''
69 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
68 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
70 wget -O rustup-init --progress dot:mega https://static.rust-lang.org/rustup/archive/1.18.3/x86_64-unknown-linux-gnu/rustup-init
69 wget -O rustup-init --progress dot:mega https://static.rust-lang.org/rustup/archive/1.18.3/x86_64-unknown-linux-gnu/rustup-init
71 echo "${RUSTUP_INIT_SHA256} rustup-init" | sha256sum --check -
70 echo "${RUSTUP_INIT_SHA256} rustup-init" | sha256sum --check -
72
71
73 chmod +x rustup-init
72 chmod +x rustup-init
74 sudo -H -u hg -g hg ./rustup-init -y
73 sudo -H -u hg -g hg ./rustup-init -y
75 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.34.2
74 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.34.2
76 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
75 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
77 '''
76 '''
78
77
79
78
80 BOOTSTRAP_VIRTUALENV = r'''
79 BOOTSTRAP_VIRTUALENV = r'''
81 /usr/bin/virtualenv /hgdev/venv-bootstrap
80 /usr/bin/virtualenv /hgdev/venv-bootstrap
82
81
83 HG_SHA256=1bdd21bb87d1e05fb5cd395d488d0e0cc2f2f90ce0fd248e31a03595da5ccb47
82 HG_SHA256=1bdd21bb87d1e05fb5cd395d488d0e0cc2f2f90ce0fd248e31a03595da5ccb47
84 HG_TARBALL=mercurial-4.9.1.tar.gz
83 HG_TARBALL=mercurial-4.9.1.tar.gz
85
84
86 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
85 wget -O ${HG_TARBALL} --progress dot:mega https://www.mercurial-scm.org/release/${HG_TARBALL}
87 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
86 echo "${HG_SHA256} ${HG_TARBALL}" | sha256sum --check -
88
87
89 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
88 /hgdev/venv-bootstrap/bin/pip install ${HG_TARBALL}
90 '''.lstrip().replace('\r\n', '\n')
89 '''.lstrip().replace('\r\n', '\n')
91
90
92
91
93 BOOTSTRAP_DEBIAN = r'''
92 BOOTSTRAP_DEBIAN = r'''
94 #!/bin/bash
93 #!/bin/bash
95
94
96 set -ex
95 set -ex
97
96
98 DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
97 DISTRO=`grep DISTRIB_ID /etc/lsb-release | awk -F= '{{print $2}}'`
99 DEBIAN_VERSION=`cat /etc/debian_version`
98 DEBIAN_VERSION=`cat /etc/debian_version`
100 LSB_RELEASE=`lsb_release -cs`
99 LSB_RELEASE=`lsb_release -cs`
101
100
102 sudo /usr/sbin/groupadd hg
101 sudo /usr/sbin/groupadd hg
103 sudo /usr/sbin/groupadd docker
102 sudo /usr/sbin/groupadd docker
104 sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
103 sudo /usr/sbin/useradd -g hg -G sudo,docker -d /home/hg -m -s /bin/bash hg
105 sudo mkdir /home/hg/.ssh
104 sudo mkdir /home/hg/.ssh
106 sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
105 sudo cp ~/.ssh/authorized_keys /home/hg/.ssh/authorized_keys
107 sudo chown -R hg:hg /home/hg/.ssh
106 sudo chown -R hg:hg /home/hg/.ssh
108 sudo chmod 700 /home/hg/.ssh
107 sudo chmod 700 /home/hg/.ssh
109 sudo chmod 600 /home/hg/.ssh/authorized_keys
108 sudo chmod 600 /home/hg/.ssh/authorized_keys
110
109
111 cat << EOF | sudo tee /etc/sudoers.d/90-hg
110 cat << EOF | sudo tee /etc/sudoers.d/90-hg
112 hg ALL=(ALL) NOPASSWD:ALL
111 hg ALL=(ALL) NOPASSWD:ALL
113 EOF
112 EOF
114
113
115 sudo apt-get update
114 sudo apt-get update
116 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade
115 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq dist-upgrade
117
116
118 # Install packages necessary to set up Docker Apt repo.
117 # Install packages necessary to set up Docker Apt repo.
119 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
118 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends \
120 apt-transport-https \
119 apt-transport-https \
121 gnupg
120 gnupg
122
121
123 cat > docker-apt-key << EOF
122 cat > docker-apt-key << EOF
124 -----BEGIN PGP PUBLIC KEY BLOCK-----
123 -----BEGIN PGP PUBLIC KEY BLOCK-----
125
124
126 mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
125 mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
127 lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
126 lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
128 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
127 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
129 L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
128 L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
130 UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
129 UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
131 cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
130 cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
132 ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
131 ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
133 vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
132 vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
134 G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
133 G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
135 XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
134 XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
136 q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
135 q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
137 tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
136 tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
138 BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
137 BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
139 v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
138 v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
140 tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
139 tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
141 jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
140 jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
142 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
141 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
143 XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
142 XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
144 FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
143 FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
145 g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
144 g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
146 ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
145 ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
147 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
146 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
148 G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
147 G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
149 FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
148 FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
150 EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
149 EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
151 M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
150 M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
152 Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
151 Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
153 w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
152 w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
154 z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
153 z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
155 eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
154 eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
156 VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
155 VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
157 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
156 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
158 zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
157 zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
159 pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
158 pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
160 ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
159 ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
161 BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
160 BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
162 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
161 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
163 YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
162 YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
164 mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
163 mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
165 KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
164 KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
166 JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
165 JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
167 cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
166 cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
168 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
167 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
169 U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
168 U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
170 VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
169 VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
171 irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
170 irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
172 SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
171 SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
173 QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
172 QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
174 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
173 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
175 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
174 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
176 dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
175 dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
177 Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
176 Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
178 H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
177 H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
179 /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
178 /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
180 M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
179 M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
181 xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
180 xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
182 jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
181 jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
183 YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
182 YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
184 =0YYh
183 =0YYh
185 -----END PGP PUBLIC KEY BLOCK-----
184 -----END PGP PUBLIC KEY BLOCK-----
186 EOF
185 EOF
187
186
188 sudo apt-key add docker-apt-key
187 sudo apt-key add docker-apt-key
189
188
190 if [ "$LSB_RELEASE" = "stretch" ]; then
189 if [ "$LSB_RELEASE" = "stretch" ]; then
191 cat << EOF | sudo tee -a /etc/apt/sources.list
190 cat << EOF | sudo tee -a /etc/apt/sources.list
192 # Need backports for clang-format-6.0
191 # Need backports for clang-format-6.0
193 deb http://deb.debian.org/debian stretch-backports main
192 deb http://deb.debian.org/debian stretch-backports main
194
193
195 # Sources are useful if we want to compile things locally.
194 # Sources are useful if we want to compile things locally.
196 deb-src http://deb.debian.org/debian stretch main
195 deb-src http://deb.debian.org/debian stretch main
197 deb-src http://security.debian.org/debian-security stretch/updates main
196 deb-src http://security.debian.org/debian-security stretch/updates main
198 deb-src http://deb.debian.org/debian stretch-updates main
197 deb-src http://deb.debian.org/debian stretch-updates main
199 deb-src http://deb.debian.org/debian stretch-backports main
198 deb-src http://deb.debian.org/debian stretch-backports main
200
199
201 deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
200 deb [arch=amd64] https://download.docker.com/linux/debian stretch stable
202 EOF
201 EOF
203
202
204 elif [ "$DISTRO" = "Ubuntu" ]; then
203 elif [ "$DISTRO" = "Ubuntu" ]; then
205 cat << EOF | sudo tee -a /etc/apt/sources.list
204 cat << EOF | sudo tee -a /etc/apt/sources.list
206 deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
205 deb [arch=amd64] https://download.docker.com/linux/ubuntu $LSB_RELEASE stable
207 EOF
206 EOF
208
207
209 fi
208 fi
210
209
211 sudo apt-get update
210 sudo apt-get update
212
211
213 PACKAGES="\
212 PACKAGES="\
214 btrfs-progs \
213 btrfs-progs \
215 build-essential \
214 build-essential \
216 bzr \
215 bzr \
217 clang-format-6.0 \
216 clang-format-6.0 \
218 cvs \
217 cvs \
219 darcs \
218 darcs \
220 debhelper \
219 debhelper \
221 devscripts \
220 devscripts \
222 docker-ce \
221 docker-ce \
223 dpkg-dev \
222 dpkg-dev \
224 dstat \
223 dstat \
225 emacs \
224 emacs \
226 gettext \
225 gettext \
227 git \
226 git \
228 htop \
227 htop \
229 iotop \
228 iotop \
230 jfsutils \
229 jfsutils \
231 libbz2-dev \
230 libbz2-dev \
232 libexpat1-dev \
231 libexpat1-dev \
233 libffi-dev \
232 libffi-dev \
234 libgdbm-dev \
233 libgdbm-dev \
235 liblzma-dev \
234 liblzma-dev \
236 libncurses5-dev \
235 libncurses5-dev \
237 libnss3-dev \
236 libnss3-dev \
238 libreadline-dev \
237 libreadline-dev \
239 libsqlite3-dev \
238 libsqlite3-dev \
240 libssl-dev \
239 libssl-dev \
241 netbase \
240 netbase \
242 ntfs-3g \
241 ntfs-3g \
243 nvme-cli \
242 nvme-cli \
244 pyflakes \
243 pyflakes \
245 pyflakes3 \
244 pyflakes3 \
246 pylint \
245 pylint \
247 pylint3 \
246 pylint3 \
248 python-all-dev \
247 python-all-dev \
249 python-dev \
248 python-dev \
250 python-docutils \
249 python-docutils \
251 python-fuzzywuzzy \
250 python-fuzzywuzzy \
252 python-pygments \
251 python-pygments \
253 python-subversion \
252 python-subversion \
254 python-vcr \
253 python-vcr \
255 python3-dev \
254 python3-dev \
256 python3-docutils \
255 python3-docutils \
257 python3-fuzzywuzzy \
256 python3-fuzzywuzzy \
258 python3-pygments \
257 python3-pygments \
259 python3-vcr \
258 python3-vcr \
260 rsync \
259 rsync \
261 sqlite3 \
260 sqlite3 \
262 subversion \
261 subversion \
263 tcl-dev \
262 tcl-dev \
264 tk-dev \
263 tk-dev \
265 tla \
264 tla \
266 unzip \
265 unzip \
267 uuid-dev \
266 uuid-dev \
268 vim \
267 vim \
269 virtualenv \
268 virtualenv \
270 wget \
269 wget \
271 xfsprogs \
270 xfsprogs \
272 zip \
271 zip \
273 zlib1g-dev"
272 zlib1g-dev"
274
273
275 if [ "LSB_RELEASE" = "stretch" ]; then
274 if [ "LSB_RELEASE" = "stretch" ]; then
276 PACKAGES="$PACKAGES linux-perf"
275 PACKAGES="$PACKAGES linux-perf"
277 elif [ "$DISTRO" = "Ubuntu" ]; then
276 elif [ "$DISTRO" = "Ubuntu" ]; then
278 PACKAGES="$PACKAGES linux-tools-common"
277 PACKAGES="$PACKAGES linux-tools-common"
279 fi
278 fi
280
279
281 # Ubuntu 19.04 removes monotone.
280 # Ubuntu 19.04 removes monotone.
282 if [ "$LSB_RELEASE" != "disco" ]; then
281 if [ "$LSB_RELEASE" != "disco" ]; then
283 PACKAGES="$PACKAGES monotone"
282 PACKAGES="$PACKAGES monotone"
284 fi
283 fi
285
284
286 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
285 sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install --no-install-recommends $PACKAGES
287
286
288 # Create clang-format symlink so test harness finds it.
287 # Create clang-format symlink so test harness finds it.
289 sudo update-alternatives --install /usr/bin/clang-format clang-format \
288 sudo update-alternatives --install /usr/bin/clang-format clang-format \
290 /usr/bin/clang-format-6.0 1000
289 /usr/bin/clang-format-6.0 1000
291
290
292 sudo mkdir /hgdev
291 sudo mkdir /hgdev
293 # Will be normalized to hg:hg later.
292 # Will be normalized to hg:hg later.
294 sudo chown `whoami` /hgdev
293 sudo chown `whoami` /hgdev
295
294
296 {install_rust}
295 {install_rust}
297
296
298 cp requirements-py2.txt /hgdev/requirements-py2.txt
297 cp requirements-py2.txt /hgdev/requirements-py2.txt
299 cp requirements-py3.txt /hgdev/requirements-py3.txt
298 cp requirements-py3.txt /hgdev/requirements-py3.txt
300
299
301 # Disable the pip version check because it uses the network and can
300 # Disable the pip version check because it uses the network and can
302 # be annoying.
301 # be annoying.
303 cat << EOF | sudo tee -a /etc/pip.conf
302 cat << EOF | sudo tee -a /etc/pip.conf
304 [global]
303 [global]
305 disable-pip-version-check = True
304 disable-pip-version-check = True
306 EOF
305 EOF
307
306
308 {install_pythons}
307 {install_pythons}
309 {bootstrap_virtualenv}
308 {bootstrap_virtualenv}
310
309
311 /hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src
310 /hgdev/venv-bootstrap/bin/hg clone https://www.mercurial-scm.org/repo/hg /hgdev/src
312
311
313 # Mark the repo as non-publishing.
312 # Mark the repo as non-publishing.
314 cat >> /hgdev/src/.hg/hgrc << EOF
313 cat >> /hgdev/src/.hg/hgrc << EOF
315 [phases]
314 [phases]
316 publish = false
315 publish = false
317 EOF
316 EOF
318
317
319 sudo chown -R hg:hg /hgdev
318 sudo chown -R hg:hg /hgdev
320 '''.lstrip().format(
319 '''.lstrip().format(
321 install_rust=INSTALL_RUST,
320 install_rust=INSTALL_RUST,
322 install_pythons=INSTALL_PYTHONS,
321 install_pythons=INSTALL_PYTHONS,
323 bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
322 bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV
324 ).replace('\r\n', '\n')
323 ).replace('\r\n', '\n')
325
324
326
325
# Prepares /hgdev for operations.
#
# Bash script uploaded to /hgdev/prepare-hgdev and run via sudo by
# prepare_exec_environment(). $1 selects the filesystem for the /hgwork
# working volume: "default" leaves the root volume as-is, "tmpfs" mounts a
# RAM-backed volume, and any other supported value formats a spare NVMe
# block device with that filesystem and mounts it at /hgwork.
PREPARE_HGDEV = '''
#!/bin/bash

set -e

FS=$1

ensure_device() {
    if [ -z "${DEVICE}" ]; then
        echo "could not find block device to format"
        exit 1
    fi
}

# Determine device to partition for extra filesystem.
# If only 1 volume is present, it will be the root volume and
# should be /dev/nvme0. If multiple volumes are present, the
# root volume could be nvme0 or nvme1. Use whichever one doesn't have
# a partition.
if [ -e /dev/nvme1n1 ]; then
    if [ -e /dev/nvme0n1p1 ]; then
        DEVICE=/dev/nvme1n1
    else
        DEVICE=/dev/nvme0n1
    fi
else
    DEVICE=
fi

sudo mkdir /hgwork

if [ "${FS}" != "default" -a "${FS}" != "tmpfs" ]; then
    ensure_device
    echo "creating ${FS} filesystem on ${DEVICE}"
fi

if [ "${FS}" = "default" ]; then
    :

elif [ "${FS}" = "btrfs" ]; then
    sudo mkfs.btrfs ${DEVICE}
    sudo mount ${DEVICE} /hgwork

elif [ "${FS}" = "ext3" ]; then
    # lazy_journal_init speeds up filesystem creation at the expense of
    # integrity if things crash. We are an ephemeral instance, so we don't
    # care about integrity.
    sudo mkfs.ext3 -E lazy_journal_init=1 ${DEVICE}
    sudo mount ${DEVICE} /hgwork

elif [ "${FS}" = "ext4" ]; then
    sudo mkfs.ext4 -E lazy_journal_init=1 ${DEVICE}
    sudo mount ${DEVICE} /hgwork

elif [ "${FS}" = "jfs" ]; then
    sudo mkfs.jfs ${DEVICE}
    sudo mount ${DEVICE} /hgwork

elif [ "${FS}" = "tmpfs" ]; then
    echo "creating tmpfs volume in /hgwork"
    sudo mount -t tmpfs -o size=1024M tmpfs /hgwork

elif [ "${FS}" = "xfs" ]; then
    sudo mkfs.xfs ${DEVICE}
    sudo mount ${DEVICE} /hgwork

else
    echo "unsupported filesystem: ${FS}"
    exit 1
fi

echo "/hgwork ready"

sudo chown hg:hg /hgwork
mkdir /hgwork/tmp
chown hg:hg /hgwork/tmp

rsync -a /hgdev/src /hgwork/
'''.lstrip().replace('\r\n', '\n')
407
406
408
407
# Bash script uploaded to /hgdev/hgup by synchronize_hg(). Purges the
# remote clone at /hgwork/src and updates its working directory to the
# revision passed as $1, then logs the resulting changeset.
HG_UPDATE_CLEAN = '''
set -ex

HG=/hgdev/venv-bootstrap/bin/hg

cd /hgwork/src
${HG} --config extensions.purge= purge --all
${HG} update -C $1
${HG} log -r .
'''.lstrip().replace('\r\n', '\n')
419
418
420
419
def prepare_exec_environment(ssh_client, filesystem='default'):
    """Prepare an EC2 instance to execute things.

    The AMI has an ``/hgdev`` bootstrapped with various Python installs
    and a clone of the Mercurial repo.

    In EC2, EBS volumes launched from snapshots have wonky performance behavior.
    Notably, blocks have to be copied on first access, which makes volume
    I/O extremely slow on fresh volumes.

    Furthermore, we may want to run operations, tests, etc on alternative
    filesystems so we examine behavior on different filesystems.

    This function is used to facilitate executing operations on alternate
    volumes.

    ``filesystem`` is forwarded to the remote provisioning script; see
    PREPARE_HGDEV for accepted values. Raises Exception on a non-zero
    remote exit code.
    """
    sftp = ssh_client.open_sftp()

    # Upload the provisioning script and make it executable.
    with sftp.open('/hgdev/prepare-hgdev', 'wb') as fh:
        fh.write(PREPARE_HGDEV)
        fh.chmod(0o0777)

    command = 'sudo /hgdev/prepare-hgdev %s' % filesystem
    # The module imports ssh.exec_command under the alias ssh_exec_command;
    # the bare name exec_command is undefined here and raised NameError.
    chan, stdin, stdout = ssh_exec_command(ssh_client, command)
    stdin.close()

    # Stream remote output to the local console as it arrives.
    for line in stdout:
        print(line, end='')

    res = chan.recv_exit_status()

    if res:
        # Message previously said "updating working directory" — copy-pasted
        # from synchronize_hg(); this function prepares the exec environment.
        raise Exception('non-0 exit code preparing exec environment; %d'
                        % res)
455
454
456
455
def synchronize_hg(source_path: pathlib.Path, ec2_instance, revision: str = None):
    """Synchronize a local Mercurial source path to remote EC2 instance.

    Pushes ``revision`` (default ``.``, the working directory parent) plus
    all ``.hgtags``-touching changesets from the local clone at
    ``source_path`` to ``/hgwork/src`` on the instance, then updates the
    remote working directory to that revision via HG_UPDATE_CLEAN.
    Raises Exception on remote failure.
    """
    # NOTE(review): tempfile is not imported at module level in this file's
    # visible header; import locally so this function is self-contained.
    import tempfile

    # hg's -r argument must be a string; previously a None default would
    # make subprocess.run() choke on a None argv element.
    revision = revision or '.'

    with tempfile.TemporaryDirectory() as temp_dir:
        temp_dir = pathlib.Path(temp_dir)

        ssh_dir = temp_dir / '.ssh'
        ssh_dir.mkdir()
        ssh_dir.chmod(0o0700)

        public_ip = ec2_instance.public_ip_address

        ssh_config = ssh_dir / 'config'

        # Throwaway SSH config so hg push can reach the instance without
        # touching the user's real known_hosts or keys.
        with ssh_config.open('w', encoding='utf-8') as fh:
            fh.write('Host %s\n' % public_ip)
            fh.write('  User hg\n')
            fh.write('  StrictHostKeyChecking no\n')
            fh.write('  UserKnownHostsFile %s\n' % (ssh_dir / 'known_hosts'))
            fh.write('  IdentityFile %s\n' % ec2_instance.ssh_private_key_path)

        if not (source_path / '.hg').is_dir():
            raise Exception('%s is not a Mercurial repository; synchronization '
                            'not yet supported' % source_path)

        env = dict(os.environ)
        env['HGPLAIN'] = '1'
        env['HGENCODING'] = 'utf-8'

        hg_bin = source_path / 'hg'

        # Resolve the (possibly symbolic) revision to a full node hash.
        res = subprocess.run(
            ['python2.7', str(hg_bin), 'log', '-r', revision, '-T', '{node}'],
            cwd=str(source_path), env=env, check=True, capture_output=True)

        full_revision = res.stdout.decode('ascii')

        args = [
            'python2.7', str(hg_bin),
            '--config', 'ui.ssh=ssh -F %s' % ssh_config,
            '--config', 'ui.remotecmd=/hgdev/venv-bootstrap/bin/hg',
            # Also ensure .hgtags changes are present so auto version
            # calculation works.
            'push', '-f', '-r', full_revision, '-r', 'file(.hgtags)',
            'ssh://%s//hgwork/src' % public_ip,
        ]

        res = subprocess.run(args, cwd=str(source_path), env=env)

        # Allow 1 (no-op) to not trigger error.
        if res.returncode not in (0, 1):
            res.check_returncode()

    # TODO support synchronizing dirty working directory.

    sftp = ec2_instance.ssh_client.open_sftp()

    with sftp.open('/hgdev/hgup', 'wb') as fh:
        fh.write(HG_UPDATE_CLEAN)
        fh.chmod(0o0700)

    # ssh.exec_command is imported under the alias ssh_exec_command; the
    # bare name exec_command is undefined in this module.
    chan, stdin, stdout = ssh_exec_command(
        ec2_instance.ssh_client, '/hgdev/hgup %s' % full_revision)
    stdin.close()

    for line in stdout:
        print(line, end='')

    res = chan.recv_exit_status()

    if res:
        raise Exception('non-0 exit code updating working directory; %d'
                        % res)
530
529
531
530
def run_tests(ssh_client, python_version, test_flags=None):
    """Run tests on a remote Linux machine via an SSH client.

    ``python_version`` chooses the interpreter: ``system2``/``system3``
    select the OS Pythons, a ``pypy*`` value selects that pyenv shim, and
    anything else is treated as a CPython version suffix for a pyenv shim
    (e.g. ``3.7`` -> ``/hgdev/pyenv/shims/python3.7``). ``test_flags`` is
    an optional list of extra run-tests.py arguments. Returns the remote
    process's exit code.
    """
    # NOTE(review): shlex is not imported at module level in this file's
    # visible header; import locally so this function is self-contained.
    import shlex

    test_flags = test_flags or []

    print('running tests')

    if python_version == 'system2':
        python = '/usr/bin/python2'
    elif python_version == 'system3':
        python = '/usr/bin/python3'
    elif python_version.startswith('pypy'):
        python = '/hgdev/pyenv/shims/%s' % python_version
    else:
        python = '/hgdev/pyenv/shims/python%s' % python_version

    # Quote each flag so arbitrary arguments survive the remote shell.
    test_flags = ' '.join(shlex.quote(a) for a in test_flags)

    command = (
        '/bin/sh -c "export TMPDIR=/hgwork/tmp; '
        'cd /hgwork/src/tests && %s run-tests.py %s"' % (
            python, test_flags))

    # The module imports ssh.exec_command under the alias ssh_exec_command;
    # the bare name exec_command is undefined here and raised NameError.
    chan, stdin, stdout = ssh_exec_command(ssh_client, command)

    stdin.close()

    # Stream remote test output to the local console.
    for line in stdout:
        print(line, end='')

    return chan.recv_exit_status()
General Comments 0
You need to be logged in to leave comments. Login now