merge with stable
Pulkit Goyal
r44767:63d84c18 merge default

Note: the requested changes are too big, so the content was truncated; the final diff below is partial.

@@ -0,0 +1,75 b''
image: octobus/ci-mercurial-core

# The runner made a clone as root.
# We make a new clone owned by user used to run the step.
before_script:
    - hg clone . /tmp/mercurial-ci/ --noupdate
    - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
    - cd /tmp/mercurial-ci/
    - (cd tests; ls -1 test-check-*.*) > /tmp/check-tests.txt

variables:
    PYTHON: python

.runtests_template: &runtests
    script:
        - cd tests/
        - echo "python used, $PYTHON"
        - echo "$RUNTEST_ARGS"
        - $PYTHON run-tests.py --color=always $RUNTEST_ARGS

checks-py2:
    <<: *runtests
    variables:
        RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"

checks-py3:
    <<: *runtests
    variables:
        RUNTEST_ARGS: "--time --test-list /tmp/check-tests.txt"
        PYTHON: python3

rust-cargo-test-py2: &rust_cargo_test
    script:
        - echo "python used, $PYTHON"
        - make rust-tests

rust-cargo-test-py3:
    <<: *rust_cargo_test
    variables:
        PYTHON: python3

test-py2:
    <<: *runtests
    variables:
        RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt"

test-py3:
    <<: *runtests
    variables:
        RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt"
        PYTHON: python3

test-py2-pure:
    <<: *runtests
    variables:
        RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"

test-py3-pure:
    <<: *runtests
    variables:
        RUNTEST_ARGS: "--pure --blacklist /tmp/check-tests.txt"
        PYTHON: python3

test-py2-rust:
    <<: *runtests
    variables:
        HGWITHRUSTEXT: cpython
        RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt"

test-py3-rust:
    <<: *runtests
    variables:
        HGWITHRUSTEXT: cpython
        RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt"
        PYTHON: python3
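
The CI jobs above avoid repetition with YAML anchors: `.runtests_template` is a hidden job (the leading dot keeps GitLab from scheduling it) that defines the `&runtests` anchor, and each concrete job pulls the shared `script` in through the `<<: *runtests` merge key, overriding only its `variables`. A minimal sketch of how such a merge resolves, assuming the third-party PyYAML package and a reduced stand-in for the real config (not part of this changeset):

import yaml  # third-party PyYAML; safe_load resolves `<<` merge keys

snippet = """
.runtests_template: &runtests
    script:
        - cd tests/
        - $PYTHON run-tests.py --color=always $RUNTEST_ARGS

test-py3:
    <<: *runtests
    variables:
        PYTHON: python3
"""

config = yaml.safe_load(snippet)
# test-py3 inherits `script` from the hidden template job...
print(config['test-py3']['script'])
# ...while its own `variables` mapping is kept as written.
print(config['test-py3']['variables'])
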
@@ -1,497 +1,499 b''
# wix.py - WiX installer functionality
#
# Copyright 2019 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# no-check-code because Python 3 native.

import collections
import os
import pathlib
import re
import shutil
import subprocess
import typing
import uuid
import xml.dom.minidom

from .downloads import download_entry
from .py2exe import (
    build_py2exe,
    stage_install,
)
from .util import (
    extract_zip_to_directory,
    normalize_windows_version,
    process_install_rules,
    sign_with_signtool,
)


EXTRA_PACKAGES = {
    'dulwich',
    'distutils',
    'keyring',
    'pygments',
    'win32ctypes',
}


EXTRA_INSTALL_RULES = [
    ('contrib/packaging/wix/COPYING.rtf', 'COPYING.rtf'),
    ('contrib/win32/mercurial.ini', 'defaultrc/mercurial.rc'),
]

STAGING_REMOVE_FILES = [
    # We use the RTF variant.
    'copying.txt',
]

SHORTCUTS = {
    # hg.1.html'
    'hg.file.5d3e441c_28d9_5542_afd0_cdd4234f12d5': {
        'Name': 'Mercurial Command Reference',
    },
    # hgignore.5.html
    'hg.file.5757d8e0_f207_5e10_a2ec_3ba0a062f431': {
        'Name': 'Mercurial Ignore Files',
    },
    # hgrc.5.html
    'hg.file.92e605fd_1d1a_5dc6_9fc0_5d2998eb8f5e': {
        'Name': 'Mercurial Configuration Files',
    },
}


def find_version(source_dir: pathlib.Path):
    version_py = source_dir / 'mercurial' / '__version__.py'

    with version_py.open('r', encoding='utf-8') as fh:
        source = fh.read().strip()

    m = re.search('version = b"(.*)"', source)
    return m.group(1)


def ensure_vc90_merge_modules(build_dir):
    x86 = (
        download_entry(
            'vc9-crt-x86-msm',
            build_dir,
            local_name='microsoft.vcxx.crt.x86_msm.msm',
        )[0],
        download_entry(
            'vc9-crt-x86-msm-policy',
            build_dir,
            local_name='policy.x.xx.microsoft.vcxx.crt.x86_msm.msm',
        )[0],
    )

    x64 = (
        download_entry(
            'vc9-crt-x64-msm',
            build_dir,
            local_name='microsoft.vcxx.crt.x64_msm.msm',
        )[0],
        download_entry(
            'vc9-crt-x64-msm-policy',
            build_dir,
            local_name='policy.x.xx.microsoft.vcxx.crt.x64_msm.msm',
        )[0],
    )
    return {
        'x86': x86,
        'x64': x64,
    }


def run_candle(wix, cwd, wxs, source_dir, defines=None):
    args = [
        str(wix / 'candle.exe'),
        '-nologo',
        str(wxs),
        '-dSourceDir=%s' % source_dir,
    ]

    if defines:
        args.extend('-d%s=%s' % define for define in sorted(defines.items()))

    subprocess.run(args, cwd=str(cwd), check=True)


def make_post_build_signing_fn(
    name,
    subject_name=None,
    cert_path=None,
    cert_password=None,
    timestamp_url=None,
):
    """Create a callable that will use signtool to sign hg.exe."""

    def post_build_sign(source_dir, build_dir, dist_dir, version):
        description = '%s %s' % (name, version)

        sign_with_signtool(
            dist_dir / 'hg.exe',
            description,
            subject_name=subject_name,
            cert_path=cert_path,
            cert_password=cert_password,
            timestamp_url=timestamp_url,
        )

    return post_build_sign


def make_files_xml(staging_dir: pathlib.Path, is_x64) -> str:
    """Create XML string listing every file to be installed."""

    # We derive GUIDs from a deterministic file path identifier.
    # We shoehorn the name into something that looks like a URL because
    # the UUID namespaces are supposed to work that way (even though
    # the input data probably is never validated).

    doc = xml.dom.minidom.parseString(
        '<?xml version="1.0" encoding="utf-8"?>'
        '<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">'
        '</Wix>'
    )

    # Assemble the install layout by directory. This makes it easier to
    # emit XML, since each directory has separate entities.
    manifest = collections.defaultdict(dict)

    for root, dirs, files in os.walk(staging_dir):
        dirs.sort()

        root = pathlib.Path(root)
        rel_dir = root.relative_to(staging_dir)

        for i in range(len(rel_dir.parts)):
            parent = '/'.join(rel_dir.parts[0 : i + 1])
            manifest.setdefault(parent, {})

        for f in sorted(files):
            full = root / f
            manifest[str(rel_dir).replace('\\', '/')][full.name] = full

    component_groups = collections.defaultdict(list)

    # Now emit a <Fragment> for each directory.
    # Each directory is composed of a <DirectoryRef> pointing to its parent
    # and defines child <Directory>'s and a <Component> with all the files.
    for dir_name, entries in sorted(manifest.items()):
        # The directory id is derived from the path. But the root directory
        # is special.
        if dir_name == '.':
            parent_directory_id = 'INSTALLDIR'
        else:
            parent_directory_id = 'hg.dir.%s' % dir_name.replace('/', '.')

        fragment = doc.createElement('Fragment')
        directory_ref = doc.createElement('DirectoryRef')
        directory_ref.setAttribute('Id', parent_directory_id)

        # Add <Directory> entries for immediate children directories.
        for possible_child in sorted(manifest.keys()):
            if (
                dir_name == '.'
                and '/' not in possible_child
                and possible_child != '.'
            ):
                child_directory_id = 'hg.dir.%s' % possible_child
                name = possible_child
            else:
                if not possible_child.startswith('%s/' % dir_name):
                    continue
                name = possible_child[len(dir_name) + 1 :]
                if '/' in name:
                    continue

                child_directory_id = 'hg.dir.%s' % possible_child.replace(
                    '/', '.'
                )

            directory = doc.createElement('Directory')
            directory.setAttribute('Id', child_directory_id)
            directory.setAttribute('Name', name)
            directory_ref.appendChild(directory)

        # Add <Component>s for files in this directory.
        for rel, source_path in sorted(entries.items()):
            if dir_name == '.':
                full_rel = rel
            else:
                full_rel = '%s/%s' % (dir_name, rel)

            component_unique_id = (
                'https://www.mercurial-scm.org/wix-installer/0/component/%s'
                % full_rel
            )
            component_guid = uuid.uuid5(uuid.NAMESPACE_URL, component_unique_id)
            component_id = 'hg.component.%s' % str(component_guid).replace(
                '-', '_'
            )

            component = doc.createElement('Component')

            component.setAttribute('Id', component_id)
            component.setAttribute('Guid', str(component_guid).upper())
            component.setAttribute('Win64', 'yes' if is_x64 else 'no')

            # Assign this component to a top-level group.
            if dir_name == '.':
                component_groups['ROOT'].append(component_id)
            elif '/' in dir_name:
                component_groups[dir_name[0 : dir_name.index('/')]].append(
                    component_id
                )
            else:
                component_groups[dir_name].append(component_id)

            unique_id = (
                'https://www.mercurial-scm.org/wix-installer/0/%s' % full_rel
            )
            file_guid = uuid.uuid5(uuid.NAMESPACE_URL, unique_id)

            # IDs have length limits. So use GUID to derive them.
            file_guid_normalized = str(file_guid).replace('-', '_')
            file_id = 'hg.file.%s' % file_guid_normalized

            file_element = doc.createElement('File')
            file_element.setAttribute('Id', file_id)
            file_element.setAttribute('Source', str(source_path))
            file_element.setAttribute('KeyPath', 'yes')
            file_element.setAttribute('ReadOnly', 'yes')

            component.appendChild(file_element)
            directory_ref.appendChild(component)

        fragment.appendChild(directory_ref)
        doc.documentElement.appendChild(fragment)

    for group, component_ids in sorted(component_groups.items()):
        fragment = doc.createElement('Fragment')
        component_group = doc.createElement('ComponentGroup')
        component_group.setAttribute('Id', 'hg.group.%s' % group)

        for component_id in component_ids:
            component_ref = doc.createElement('ComponentRef')
            component_ref.setAttribute('Id', component_id)
            component_group.appendChild(component_ref)

        fragment.appendChild(component_group)
        doc.documentElement.appendChild(fragment)

    # Add <Shortcut> to files that have it defined.
    for file_id, metadata in sorted(SHORTCUTS.items()):
        els = doc.getElementsByTagName('File')
        els = [el for el in els if el.getAttribute('Id') == file_id]

        if not els:
            raise Exception('could not find File[Id=%s]' % file_id)

        for el in els:
            shortcut = doc.createElement('Shortcut')
            shortcut.setAttribute('Id', 'hg.shortcut.%s' % file_id)
            shortcut.setAttribute('Directory', 'ProgramMenuDir')
            shortcut.setAttribute('Icon', 'hgIcon.ico')
            shortcut.setAttribute('IconIndex', '0')
            shortcut.setAttribute('Advertise', 'yes')
            for k, v in sorted(metadata.items()):
                shortcut.setAttribute(k, v)

            el.appendChild(shortcut)

    return doc.toprettyxml()


def build_installer(
    source_dir: pathlib.Path,
    python_exe: pathlib.Path,
    msi_name='mercurial',
    version=None,
    post_build_fn=None,
    extra_packages_script=None,
    extra_wxs: typing.Optional[typing.Dict[str, str]] = None,
    extra_features: typing.Optional[typing.List[str]] = None,
):
    """Build a WiX MSI installer.

    ``source_dir`` is the path to the Mercurial source tree to use.
    ``arch`` is the target architecture. either ``x86`` or ``x64``.
    ``python_exe`` is the path to the Python executable to use/bundle.
    ``version`` is the Mercurial version string. If not defined,
    ``mercurial/__version__.py`` will be consulted.
    ``post_build_fn`` is a callable that will be called after building
    Mercurial but before invoking WiX. It can be used to e.g. facilitate
    signing. It is passed the paths to the Mercurial source, build, and
    dist directories and the resolved Mercurial version.
    ``extra_packages_script`` is a command to be run to inject extra packages
    into the py2exe binary. It should stage packages into the virtualenv and
    print a null byte followed by a newline-separated list of packages that
    should be included in the exe.
    ``extra_wxs`` is a dict of {wxs_name: working_dir_for_wxs_build}.
    ``extra_features`` is a list of additional named Features to include in
    the build. These must match Feature names in one of the wxs scripts.
    """
    arch = 'x64' if r'\x64' in os.environ.get('LIB', '') else 'x86'

    hg_build_dir = source_dir / 'build'
    dist_dir = source_dir / 'dist'
    wix_dir = source_dir / 'contrib' / 'packaging' / 'wix'

-    requirements_txt = 'requirements_win32.txt'
+    requirements_txt = (
+        source_dir / 'contrib' / 'packaging' / 'requirements_win32.txt'
+    )

    build_py2exe(
        source_dir,
        hg_build_dir,
        python_exe,
        'wix',
        requirements_txt,
        extra_packages=EXTRA_PACKAGES,
        extra_packages_script=extra_packages_script,
    )

    orig_version = version or find_version(source_dir)
    version = normalize_windows_version(orig_version)
    print('using version string: %s' % version)
    if version != orig_version:
        print('(normalized from: %s)' % orig_version)

    if post_build_fn:
        post_build_fn(source_dir, hg_build_dir, dist_dir, version)

    build_dir = hg_build_dir / ('wix-%s' % arch)
    staging_dir = build_dir / 'stage'

    build_dir.mkdir(exist_ok=True)

    # Purge the staging directory for every build so packaging is pristine.
    if staging_dir.exists():
        print('purging %s' % staging_dir)
        shutil.rmtree(staging_dir)

    stage_install(source_dir, staging_dir, lower_case=True)

    # We also install some extra files.
    process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir)

    # And remove some files we don't want.
    for f in STAGING_REMOVE_FILES:
        p = staging_dir / f
        if p.exists():
            print('removing %s' % p)
            p.unlink()

    wix_pkg, wix_entry = download_entry('wix', hg_build_dir)
    wix_path = hg_build_dir / ('wix-%s' % wix_entry['version'])

    if not wix_path.exists():
        extract_zip_to_directory(wix_pkg, wix_path)

    ensure_vc90_merge_modules(hg_build_dir)

    source_build_rel = pathlib.Path(os.path.relpath(source_dir, build_dir))

    defines = {'Platform': arch}

    # Derive a .wxs file with the staged files.
    manifest_wxs = build_dir / 'stage.wxs'
    with manifest_wxs.open('w', encoding='utf-8') as fh:
        fh.write(make_files_xml(staging_dir, is_x64=arch == 'x64'))

    run_candle(wix_path, build_dir, manifest_wxs, staging_dir, defines=defines)

    for source, rel_path in sorted((extra_wxs or {}).items()):
        run_candle(wix_path, build_dir, source, rel_path, defines=defines)

    source = wix_dir / 'mercurial.wxs'
    defines['Version'] = version
    defines['Comments'] = 'Installs Mercurial version %s' % version
    defines['VCRedistSrcDir'] = str(hg_build_dir)
    if extra_features:
        assert all(';' not in f for f in extra_features)
        defines['MercurialExtraFeatures'] = ';'.join(extra_features)

    run_candle(wix_path, build_dir, source, source_build_rel, defines=defines)

    msi_path = (
        source_dir / 'dist' / ('%s-%s-%s.msi' % (msi_name, orig_version, arch))
    )

    args = [
        str(wix_path / 'light.exe'),
        '-nologo',
        '-ext',
        'WixUIExtension',
        '-sw1076',
        '-spdb',
        '-o',
        str(msi_path),
    ]

    for source, rel_path in sorted((extra_wxs or {}).items()):
        assert source.endswith('.wxs')
        source = os.path.basename(source)
        args.append(str(build_dir / ('%s.wixobj' % source[:-4])))

    args.extend(
        [str(build_dir / 'stage.wixobj'), str(build_dir / 'mercurial.wixobj'),]
    )

    subprocess.run(args, cwd=str(source_dir), check=True)

    print('%s created' % msi_path)

    return {
        'msi_path': msi_path,
    }


def build_signed_installer(
    source_dir: pathlib.Path,
    python_exe: pathlib.Path,
    name: str,
    version=None,
    subject_name=None,
    cert_path=None,
    cert_password=None,
    timestamp_url=None,
    extra_packages_script=None,
    extra_wxs=None,
    extra_features=None,
):
    """Build an installer with signed executables."""

    post_build_fn = make_post_build_signing_fn(
        name,
        subject_name=subject_name,
        cert_path=cert_path,
        cert_password=cert_password,
        timestamp_url=timestamp_url,
    )

    info = build_installer(
        source_dir,
        python_exe=python_exe,
        msi_name=name.lower(),
        version=version,
        post_build_fn=post_build_fn,
        extra_packages_script=extra_packages_script,
        extra_wxs=extra_wxs,
        extra_features=extra_features,
    )

    description = '%s %s' % (name, version)

    sign_with_signtool(
        info['msi_path'],
        description,
        subject_name=subject_name,
        cert_path=cert_path,
        cert_password=cert_password,
        timestamp_url=timestamp_url,
    )
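
A detail of make_files_xml() above worth calling out: component and file GUIDs are derived with uuid.uuid5() from a URL-shaped identifier built from each staged file's relative path, so every rebuild assigns the same GUID to the same path. Stable component GUIDs are what Windows Installer relies on to recognize a file as the same component across upgrades. A minimal standalone sketch of that scheme (the path 'lib/example.pyd' is hypothetical, not from this changeset):

import uuid

full_rel = 'lib/example.pyd'  # hypothetical staged file path

# Same construction as make_files_xml(): shoehorn the path into a URL so
# uuid.NAMESPACE_URL applies, then hash it into a version-5 UUID.
component_unique_id = (
    'https://www.mercurial-scm.org/wix-installer/0/component/%s' % full_rel
)
component_guid = uuid.uuid5(uuid.NAMESPACE_URL, component_unique_id)

# WiX identifiers have length and character limits, so the GUID (not the
# path itself) is normalized into the Id attribute.
component_id = 'hg.component.%s' % str(component_guid).replace('-', '_')

# Deterministic: re-running this always prints the same pair.
print(component_guid, component_id)
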
@@ -1,241 +1,243 b''
# zeroconf.py - zeroconf support for Mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''discover and advertise repositories on the local network

The zeroconf extension will advertise :hg:`serve` instances over
DNS-SD so that they can be discovered using the :hg:`paths` command
without knowing the server's IP address.

To allow other people to discover your repository using run
:hg:`serve` in your repository::

  $ cd test
  $ hg serve

You can discover Zeroconf-enabled repositories by running
:hg:`paths`::

  $ hg paths
  zc-test = http://example.com:8000/test
'''
from __future__ import absolute_import

import os
import socket
import time

from . import Zeroconf
from mercurial import (
    dispatch,
    encoding,
    extensions,
    hg,
    pycompat,
+    rcutil,
    ui as uimod,
)
from mercurial.hgweb import server as servermod

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# publish

server = None
localip = None


def getip():
    # finds external-facing interface without sending any packets (Linux)
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('1.0.0.1', 0))
        ip = s.getsockname()[0]
        return ip
    except socket.error:
        pass

    # Generic method, sometimes gives useless results
    try:
        dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
        if ':' in dumbip:
            dumbip = '127.0.0.1'
        if not dumbip.startswith('127.'):
            return dumbip
    except (socket.gaierror, socket.herror):
        dumbip = '127.0.0.1'

    # works elsewhere, but actually sends a packet
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('1.0.0.1', 1))
        ip = s.getsockname()[0]
        return ip
    except socket.error:
        pass

    return dumbip


def publish(name, desc, path, port):
    global server, localip
    if not server:
        ip = getip()
        if ip.startswith('127.'):
            # if we have no internet connection, this can happen.
            return
        localip = socket.inet_aton(ip)
        server = Zeroconf.Zeroconf(ip)

    hostname = socket.gethostname().split('.')[0]
    host = hostname + ".local"
    name = "%s-%s" % (hostname, name)

    # advertise to browsers
    svc = Zeroconf.ServiceInfo(
        b'_http._tcp.local.',
        pycompat.bytestr(name + '._http._tcp.local.'),
        server=host,
        port=port,
        properties={b'description': desc, b'path': b"/" + path},
        address=localip,
        weight=0,
        priority=0,
    )
    server.registerService(svc)

    # advertise to Mercurial clients
    svc = Zeroconf.ServiceInfo(
        b'_hg._tcp.local.',
        pycompat.bytestr(name + '._hg._tcp.local.'),
        server=host,
        port=port,
        properties={b'description': desc, b'path': b"/" + path},
        address=localip,
        weight=0,
        priority=0,
    )
    server.registerService(svc)


def zc_create_server(create_server, ui, app):
    httpd = create_server(ui, app)
    port = httpd.port

    try:
        repos = app.repos
    except AttributeError:
        # single repo
        with app._obtainrepo() as repo:
            name = app.reponame or os.path.basename(repo.root)
            path = repo.ui.config(b"web", b"prefix", b"").strip(b'/')
            desc = repo.ui.config(b"web", b"description")
            if not desc:
                desc = name
        publish(name, desc, path, port)
    else:
        # webdir
        prefix = app.ui.config(b"web", b"prefix", b"").strip(b'/') + b'/'
        for repo, path in repos:
            u = app.ui.copy()
-            u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
+            if rcutil.use_repo_hgrc():
+                u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
            name = os.path.basename(repo)
            path = (prefix + repo).strip(b'/')
            desc = u.config(b'web', b'description')
            if not desc:
                desc = name
            publish(name, desc, path, port)
    return httpd


# listen


class listener(object):
    def __init__(self):
        self.found = {}

    def removeService(self, server, type, name):
        if repr(name) in self.found:
            del self.found[repr(name)]

    def addService(self, server, type, name):
        self.found[repr(name)] = server.getServiceInfo(type, name)


def getzcpaths():
    ip = getip()
    if ip.startswith('127.'):
        return
    server = Zeroconf.Zeroconf(ip)
    l = listener()
    Zeroconf.ServiceBrowser(server, b"_hg._tcp.local.", l)
    time.sleep(1)
    server.close()
    for value in l.found.values():
        name = value.name[: value.name.index(b'.')]
        url = "http://%s:%s%s" % (
            socket.inet_ntoa(value.address),
            value.port,
            value.properties.get("path", "/"),
        )
        yield b"zc-" + name, pycompat.bytestr(url)


def config(orig, self, section, key, *args, **kwargs):
    if section == b"paths" and key.startswith(b"zc-"):
        for name, path in getzcpaths():
            if name == key:
                return path
    return orig(self, section, key, *args, **kwargs)


def configitems(orig, self, section, *args, **kwargs):
    repos = orig(self, section, *args, **kwargs)
    if section == b"paths":
        repos += getzcpaths()
    return repos


def configsuboptions(orig, self, section, name, *args, **kwargs):
    opt, sub = orig(self, section, name, *args, **kwargs)
    if section == b"paths" and name.startswith(b"zc-"):
        # We have to find the URL in the zeroconf paths. We can't cons up any
        # suboptions, so we use any that we found in the original config.
        for zcname, zcurl in getzcpaths():
            if zcname == name:
                return zcurl, sub
    return opt, sub


def defaultdest(orig, source):
    for name, path in getzcpaths():
        if path == source:
            return name.encode(encoding.encoding)
    return orig(source)


def cleanupafterdispatch(orig, ui, options, cmd, cmdfunc):
    try:
        return orig(ui, options, cmd, cmdfunc)
    finally:
        # we need to call close() on the server to notify() the various
        # threading Conditions and allow the background threads to exit
        global server
        if server:
            server.close()


extensions.wrapfunction(dispatch, b'_runcommand', cleanupafterdispatch)

extensions.wrapfunction(uimod.ui, b'config', config)
extensions.wrapfunction(uimod.ui, b'configitems', configitems)
extensions.wrapfunction(uimod.ui, b'configsuboptions', configsuboptions)
extensions.wrapfunction(hg, b'defaultdest', defaultdest)
extensions.wrapfunction(servermod, b'create_server', zc_create_server)
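
The getip() helper above relies on a classic trick: calling connect() on a UDP socket transmits nothing; it only asks the kernel to pick a route, after which getsockname() reveals the local address chosen for the outward-facing interface. A standalone sketch of just that trick (the probe address mirrors the extension's '1.0.0.1'; as the source comments note, behaviour can vary off Linux):

import socket

def outward_ip(probe_addr='1.0.0.1', probe_port=9):
    # SOCK_DGRAM + connect() selects a route without sending a packet;
    # the port number is irrelevant because no traffic is generated.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((probe_addr, probe_port))
        return s.getsockname()[0]
    except socket.error:
        # No usable route (e.g. no network connection): fall back to
        # loopback, which is also how the extension detects "don't advertise".
        return '127.0.0.1'
    finally:
        s.close()

print(outward_ip())
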
@@ -1,2578 +1,2578 b''
1 # bundle2.py - generic container format to transmit arbitrary data.
1 # bundle2.py - generic container format to transmit arbitrary data.
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Handling of the new bundle2 format
7 """Handling of the new bundle2 format
8
8
9 The goal of bundle2 is to act as an atomically packet to transmit a set of
9 The goal of bundle2 is to act as an atomically packet to transmit a set of
10 payloads in an application agnostic way. It consist in a sequence of "parts"
10 payloads in an application agnostic way. It consist in a sequence of "parts"
11 that will be handed to and processed by the application layer.
11 that will be handed to and processed by the application layer.
12
12
13
13
14 General format architecture
14 General format architecture
15 ===========================
15 ===========================
16
16
17 The format is architectured as follow
17 The format is architectured as follow
18
18
19 - magic string
19 - magic string
20 - stream level parameters
20 - stream level parameters
21 - payload parts (any number)
21 - payload parts (any number)
22 - end of stream marker.
22 - end of stream marker.
23
23
24 the Binary format
24 the Binary format
25 ============================
25 ============================
26
26
27 All numbers are unsigned and big-endian.
27 All numbers are unsigned and big-endian.
28
28
29 stream level parameters
29 stream level parameters
30 ------------------------
30 ------------------------
31
31
32 Binary format is as follow
32 Binary format is as follow
33
33
34 :params size: int32
34 :params size: int32
35
35
36 The total number of Bytes used by the parameters
36 The total number of Bytes used by the parameters
37
37
38 :params value: arbitrary number of Bytes
38 :params value: arbitrary number of Bytes
39
39
40 A blob of `params size` containing the serialized version of all stream level
40 A blob of `params size` containing the serialized version of all stream level
41 parameters.
41 parameters.
42
42
43 The blob contains a space separated list of parameters. Parameters with value
43 The blob contains a space separated list of parameters. Parameters with value
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
45
45
46 Empty name are obviously forbidden.
46 Empty name are obviously forbidden.
47
47
48 Name MUST start with a letter. If this first letter is lower case, the
48 Name MUST start with a letter. If this first letter is lower case, the
49 parameter is advisory and can be safely ignored. However when the first
49 parameter is advisory and can be safely ignored. However when the first
50 letter is capital, the parameter is mandatory and the bundling process MUST
50 letter is capital, the parameter is mandatory and the bundling process MUST
51 stop if he is not able to proceed it.
51 stop if he is not able to proceed it.
52
52
53 Stream parameters use a simple textual format for two main reasons:
53 Stream parameters use a simple textual format for two main reasons:
54
54
55 - Stream level parameters should remain simple and we want to discourage any
55 - Stream level parameters should remain simple and we want to discourage any
56 crazy usage.
56 crazy usage.
57 - Textual data allow easy human inspection of a bundle2 header in case of
57 - Textual data allow easy human inspection of a bundle2 header in case of
58 troubles.
58 troubles.
59
59
60 Any Applicative level options MUST go into a bundle2 part instead.
60 Any Applicative level options MUST go into a bundle2 part instead.
61
61
62 Payload part
62 Payload part
63 ------------------------
63 ------------------------
64
64
65 Binary format is as follow
65 Binary format is as follow
66
66
67 :header size: int32
67 :header size: int32
68
68
69 The total number of Bytes used by the part header. When the header is empty
69 The total number of Bytes used by the part header. When the header is empty
70 (size = 0) this is interpreted as the end of stream marker.
70 (size = 0) this is interpreted as the end of stream marker.
71
71
72 :header:
72 :header:
73
73
74 The header defines how to interpret the part. It contains two piece of
74 The header defines how to interpret the part. It contains two piece of
75 data: the part type, and the part parameters.
75 data: the part type, and the part parameters.
76
76
77 The part type is used to route an application level handler, that can
77 The part type is used to route an application level handler, that can
78 interpret payload.
78 interpret payload.
79
79
80 Part parameters are passed to the application level handler. They are
80 Part parameters are passed to the application level handler. They are
81 meant to convey information that will help the application level object to
81 meant to convey information that will help the application level object to
82 interpret the part payload.
82 interpret the part payload.
83
83
84 The binary format of the header is has follow
84 The binary format of the header is has follow
85
85
86 :typesize: (one byte)
86 :typesize: (one byte)
87
87
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
89
89
90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
91 to this part.
91 to this part.
92
92
93 :parameters:
93 :parameters:
94
94
95 Part's parameter may have arbitrary content, the binary structure is::
95 Part's parameter may have arbitrary content, the binary structure is::
96
96
97 <mandatory-count><advisory-count><param-sizes><param-data>
97 <mandatory-count><advisory-count><param-sizes><param-data>
98
98
99 :mandatory-count: 1 byte, number of mandatory parameters
99 :mandatory-count: 1 byte, number of mandatory parameters
100
100
101 :advisory-count: 1 byte, number of advisory parameters
101 :advisory-count: 1 byte, number of advisory parameters
102
102
103 :param-sizes:
103 :param-sizes:
104
104
105 N couple of bytes, where N is the total number of parameters. Each
105 N couple of bytes, where N is the total number of parameters. Each
106 couple contains (<size-of-key>, <size-of-value) for one parameter.
106 couple contains (<size-of-key>, <size-of-value) for one parameter.
107
107
108 :param-data:
108 :param-data:
109
109
110 A blob of bytes from which each parameter key and value can be
110 A blob of bytes from which each parameter key and value can be
111 retrieved using the list of size couples stored in the previous
111 retrieved using the list of size couples stored in the previous
112 field.
112 field.
113
113
114 Mandatory parameters comes first, then the advisory ones.
114 Mandatory parameters comes first, then the advisory ones.
115
115
116 Each parameter's key MUST be unique within the part.
116 Each parameter's key MUST be unique within the part.
117
117
118 :payload:
118 :payload:
119
119
120 payload is a series of `<chunksize><chunkdata>`.
120 payload is a series of `<chunksize><chunkdata>`.
121
121
122 `chunksize` is an int32, `chunkdata` are plain bytes (as much as
122 `chunksize` is an int32, `chunkdata` are plain bytes (as much as
123 `chunksize` says)` The payload part is concluded by a zero size chunk.
123 `chunksize` says)` The payload part is concluded by a zero size chunk.
124
124
125 The current implementation always produces either zero or one chunk.
125 The current implementation always produces either zero or one chunk.
126 This is an implementation limitation that will ultimately be lifted.
126 This is an implementation limitation that will ultimately be lifted.
127
127
128 `chunksize` can be negative to trigger special case processing. No such
128 `chunksize` can be negative to trigger special case processing. No such
129 processing is in place yet.
129 processing is in place yet.
130
130
131 Bundle processing
131 Bundle processing
132 ============================
132 ============================
133
133
134 Each part is processed in order using a "part handler". Handler are registered
134 Each part is processed in order using a "part handler". Handler are registered
135 for a certain part type.
135 for a certain part type.
136
136
137 The matching of a part to its handler is case insensitive. The case of the
137 The matching of a part to its handler is case insensitive. The case of the
138 part type is used to know if a part is mandatory or advisory. If the Part type
138 part type is used to know if a part is mandatory or advisory. If the Part type
139 contains any uppercase char it is considered mandatory. When no handler is
139 contains any uppercase char it is considered mandatory. When no handler is
140 known for a Mandatory part, the process is aborted and an exception is raised.
140 known for a Mandatory part, the process is aborted and an exception is raised.
141 If the part is advisory and no handler is known, the part is ignored. When the
141 If the part is advisory and no handler is known, the part is ignored. When the
142 process is aborted, the full bundle is still read from the stream to keep the
142 process is aborted, the full bundle is still read from the stream to keep the
143 channel usable. But none of the part read from an abort are processed. In the
143 channel usable. But none of the part read from an abort are processed. In the
144 future, dropping the stream may become an option for channel we do not care to
144 future, dropping the stream may become an option for channel we do not care to
145 preserve.
145 preserve.
146 """
146 """

from __future__ import absolute_import, division

import collections
import errno
import os
import re
import string
import struct
import sys

from .i18n import _
from . import (
    bookmarks,
    changegroup,
    encoding,
    error,
    node as nodemod,
    obsolete,
    phases,
    pushkey,
    pycompat,
    streamclone,
    tags,
    url,
    util,
)
from .utils import stringutil

urlerr = util.urlerr
urlreq = util.urlreq

_pack = struct.pack
_unpack = struct.unpack

_fstreamparamsize = b'>i'
_fpartheadersize = b'>i'
_fparttypesize = b'>B'
_fpartid = b'>I'
_fpayloadsize = b'>i'
_fpartparamcount = b'>BB'

preferedchunksize = 32768

_parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')


def outdebug(ui, message):
    """debug regarding output stream (bundling)"""
    if ui.configbool(b'devel', b'bundle2.debug'):
        ui.debug(b'bundle2-output: %s\n' % message)


def indebug(ui, message):
    """debug on input stream (unbundling)"""
    if ui.configbool(b'devel', b'bundle2.debug'):
        ui.debug(b'bundle2-input: %s\n' % message)


def validateparttype(parttype):
    """raise ValueError if a parttype contains an invalid character"""
    if _parttypeforbidden.search(parttype):
        raise ValueError(parttype)
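
# A quick illustration (not part of the original module) of the rule above:
#
#   validateparttype(b'my:part_type-1')  # fine
#   validateparttype(b'bad part')        # raises ValueError (space is invalid)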


def _makefpartparamsizes(nbparams):
    """return a struct format to read part parameter sizes

    The number of parameters is variable so we need to build that format
    dynamically.
    """
    return b'>' + (b'BB' * nbparams)
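
# For example (illustrative, not part of the original module):
#
#   _makefpartparamsizes(2) == b'>BBBB'
#
# i.e. a big-endian format with one unsigned byte for each of the two
# (key size, value size) pairs.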


parthandlermapping = {}


def parthandler(parttype, params=()):
    """decorator that registers a function as a bundle2 part handler

    eg::

        @parthandler('myparttype', ('mandatory', 'param', 'handled'))
        def myparttypehandler(...):
            '''process a part of type "my part".'''
            ...
    """
    validateparttype(parttype)

    def _decorator(func):
        lparttype = parttype.lower()  # enforce lower case matching.
        assert lparttype not in parthandlermapping
        parthandlermapping[lparttype] = func
        func.params = frozenset(params)
        return func

    return _decorator


class unbundlerecords(object):
    """keep record of what happens during an unbundle

    New records are added using `records.add('cat', obj)`, where 'cat' is a
    category of record and obj is an arbitrary object.

    `records['cat']` will return all entries of this category 'cat'.

    Iterating on the object itself will yield `('category', obj)` tuples
    for all entries.

    All iterations happen in chronological order.
    """

    def __init__(self):
        self._categories = {}
        self._sequences = []
        self._replies = {}

    def add(self, category, entry, inreplyto=None):
        """add a new record of a given category.

        The entry can then be retrieved in the list returned by
        self['category']."""
        self._categories.setdefault(category, []).append(entry)
        self._sequences.append((category, entry))
        if inreplyto is not None:
            self.getreplies(inreplyto).add(category, entry)

    def getreplies(self, partid):
        """get the records that are replies to a specific part"""
        return self._replies.setdefault(partid, unbundlerecords())

    def __getitem__(self, cat):
        return tuple(self._categories.get(cat, ()))

    def __iter__(self):
        return iter(self._sequences)

    def __len__(self):
        return len(self._sequences)

    def __nonzero__(self):
        return bool(self._sequences)

    __bool__ = __nonzero__
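
# Usage sketch (illustrative, not part of the original module):
#
#   records = unbundlerecords()
#   records.add(b'changegroup', {b'return': 1})
#   records[b'changegroup']   # -> ({b'return': 1},)
#   list(records)             # -> [(b'changegroup', {b'return': 1})]
#   bool(records)             # -> True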


class bundleoperation(object):
    """an object that represents a single bundling process

    Its purpose is to carry unbundle-related objects and states.

    A new object should be created at the beginning of each bundle processing.
    The object is to be returned by the processing function.

    The object currently has very little content; it will ultimately contain:
    * an access to the repo the bundle is applied to,
    * a ui object,
    * a way to retrieve a transaction to add changes to the repo,
    * a way to record the result of processing each part,
    * a way to construct a bundle response when applicable.
    """

    def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
        self.repo = repo
        self.ui = repo.ui
        self.records = unbundlerecords()
        self.reply = None
        self.captureoutput = captureoutput
        self.hookargs = {}
        self._gettransaction = transactiongetter
        # carries value that can modify part behavior
        self.modes = {}
        self.source = source

    def gettransaction(self):
        transaction = self._gettransaction()

        if self.hookargs:
            # the ones added to the transaction supersede those added
            # to the operation.
            self.hookargs.update(transaction.hookargs)
            transaction.hookargs = self.hookargs

            # mark the hookargs as flushed. further attempts to add to
            # hookargs will result in an abort.
            self.hookargs = None

        return transaction

    def addhookargs(self, hookargs):
        if self.hookargs is None:
            raise error.ProgrammingError(
                b'attempted to add hookargs to '
                b'operation after transaction started'
            )
        self.hookargs.update(hookargs)


class TransactionUnavailable(RuntimeError):
    pass


def _notransaction():
    """default method to get a transaction while processing a bundle

    Raise an exception to highlight the fact that no transaction was expected
    to be created"""
    raise TransactionUnavailable()


def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
    # transform me into unbundler.apply() as soon as the freeze is lifted
    if isinstance(unbundler, unbundle20):
        tr.hookargs[b'bundle2'] = b'1'
        if source is not None and b'source' not in tr.hookargs:
            tr.hookargs[b'source'] = source
        if url is not None and b'url' not in tr.hookargs:
            tr.hookargs[b'url'] = url
        return processbundle(repo, unbundler, lambda: tr, source=source)
    else:
        # the transactiongetter won't be used, but we might as well set it
        op = bundleoperation(repo, lambda: tr, source=source)
        _processchangegroup(op, unbundler, tr, source, url, **kwargs)
        return op
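
# Typical call pattern (an illustrative sketch; `repo` is assumed to be a
# localrepo-style object and `unbundler` the object returned by
# `getunbundler` below):
#
#   with repo.transaction(b'unbundle') as tr:
#       op = applybundle(repo, unbundler, tr, source=b'push')
#       op.records[b'changegroup']  # results recorded per category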


class partiterator(object):
    def __init__(self, repo, op, unbundler):
        self.repo = repo
        self.op = op
        self.unbundler = unbundler
        self.iterator = None
        self.count = 0
        self.current = None

    def __enter__(self):
        def func():
            itr = enumerate(self.unbundler.iterparts(), 1)
            for count, p in itr:
                self.count = count
                self.current = p
                yield p
                p.consume()
                self.current = None

        self.iterator = func()
        return self.iterator

    def __exit__(self, type, exc, tb):
        if not self.iterator:
            return

        # Only gracefully abort in a normal exception situation. User aborts
        # like Ctrl+C throw a KeyboardInterrupt which is not a subclass of
        # Exception, and should not trigger this graceful cleanup.
        if isinstance(exc, Exception):
            # Any exceptions seeking to the end of the bundle at this point are
            # almost certainly related to the underlying stream being bad.
            # And, chances are that the exception we're handling is related to
            # getting in that bad state. So, we swallow the seeking error and
            # re-raise the original error.
            seekerror = False
            try:
                if self.current:
                    # consume the part content to not corrupt the stream.
                    self.current.consume()

                for part in self.iterator:
                    # consume the bundle content
                    part.consume()
            except Exception:
                seekerror = True

            # Small hack to let caller code distinguish exceptions from bundle2
            # processing from processing the old format. This is mostly needed
            # to handle different return codes to unbundle according to the type
            # of bundle. We should probably clean up or drop this return code
            # craziness in a future version.
            exc.duringunbundle2 = True
            salvaged = []
            replycaps = None
            if self.op.reply is not None:
                salvaged = self.op.reply.salvageoutput()
                replycaps = self.op.reply.capabilities
            exc._replycaps = replycaps
            exc._bundle2salvagedoutput = salvaged

            # Re-raising from a variable loses the original stack. So only use
            # that form if we need to.
            if seekerror:
                raise exc

        self.repo.ui.debug(
            b'bundle2-input-bundle: %i parts total\n' % self.count
        )


def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
    """This function processes a bundle, applying effects to/from a repo

    It iterates over each part then searches for and uses the proper handling
    code to process the part. Parts are processed in order.

    An unknown mandatory part will abort the process.

    It is temporarily possible to provide a prebuilt bundleoperation to the
    function. This is used to ensure output is properly propagated in case of
    an error during the unbundling. This output capturing part will likely be
    reworked and this ability will probably go away in the process.
    """
    if op is None:
        if transactiongetter is None:
            transactiongetter = _notransaction
        op = bundleoperation(repo, transactiongetter, source=source)
    # todo:
    # - replace this with an init function soon.
    # - exception catching
    unbundler.params
    if repo.ui.debugflag:
        msg = [b'bundle2-input-bundle:']
        if unbundler.params:
            msg.append(b' %i params' % len(unbundler.params))
        if op._gettransaction is None or op._gettransaction is _notransaction:
            msg.append(b' no-transaction')
        else:
            msg.append(b' with-transaction')
        msg.append(b'\n')
        repo.ui.debug(b''.join(msg))

    processparts(repo, op, unbundler)

    return op


def processparts(repo, op, unbundler):
    with partiterator(repo, op, unbundler) as parts:
        for part in parts:
            _processpart(op, part)


def _processchangegroup(op, cg, tr, source, url, **kwargs):
    ret = cg.apply(op.repo, tr, source, url, **kwargs)
    op.records.add(b'changegroup', {b'return': ret})
    return ret


def _gethandler(op, part):
    status = b'unknown'  # used by debug output
    try:
        handler = parthandlermapping.get(part.type)
        if handler is None:
            status = b'unsupported-type'
            raise error.BundleUnknownFeatureError(parttype=part.type)
        indebug(op.ui, b'found a handler for part %s' % part.type)
        unknownparams = part.mandatorykeys - handler.params
        if unknownparams:
            unknownparams = list(unknownparams)
            unknownparams.sort()
            status = b'unsupported-params (%s)' % b', '.join(unknownparams)
            raise error.BundleUnknownFeatureError(
                parttype=part.type, params=unknownparams
            )
        status = b'supported'
    except error.BundleUnknownFeatureError as exc:
        if part.mandatory:  # mandatory parts
            raise
        indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
        return  # skip the part processing
    finally:
        if op.ui.debugflag:
            msg = [b'bundle2-input-part: "%s"' % part.type]
            if not part.mandatory:
                msg.append(b' (advisory)')
            nbmp = len(part.mandatorykeys)
            nbap = len(part.params) - nbmp
            if nbmp or nbap:
                msg.append(b' (params:')
                if nbmp:
                    msg.append(b' %i mandatory' % nbmp)
                if nbap:
                    msg.append(b' %i advisory' % nbap)
                msg.append(b')')
            msg.append(b' %s\n' % status)
            op.ui.debug(b''.join(msg))

    return handler


def _processpart(op, part):
    """process a single part from a bundle

    The part is guaranteed to have been fully consumed when the function exits
    (even if an exception is raised)."""
    handler = _gethandler(op, part)
    if handler is None:
        return

    # The handler is called outside the above try block so that we don't
    # risk catching KeyErrors from anything other than the
    # parthandlermapping lookup (any KeyError raised by handler()
    # itself represents a defect of a different variety).
    output = None
    if op.captureoutput and op.reply is not None:
        op.ui.pushbuffer(error=True, subproc=True)
        output = b''
    try:
        handler(op, part)
    finally:
        if output is not None:
            output = op.ui.popbuffer()
        if output:
            outpart = op.reply.newpart(b'output', data=output, mandatory=False)
            outpart.addparam(
                b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
            )

def decodecaps(blob):
    """decode a bundle2 caps bytes blob into a dictionary

    The blob is a list of capabilities (one per line).
    Capabilities may have values using a line of the form::

        capability=value1,value2,value3

    The values are always a list."""
    caps = {}
    for line in blob.splitlines():
        if not line:
            continue
        if b'=' not in line:
            key, vals = line, ()
        else:
            key, vals = line.split(b'=', 1)
            vals = vals.split(b',')
        key = urlreq.unquote(key)
        vals = [urlreq.unquote(v) for v in vals]
        caps[key] = vals
    return caps


def encodecaps(caps):
    """encode a bundle2 caps dictionary into a bytes blob"""
    chunks = []
    for ca in sorted(caps):
        vals = caps[ca]
        ca = urlreq.quote(ca)
        vals = [urlreq.quote(v) for v in vals]
        if vals:
            ca = b"%s=%s" % (ca, b','.join(vals))
        chunks.append(ca)
    return b'\n'.join(chunks)
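
# Round-trip sketch (illustrative, not part of the original module):
#
#   blob = encodecaps({b'HG20': [], b'compression': [b'zlib', b'bz2']})
#   decodecaps(blob) == {b'HG20': [], b'compression': [b'zlib', b'bz2']}
#
# A capability with no values is encoded as a bare line, one with values as
# `capability=value1,value2`.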


bundletypes = {
    b"": (b"", b'UN'),  # only when using unbundle on ssh and old http servers
    # since the unification ssh accepts a header but there
    # is no capability signaling it.
    b"HG20": (),  # special-cased below
    b"HG10UN": (b"HG10UN", b'UN'),
    b"HG10BZ": (b"HG10", b'BZ'),
    b"HG10GZ": (b"HG10GZ", b'GZ'),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']

class bundle20(object):
    """represent an outgoing bundle2 container

    Use the `addparam` method to add stream level parameters and the
    `newpart` method to populate it with parts. Then call `getchunks` to
    retrieve all the binary chunks of data that compose the bundle2
    container."""

    _magicstring = b'HG20'

    def __init__(self, ui, capabilities=()):
        self.ui = ui
        self._params = []
        self._parts = []
        self.capabilities = dict(capabilities)
        self._compengine = util.compengines.forbundletype(b'UN')
        self._compopts = None
        # If compression is being handled by a consumer of the raw
        # data (e.g. the wire protocol), unsetting this flag tells
        # consumers that the bundle is best left uncompressed.
        self.prefercompressed = True

    def setcompression(self, alg, compopts=None):
        """setup core part compression to <alg>"""
        if alg in (None, b'UN'):
            return
        assert not any(n.lower() == b'compression' for n, v in self._params)
        self.addparam(b'Compression', alg)
        self._compengine = util.compengines.forbundletype(alg)
        self._compopts = compopts

    @property
    def nbparts(self):
        """total number of parts added to the bundler"""
        return len(self._parts)

    # methods used to define the bundle2 content
    def addparam(self, name, value=None):
        """add a stream level parameter"""
        if not name:
            raise error.ProgrammingError(b'empty parameter name')
        if name[0:1] not in pycompat.bytestr(
            string.ascii_letters  # pytype: disable=wrong-arg-types
        ):
            raise error.ProgrammingError(
                b'non letter first character: %s' % name
            )
        self._params.append((name, value))

    def addpart(self, part):
        """add a new part to the bundle2 container

        Parts contain the actual applicative payload."""
        assert part.id is None
        part.id = len(self._parts)  # very cheap counter
        self._parts.append(part)

    def newpart(self, typeid, *args, **kwargs):
        """create a new part and add it to the container

        The part is directly added to the container. For now, this means that
        any failure to properly initialize the part after calling ``newpart``
        should result in a failure of the whole bundling process.

        You can still fall back to manually creating and adding the part if
        you need better control."""
        part = bundlepart(typeid, *args, **kwargs)
        self.addpart(part)
        return part

    # methods used to generate the bundle2 stream
    def getchunks(self):
        if self.ui.debugflag:
            msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
            if self._params:
                msg.append(b' (%i params)' % len(self._params))
            msg.append(b' %i parts total\n' % len(self._parts))
            self.ui.debug(b''.join(msg))
        outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
        yield self._magicstring
        param = self._paramchunk()
        outdebug(self.ui, b'bundle parameter: %s' % param)
        yield _pack(_fstreamparamsize, len(param))
        if param:
            yield param
        for chunk in self._compengine.compressstream(
            self._getcorechunk(), self._compopts
        ):
            yield chunk

    def _paramchunk(self):
        """return an encoded version of all stream parameters"""
        blocks = []
        for par, value in self._params:
            par = urlreq.quote(par)
            if value is not None:
                value = urlreq.quote(value)
                par = b'%s=%s' % (par, value)
            blocks.append(par)
        return b' '.join(blocks)

    def _getcorechunk(self):
        """yield chunks for the core part of the bundle

        (all but headers and parameters)"""
        outdebug(self.ui, b'start of parts')
        for part in self._parts:
            outdebug(self.ui, b'bundle part: "%s"' % part.type)
            for chunk in part.getchunks(ui=self.ui):
                yield chunk
        outdebug(self.ui, b'end of bundle')
        yield _pack(_fpartheadersize, 0)

    def salvageoutput(self):
        """return a list with a copy of all output parts in the bundle

        This is meant to be used during error handling to make sure we
        preserve server output"""
        salvaged = []
        for part in self._parts:
            if part.type.startswith(b'output'):
                salvaged.append(part.copy())
        return salvaged
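
# Construction sketch (illustrative, not part of the original module; `ui`
# is assumed to be a mercurial ui object):
#
#   bundler = bundle20(ui)
#   bundler.setcompression(b'GZ')
#   bundler.newpart(b'output', data=b'hello', mandatory=False)
#   raw = b''.join(bundler.getchunks())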


class unpackermixin(object):
    """A mixin to extract bytes and struct data from a stream"""

    def __init__(self, fp):
        self._fp = fp

    def _unpack(self, format):
        """unpack this struct format from the stream

        This method is meant for internal usage by the bundle2 protocol only.
        It directly manipulates the low level stream, including bundle2 level
        instructions.

        Do not use it to implement higher-level logic or methods."""
        data = self._readexact(struct.calcsize(format))
        return _unpack(format, data)

    def _readexact(self, size):
        """read exactly <size> bytes from the stream

        This method is meant for internal usage by the bundle2 protocol only.
        It directly manipulates the low level stream, including bundle2 level
        instructions.

        Do not use it to implement higher-level logic or methods."""
        return changegroup.readexactly(self._fp, size)


def getunbundler(ui, fp, magicstring=None):
    """return a valid unbundler object for a given magicstring"""
    if magicstring is None:
        magicstring = changegroup.readexactly(fp, 4)
    magic, version = magicstring[0:2], magicstring[2:4]
    if magic != b'HG':
        ui.debug(
            b"error: invalid magic: %r (version %r), should be 'HG'\n"
            % (magic, version)
        )
        raise error.Abort(_(b'not a Mercurial bundle'))
    unbundlerclass = formatmap.get(version)
    if unbundlerclass is None:
        raise error.Abort(_(b'unknown bundle version %s') % version)
    unbundler = unbundlerclass(ui, fp)
    indebug(ui, b'start processing of %s stream' % magicstring)
    return unbundler
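
# Reading sketch (illustrative, not part of the original module; `ui` is
# assumed to be a mercurial ui object and `path` an HG20 bundle file):
#
#   with open(path, 'rb') as fp:
#       unbundler = getunbundler(ui, fp)
#       for part in unbundler.iterparts():
#           ui.write(b'%s\n' % part.type)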


class unbundle20(unpackermixin):
    """interpret a bundle2 stream

    This class is fed with a binary stream and yields parts through its
    `iterparts` method."""

    _magicstring = b'HG20'

    def __init__(self, ui, fp):
        """`fp` is a binary stream positioned right after the magic string."""
        self.ui = ui
        self._compengine = util.compengines.forbundletype(b'UN')
        self._compressed = None
        super(unbundle20, self).__init__(fp)

    @util.propertycache
    def params(self):
        """dictionary of stream level parameters"""
        indebug(self.ui, b'reading bundle2 stream parameters')
        params = {}
        paramssize = self._unpack(_fstreamparamsize)[0]
        if paramssize < 0:
            raise error.BundleValueError(
                b'negative bundle param size: %i' % paramssize
            )
        if paramssize:
            params = self._readexact(paramssize)
            params = self._processallparams(params)
        return params

    def _processallparams(self, paramsblock):
        """process a space-separated block of stream level parameters"""
        params = util.sortdict()
        for p in paramsblock.split(b' '):
            p = p.split(b'=', 1)
            p = [urlreq.unquote(i) for i in p]
            if len(p) < 2:
                p.append(None)
            self._processparam(*p)
            params[p[0]] = p[1]
        return params

    def _processparam(self, name, value):
        """process a parameter, applying its effect if needed

        Parameters starting with a lower case letter are advisory and will be
        ignored when unknown. Those starting with an upper case letter are
        mandatory, and this function will raise a KeyError when they are
        unknown.

        Note: no options are currently supported. Any input will be either
        ignored or failing.
        """
        if not name:
            raise ValueError('empty parameter name')
        if name[0:1] not in pycompat.bytestr(
            string.ascii_letters  # pytype: disable=wrong-arg-types
        ):
            raise ValueError('non letter first character: %s' % name)
        try:
            handler = b2streamparamsmap[name.lower()]
        except KeyError:
            if name[0:1].islower():
                indebug(self.ui, b"ignoring unknown parameter %s" % name)
            else:
                raise error.BundleUnknownFeatureError(params=(name,))
        else:
            handler(self, name, value)

    def _forwardchunks(self):
        """utility to transfer a bundle2 as binary

        This is made necessary by the fact the 'getbundle' command over 'ssh'
        has no way to know when the reply ends, relying on the bundle being
        interpreted to find its end. This is terrible and we are sorry, but we
        needed to move forward to get general delta enabled.
        """
        yield self._magicstring
        assert 'params' not in vars(self)
        paramssize = self._unpack(_fstreamparamsize)[0]
        if paramssize < 0:
            raise error.BundleValueError(
                b'negative bundle param size: %i' % paramssize
            )
        if paramssize:
            params = self._readexact(paramssize)
            self._processallparams(params)
            # The payload itself is decompressed below, so drop
            # the compression parameter passed down to compensate.
            outparams = []
            for p in params.split(b' '):
                k, v = p.split(b'=', 1)
                if k.lower() != b'compression':
                    outparams.append(p)
            outparams = b' '.join(outparams)
            yield _pack(_fstreamparamsize, len(outparams))
            yield outparams
        else:
            yield _pack(_fstreamparamsize, paramssize)
        # From there, payload might need to be decompressed
        self._fp = self._compengine.decompressorreader(self._fp)
        emptycount = 0
        while emptycount < 2:
            # so we can brainlessly loop
            assert _fpartheadersize == _fpayloadsize
            size = self._unpack(_fpartheadersize)[0]
            yield _pack(_fpartheadersize, size)
            if size:
                emptycount = 0
            else:
                emptycount += 1
                continue
            if size == flaginterrupt:
                continue
            elif size < 0:
                raise error.BundleValueError(b'negative chunk size: %i' % size)
            yield self._readexact(size)

    def iterparts(self, seekable=False):
        """yield all parts contained in the stream"""
        cls = seekableunbundlepart if seekable else unbundlepart
        # make sure params have been loaded
        self.params
        # From there, the payload needs to be decompressed
        self._fp = self._compengine.decompressorreader(self._fp)
        indebug(self.ui, b'start extraction of bundle2 parts')
        headerblock = self._readpartheader()
        while headerblock is not None:
            part = cls(self.ui, headerblock, self._fp)
            yield part
            # Ensure part is fully consumed so we can start reading the next
            # part.
            part.consume()

            headerblock = self._readpartheader()
        indebug(self.ui, b'end of bundle2 stream')

    def _readpartheader(self):
        """read a part header size and return the bytes blob

        returns None if empty"""
        headersize = self._unpack(_fpartheadersize)[0]
        if headersize < 0:
            raise error.BundleValueError(
                b'negative part header size: %i' % headersize
            )
        indebug(self.ui, b'part header size: %i' % headersize)
        if headersize:
            return self._readexact(headersize)
        return None

    def compressed(self):
        self.params  # load params
        return self._compressed

    def close(self):
        """close underlying file"""
        if util.safehasattr(self._fp, 'close'):
            return self._fp.close()


formatmap = {b'20': unbundle20}

b2streamparamsmap = {}


def b2streamparamhandler(name):
    """register a handler for a stream level parameter"""

    def decorator(func):
        assert name not in b2streamparamsmap
        b2streamparamsmap[name] = func
        return func

    return decorator


@b2streamparamhandler(b'compression')
def processcompression(unbundler, param, value):
    """read compression parameter and install payload decompression"""
    if value not in util.compengines.supportedbundletypes:
        raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
    unbundler._compengine = util.compengines.forbundletype(value)
    if value is not None:
        unbundler._compressed = True
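
# Registering an additional stream parameter handler (illustrative sketch;
# b'myfeature' is a hypothetical advisory parameter, not a real one):
#
#   @b2streamparamhandler(b'myfeature')
#   def processmyfeature(unbundler, param, value):
#       indebug(unbundler.ui, b'myfeature value: %s' % value)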


class bundlepart(object):
    """A bundle2 part contains application level payload

    The part `type` is used to route the part to the application level
    handler.

    The part payload is contained in ``part.data``. It could be raw bytes or a
    generator of byte chunks.

    You can add parameters to the part using the ``addparam`` method.
    Parameters can be either mandatory (default) or advisory. The remote side
    should be able to safely ignore the advisory ones.

    Both data and parameters cannot be modified after the generation has begun.
    """

    def __init__(
        self,
        parttype,
        mandatoryparams=(),
        advisoryparams=(),
        data=b'',
        mandatory=True,
    ):
        validateparttype(parttype)
        self.id = None
        self.type = parttype
        self._data = data
        self._mandatoryparams = list(mandatoryparams)
        self._advisoryparams = list(advisoryparams)
        # checking for duplicated entries
        self._seenparams = set()
        for pname, __ in self._mandatoryparams + self._advisoryparams:
            if pname in self._seenparams:
                raise error.ProgrammingError(b'duplicated params: %s' % pname)
            self._seenparams.add(pname)
        # status of the part's generation:
        # - None: not started,
        # - False: currently generated,
        # - True: generation done.
        self._generated = None
        self.mandatory = mandatory

    def __repr__(self):
        cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
        return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
            cls,
            id(self),
            self.id,
            self.type,
            self.mandatory,
        )

    def copy(self):
        """return a copy of the part

        The new part has the very same content but no partid assigned yet.
        Parts with generated data cannot be copied."""
        assert not util.safehasattr(self.data, 'next')
        return self.__class__(
            self.type,
            self._mandatoryparams,
            self._advisoryparams,
            self._data,
            self.mandatory,
        )

    # methods used to define the part content
    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, data):
        if self._generated is not None:
            raise error.ReadOnlyPartError(b'part is being generated')
        self._data = data

    @property
    def mandatoryparams(self):
        # make it an immutable tuple to force people through ``addparam``
        return tuple(self._mandatoryparams)

    @property
    def advisoryparams(self):
        # make it an immutable tuple to force people through ``addparam``
        return tuple(self._advisoryparams)

    def addparam(self, name, value=b'', mandatory=True):
        """add a parameter to the part

        If 'mandatory' is set to True, the remote handler must claim support
        for this parameter or the unbundling will be aborted.

        The 'name' and 'value' cannot exceed 255 bytes each.
        """
        if self._generated is not None:
            raise error.ReadOnlyPartError(b'part is being generated')
        if name in self._seenparams:
            raise ValueError(b'duplicated params: %s' % name)
        self._seenparams.add(name)
        params = self._advisoryparams
        if mandatory:
            params = self._mandatoryparams
        params.append((name, value))
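
    # Construction sketch (illustrative, not part of the original module),
    # mirroring how reply parts are built in _processpart above:
    #
    #   part = bundlepart(b'output', data=b'hello', mandatory=False)
    #   part.addparam(b'in-reply-to', b'0', mandatory=False)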
1078
1078
1079 # methods used to generates the bundle2 stream
1079 # methods used to generates the bundle2 stream
    def getchunks(self, ui):
        if self._generated is not None:
            raise error.ProgrammingError(b'part can only be consumed once')
        self._generated = False

        if ui.debugflag:
            msg = [b'bundle2-output-part: "%s"' % self.type]
            if not self.mandatory:
                msg.append(b' (advisory)')
            nbmp = len(self.mandatoryparams)
            nbap = len(self.advisoryparams)
            if nbmp or nbap:
                msg.append(b' (params:')
                if nbmp:
                    msg.append(b' %i mandatory' % nbmp)
                if nbap:
                    msg.append(b' %i advisory' % nbap)
                msg.append(b')')
            if not self.data:
                msg.append(b' empty payload')
            elif util.safehasattr(self.data, 'next') or util.safehasattr(
                self.data, b'__next__'
            ):
                msg.append(b' streamed payload')
            else:
                msg.append(b' %i bytes payload' % len(self.data))
            msg.append(b'\n')
            ui.debug(b''.join(msg))

        #### header
        if self.mandatory:
            parttype = self.type.upper()
        else:
            parttype = self.type.lower()
        outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
        ## parttype
        header = [
            _pack(_fparttypesize, len(parttype)),
            parttype,
            _pack(_fpartid, self.id),
        ]
        ## parameters
        # count
        manpar = self.mandatoryparams
        advpar = self.advisoryparams
        header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
        # size
        parsizes = []
        for key, value in manpar:
            parsizes.append(len(key))
            parsizes.append(len(value))
        for key, value in advpar:
            parsizes.append(len(key))
            parsizes.append(len(value))
        paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
        header.append(paramsizes)
        # key, value
        for key, value in manpar:
            header.append(key)
            header.append(value)
        for key, value in advpar:
            header.append(key)
            header.append(value)
        ## finalize header
        try:
            headerchunk = b''.join(header)
        except TypeError:
            raise TypeError(
                'Found a non-bytes trying to '
                'build bundle part header: %r' % header
            )
        outdebug(ui, b'header chunk size: %i' % len(headerchunk))
        yield _pack(_fpartheadersize, len(headerchunk))
        yield headerchunk
        ## payload
        try:
            for chunk in self._payloadchunks():
                outdebug(ui, b'payload chunk size: %i' % len(chunk))
                yield _pack(_fpayloadsize, len(chunk))
                yield chunk
        except GeneratorExit:
            # GeneratorExit means that nobody is listening for our
            # results anyway, so just bail quickly rather than trying
            # to produce an error part.
            ui.debug(b'bundle2-generatorexit\n')
            raise
        except BaseException as exc:
            bexc = stringutil.forcebytestr(exc)
            # backup exception data for later
            ui.debug(
                b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
            )
            tb = sys.exc_info()[2]
            msg = b'unexpected error: %s' % bexc
            interpart = bundlepart(
                b'error:abort', [(b'message', msg)], mandatory=False
            )
            interpart.id = 0
            yield _pack(_fpayloadsize, -1)
            for chunk in interpart.getchunks(ui=ui):
                yield chunk
            outdebug(ui, b'closing payload chunk')
            # abort current part payload
            yield _pack(_fpayloadsize, 0)
            pycompat.raisewithtb(exc, tb)
        # end of payload
        outdebug(ui, b'closing payload chunk')
        yield _pack(_fpayloadsize, 0)
        self._generated = True

    def _payloadchunks(self):
        """yield chunks of the part payload

        Exists to handle the different methods to provide data to a part."""
        # we only support fixed size data now.
        # This will be improved in the future.
        if util.safehasattr(self.data, 'next') or util.safehasattr(
            self.data, b'__next__'
        ):
            buff = util.chunkbuffer(self.data)
            chunk = buff.read(preferedchunksize)
            while chunk:
                yield chunk
                chunk = buff.read(preferedchunksize)
        elif len(self.data):
            yield self.data


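# A minimal sketch (not part of the original module) of the payload frame
# layout that ``getchunks`` emits above: each chunk is preceded by a signed
# 32-bit big-endian size (the ``_fpayloadsize`` format), and a zero size
# terminates the payload.
def _sketchframepayload(chunks):
    out = []
    for chunk in chunks:
        out.append(_pack(_fpayloadsize, len(chunk)))  # frame header
        out.append(chunk)
    out.append(_pack(_fpayloadsize, 0))  # end-of-payload marker
    return b''.join(out)

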
flaginterrupt = -1


class interrupthandler(unpackermixin):
    """read one part and process it with restricted capability

    This allows the transmission of exceptions raised on the producer side
    during part iteration while the consumer is reading a part.

    Parts processed in this manner only have access to a ui object."""

    def __init__(self, ui, fp):
        super(interrupthandler, self).__init__(fp)
        self.ui = ui

    def _readpartheader(self):
        """reads a part header size and return the bytes blob

        returns None if empty"""
        headersize = self._unpack(_fpartheadersize)[0]
        if headersize < 0:
            raise error.BundleValueError(
                b'negative part header size: %i' % headersize
            )
        indebug(self.ui, b'part header size: %i\n' % headersize)
        if headersize:
            return self._readexact(headersize)
        return None

    def __call__(self):

        self.ui.debug(
            b'bundle2-input-stream-interrupt: opening out of band context\n'
        )
        indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
        headerblock = self._readpartheader()
        if headerblock is None:
            indebug(self.ui, b'no part found during interruption.')
            return
        part = unbundlepart(self.ui, headerblock, self._fp)
        op = interruptoperation(self.ui)
        hardabort = False
        try:
            _processpart(op, part)
        except (SystemExit, KeyboardInterrupt):
            hardabort = True
            raise
        finally:
            if not hardabort:
                part.consume()
        self.ui.debug(
            b'bundle2-input-stream-interrupt: closing out of band context\n'
        )


class interruptoperation(object):
    """A limited operation to be used by part handlers during interruption

    It only has access to a ui object.
    """

    def __init__(self, ui):
        self.ui = ui
        self.reply = None
        self.captureoutput = False

    @property
    def repo(self):
        raise error.ProgrammingError(b'no repo access from stream interruption')

    def gettransaction(self):
        raise TransactionUnavailable(b'no repo access from stream interruption')


def decodepayloadchunks(ui, fh):
    """Reads bundle2 part payload data into chunks.

    Part payload data consists of framed chunks. This function takes
    a file handle and emits those chunks.
    """
    dolog = ui.configbool(b'devel', b'bundle2.debug')
    debug = ui.debug

    headerstruct = struct.Struct(_fpayloadsize)
    headersize = headerstruct.size
    unpack = headerstruct.unpack

    readexactly = changegroup.readexactly
    read = fh.read

    chunksize = unpack(readexactly(fh, headersize))[0]
    indebug(ui, b'payload chunk size: %i' % chunksize)

    # changegroup.readexactly() is inlined below for performance.
    while chunksize:
        if chunksize >= 0:
            s = read(chunksize)
            if len(s) < chunksize:
                raise error.Abort(
                    _(
                        b'stream ended unexpectedly '
                        b'(got %d bytes, expected %d)'
                    )
                    % (len(s), chunksize)
                )

            yield s
        elif chunksize == flaginterrupt:
            # Interrupt "signal" detected. The regular stream is interrupted
            # and a bundle2 part follows. Consume it.
            interrupthandler(ui, fh)()
        else:
            raise error.BundleValueError(
                b'negative payload chunk size: %s' % chunksize
            )

        s = read(headersize)
        if len(s) < headersize:
            raise error.Abort(
                _(b'stream ended unexpectedly (got %d bytes, expected %d)')
                % (len(s), headersize)
            )

        chunksize = unpack(s)[0]

        # indebug() inlined for performance.
        if dolog:
            debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)


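# Hypothetical usage sketch for ``decodepayloadchunks``: decoding a payload
# from an in-memory stream. ``ui`` is assumed to be a real ui object; the
# raw bytes are one 5-byte frame followed by the zero end-of-payload marker.
def _sketchdecode(ui):
    import io

    raw = _pack(_fpayloadsize, 5) + b'hello' + _pack(_fpayloadsize, 0)
    return list(decodepayloadchunks(ui, io.BytesIO(raw)))  # [b'hello']

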
class unbundlepart(unpackermixin):
    """a bundle part read from a bundle"""

    def __init__(self, ui, header, fp):
        super(unbundlepart, self).__init__(fp)
        self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
            fp, b'tell'
        )
        self.ui = ui
        # unbundle state attr
        self._headerdata = header
        self._headeroffset = 0
        self._initialized = False
        self.consumed = False
        # part data
        self.id = None
        self.type = None
        self.mandatoryparams = None
        self.advisoryparams = None
        self.params = None
        self.mandatorykeys = ()
        self._readheader()
        self._mandatory = None
        self._pos = 0

    def _fromheader(self, size):
        """return the next <size> byte from the header"""
        offset = self._headeroffset
        data = self._headerdata[offset : (offset + size)]
        self._headeroffset = offset + size
        return data

    def _unpackheader(self, format):
        """read given format from header

        This automatically computes the size of the format to read."""
        data = self._fromheader(struct.calcsize(format))
        return _unpack(format, data)

    def _initparams(self, mandatoryparams, advisoryparams):
        """internal function to setup all logic related parameters"""
        # make it read only to prevent people touching it by mistake.
        self.mandatoryparams = tuple(mandatoryparams)
        self.advisoryparams = tuple(advisoryparams)
        # user friendly UI
        self.params = util.sortdict(self.mandatoryparams)
        self.params.update(self.advisoryparams)
        self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)

    def _readheader(self):
        """read the header and setup the object"""
        typesize = self._unpackheader(_fparttypesize)[0]
        self.type = self._fromheader(typesize)
        indebug(self.ui, b'part type: "%s"' % self.type)
        self.id = self._unpackheader(_fpartid)[0]
        indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
        # extract mandatory bit from type
        self.mandatory = self.type != self.type.lower()
        self.type = self.type.lower()
        ## reading parameters
        # param count
        mancount, advcount = self._unpackheader(_fpartparamcount)
        indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
        # param size
        fparamsizes = _makefpartparamsizes(mancount + advcount)
        paramsizes = self._unpackheader(fparamsizes)
        # make it a list of pairs again
        paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
        # split mandatory from advisory
        mansizes = paramsizes[:mancount]
        advsizes = paramsizes[mancount:]
        # retrieve param value
        manparams = []
        for key, value in mansizes:
            manparams.append((self._fromheader(key), self._fromheader(value)))
        advparams = []
        for key, value in advsizes:
            advparams.append((self._fromheader(key), self._fromheader(value)))
        self._initparams(manparams, advparams)
        ## part payload
        self._payloadstream = util.chunkbuffer(self._payloadchunks())
        # the header has been fully read, record it
        self._initialized = True

    def _payloadchunks(self):
        """Generator of decoded chunks in the payload."""
        return decodepayloadchunks(self.ui, self._fp)

    def consume(self):
        """Read the part payload until completion.

        By consuming the part data, the underlying stream read offset will
        be advanced to the next part (or end of stream).
        """
        if self.consumed:
            return

        chunk = self.read(32768)
        while chunk:
            self._pos += len(chunk)
            chunk = self.read(32768)

    def read(self, size=None):
        """read payload data"""
        if not self._initialized:
            self._readheader()
        if size is None:
            data = self._payloadstream.read()
        else:
            data = self._payloadstream.read(size)
        self._pos += len(data)
        if size is None or len(data) < size:
            if not self.consumed and self._pos:
                self.ui.debug(
                    b'bundle2-input-part: total payload size %i\n' % self._pos
                )
            self.consumed = True
        return data


class seekableunbundlepart(unbundlepart):
    """A bundle2 part in a bundle that is seekable.

    Regular ``unbundlepart`` instances can only be read once. This class
    extends ``unbundlepart`` to enable bi-directional seeking within the
    part.

    Bundle2 part data consists of framed chunks. Offsets when seeking
    refer to the decoded data, not the offsets in the underlying bundle2
    stream.

    To facilitate quickly seeking within the decoded data, instances of this
    class maintain a mapping between offsets in the underlying stream and
    the decoded payload. This mapping will consume memory in proportion
    to the number of chunks within the payload (which almost certainly
    increases in proportion with the size of the part).
    """

    def __init__(self, ui, header, fp):
        # (payload, file) offsets for chunk starts.
        self._chunkindex = []

        super(seekableunbundlepart, self).__init__(ui, header, fp)

    def _payloadchunks(self, chunknum=0):
        '''seek to specified chunk and start yielding data'''
        if len(self._chunkindex) == 0:
            assert chunknum == 0, b'Must start with chunk 0'
            self._chunkindex.append((0, self._tellfp()))
        else:
            assert chunknum < len(self._chunkindex), (
                b'Unknown chunk %d' % chunknum
            )
            self._seekfp(self._chunkindex[chunknum][1])

        pos = self._chunkindex[chunknum][0]

        for chunk in decodepayloadchunks(self.ui, self._fp):
            chunknum += 1
            pos += len(chunk)
            if chunknum == len(self._chunkindex):
                self._chunkindex.append((pos, self._tellfp()))

            yield chunk

    def _findchunk(self, pos):
        '''for a given payload position, return a chunk number and offset'''
        for chunk, (ppos, fpos) in enumerate(self._chunkindex):
            if ppos == pos:
                return chunk, 0
            elif ppos > pos:
                return chunk - 1, pos - self._chunkindex[chunk - 1][0]
        raise ValueError(b'Unknown chunk')

    def tell(self):
        return self._pos

    def seek(self, offset, whence=os.SEEK_SET):
        if whence == os.SEEK_SET:
            newpos = offset
        elif whence == os.SEEK_CUR:
            newpos = self._pos + offset
        elif whence == os.SEEK_END:
            if not self.consumed:
                # Can't use self.consume() here because it advances self._pos.
                chunk = self.read(32768)
                while chunk:
                    chunk = self.read(32768)
            newpos = self._chunkindex[-1][0] - offset
        else:
            raise ValueError(b'Unknown whence value: %r' % (whence,))

        if newpos > self._chunkindex[-1][0] and not self.consumed:
            # Can't use self.consume() here because it advances self._pos.
            chunk = self.read(32768)
            while chunk:
                chunk = self.read(32768)

        if not 0 <= newpos <= self._chunkindex[-1][0]:
            raise ValueError(b'Offset out of range')

        if self._pos != newpos:
            chunk, internaloffset = self._findchunk(newpos)
            self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
            adjust = self.read(internaloffset)
            if len(adjust) != internaloffset:
                raise error.Abort(_(b'Seek failed\n'))
            self._pos = newpos

    def _seekfp(self, offset, whence=0):
        """move the underlying file pointer

        This method is meant for internal usage by the bundle2 protocol only.
        It directly manipulates the low-level stream, including bundle2-level
        instructions.

        Do not use it to implement higher-level logic or methods."""
        if self._seekable:
            return self._fp.seek(offset, whence)
        else:
            raise NotImplementedError(_(b'File pointer is not seekable'))

    def _tellfp(self):
        """return the file offset, or None if file is not seekable

        This method is meant for internal usage by the bundle2 protocol only.
        It directly manipulates the low-level stream, including bundle2-level
        instructions.

        Do not use it to implement higher-level logic or methods."""
        if self._seekable:
            try:
                return self._fp.tell()
            except IOError as e:
                if e.errno == errno.ESPIPE:
                    self._seekable = False
                else:
                    raise
        return None


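# Hypothetical usage sketch: random access within a decoded part payload.
# ``part`` is assumed to be a ``seekableunbundlepart`` backed by a seekable
# file object.
def _sketchrandomaccess(part):
    part.seek(0, os.SEEK_END)  # consumes the part and indexes every chunk
    size = part.tell()  # total decoded payload size
    part.seek(0)  # rewind to the start of the decoded data
    return size

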
# These are only the static capabilities.
# Check the 'getrepocaps' function for the rest.
capabilities = {
    b'HG20': (),
    b'bookmarks': (),
    b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
    b'listkeys': (),
    b'pushkey': (),
    b'digests': tuple(sorted(util.DIGESTS.keys())),
    b'remote-changegroup': (b'http', b'https'),
    b'hgtagsfnodes': (),
    b'rev-branch-cache': (),
    b'phases': (b'heads',),
    b'stream': (b'v2',),
}


def getrepocaps(repo, allowpushback=False, role=None):
    """return the bundle2 capabilities for a given repo

    Exists to allow extensions (like evolution) to mutate the capabilities.

    The returned value is used for servers advertising their capabilities as
    well as clients advertising their capabilities to servers as part of
    bundle2 requests. The ``role`` argument specifies which is which.
    """
    if role not in (b'client', b'server'):
        raise error.ProgrammingError(b'role argument must be client or server')

    caps = capabilities.copy()
    caps[b'changegroup'] = tuple(
        sorted(changegroup.supportedincomingversions(repo))
    )
    if obsolete.isenabled(repo, obsolete.exchangeopt):
        supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
        caps[b'obsmarkers'] = supportedformat
    if allowpushback:
        caps[b'pushback'] = ()
    cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
    if cpmode == b'check-related':
        caps[b'checkheads'] = (b'related',)
    if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
        caps.pop(b'phases')

    # Don't advertise stream clone support in server mode if not configured.
    if role == b'server':
        streamsupported = repo.ui.configbool(
            b'server', b'uncompressed', untrusted=True
        )
        featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')

        if not streamsupported or not featuresupported:
            caps.pop(b'stream')
    # Else always advertise support on client, because payload support
    # should always be advertised.

    return caps


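# Hypothetical usage sketch: a server computing and encoding the
# capabilities it advertises. ``repo`` is assumed to be a local repository;
# ``encodecaps`` is defined earlier in this module.
def _sketchadvertise(repo):
    caps = getrepocaps(repo, role=b'server')
    return encodecaps(caps)

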
def bundle2caps(remote):
    """return the bundle capabilities of a peer as dict"""
    raw = remote.capable(b'bundle2')
    if not raw and raw != b'':
        return {}
    capsblob = urlreq.unquote(remote.capable(b'bundle2'))
    return decodecaps(capsblob)


def obsmarkersversion(caps):
    """extract the list of supported obsmarkers versions from a bundle2caps dict
    """
    obscaps = caps.get(b'obsmarkers', ())
    return [int(c[1:]) for c in obscaps if c.startswith(b'V')]


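# Worked sketch for ``obsmarkersversion``: capability entries such as b'V0'
# and b'V1' decode to the integer versions 0 and 1.
def _sketchobsmarkersversion():
    assert obsmarkersversion({b'obsmarkers': (b'V0', b'V1')}) == [0, 1]

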
def writenewbundle(
    ui,
    repo,
    source,
    filename,
    bundletype,
    outgoing,
    opts,
    vfs=None,
    compression=None,
    compopts=None,
):
    if bundletype.startswith(b'HG10'):
        cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
        return writebundle(
            ui,
            cg,
            filename,
            bundletype,
            vfs=vfs,
            compression=compression,
            compopts=compopts,
        )
    elif not bundletype.startswith(b'HG20'):
        raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)

    caps = {}
    if b'obsolescence' in opts:
        caps[b'obsmarkers'] = (b'V1',)
    bundle = bundle20(ui, caps)
    bundle.setcompression(compression, compopts)
    _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
    chunkiter = bundle.getchunks()

    return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)


def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
    # We should eventually reconcile this logic with the one behind
    # 'exchange.getbundle2partsgenerator'.
    #
    # The type of input from 'getbundle' and 'writenewbundle' are a bit
    # different right now. So we keep them separated for now for the sake of
    # simplicity.

    # we might not always want a changegroup in such bundle, for example in
    # stream bundles
    if opts.get(b'changegroup', True):
        cgversion = opts.get(b'cg.version')
        if cgversion is None:
            cgversion = changegroup.safeversion(repo)
        cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
        part = bundler.newpart(b'changegroup', data=cg.getchunks())
        part.addparam(b'version', cg.version)
        if b'clcount' in cg.extras:
            part.addparam(
                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
            )
        if opts.get(b'phases') and repo.revs(
            b'%ln and secret()', outgoing.missingheads
        ):
            part.addparam(
                b'targetphase', b'%d' % phases.secret, mandatory=False
            )
        if b'exp-sidedata-flag' in repo.requirements:
            part.addparam(b'exp-sidedata', b'1')

    if opts.get(b'streamv2', False):
        addpartbundlestream2(bundler, repo, stream=True)

    if opts.get(b'tagsfnodescache', True):
        addparttagsfnodescache(repo, bundler, outgoing)

    if opts.get(b'revbranchcache', True):
        addpartrevbranchcache(repo, bundler, outgoing)

    if opts.get(b'obsolescence', False):
        obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
        buildobsmarkerspart(bundler, obsmarkers)

    if opts.get(b'phases', False):
        headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
        phasedata = phases.binaryencode(headsbyphase)
        bundler.newpart(b'phase-heads', data=phasedata)


def addparttagsfnodescache(repo, bundler, outgoing):
    # we include the tags fnode cache for the bundle changeset
    # (as an optional part)
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))


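# Sketch of the b'hgtagsfnodes' payload layout built above: a flat
# concatenation of 20-byte (node, fnode) binary pairs, one pair per head.
def _sketchdecodehgtagsfnodes(data):
    assert len(data) % 40 == 0
    return [
        (data[i : i + 20], data[i + 20 : i + 40])
        for i in range(0, len(data), 40)
    ]

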
def addpartrevbranchcache(repo, bundler, outgoing):
    # we include the rev branch cache for the bundle changeset
    # (as an optional part)
    cache = repo.revbranchcache()
    cl = repo.unfiltered().changelog
    branchesdata = collections.defaultdict(lambda: (set(), set()))
    for node in outgoing.missing:
        branch, close = cache.branchinfo(cl.rev(node))
        branchesdata[branch][close].add(node)

    def generate():
        for branch, (nodes, closed) in sorted(branchesdata.items()):
            utf8branch = encoding.fromlocal(branch)
            yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
            yield utf8branch
            for n in sorted(nodes):
                yield n
            for n in sorted(closed):
                yield n

    bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)


def _formatrequirementsspec(requirements):
    requirements = [req for req in requirements if req != b"shared"]
    return urlreq.quote(b','.join(sorted(requirements)))


def _formatrequirementsparams(requirements):
    requirements = _formatrequirementsspec(requirements)
    params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
    return params


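# Worked sketch for the requirements helpers above: b'shared' is dropped,
# the rest is sorted, joined with commas, and URL-quoted (the comma should
# become %2C, assuming pycompat's byte-returning quote).
def _sketchrequirementsspec():
    spec = _formatrequirementsspec([b'store', b'shared', b'revlogv1'])
    return spec  # e.g. b'revlogv1%2Cstore'

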
def addpartbundlestream2(bundler, repo, **kwargs):
    if not kwargs.get('stream', False):
        return

    if not streamclone.allowservergeneration(repo):
        raise error.Abort(
            _(
                b'stream data requested but server does not allow '
                b'this feature'
            ),
            hint=_(
                b'well-behaved clients should not be '
                b'requesting stream data from servers not '
                b'advertising it; the client may be buggy'
            ),
        )

    # Stream clones don't compress well. And compression undermines a
    # goal of stream clones, which is to be fast. Communicate the desire
    # to avoid compression to consumers of the bundle.
    bundler.prefercompressed = False

    # get the includes and excludes
    includepats = kwargs.get('includepats')
    excludepats = kwargs.get('excludepats')

    narrowstream = repo.ui.configbool(
        b'experimental', b'server.stream-narrow-clones'
    )

    if (includepats or excludepats) and not narrowstream:
        raise error.Abort(_(b'server does not support narrow stream clones'))

    includeobsmarkers = False
    if repo.obsstore:
        remoteversions = obsmarkersversion(bundler.capabilities)
        if not remoteversions:
            raise error.Abort(
                _(
                    b'server has obsolescence markers, but client '
                    b'cannot receive them via stream clone'
                )
            )
        elif repo.obsstore._version in remoteversions:
            includeobsmarkers = True

    filecount, bytecount, it = streamclone.generatev2(
        repo, includepats, excludepats, includeobsmarkers
    )
    requirements = _formatrequirementsspec(repo.requirements)
    part = bundler.newpart(b'stream2', data=it)
    part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
    part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
    part.addparam(b'requirements', requirements, mandatory=True)


def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if not markers:
        return None

    remoteversions = obsmarkersversion(bundler.capabilities)
    version = obsolete.commonversion(remoteversions)
    if version is None:
        raise ValueError(b'bundler does not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart(b'obsmarkers', data=stream)


def writebundle(
    ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    if bundletype == b"HG20":
        bundle = bundle20(ui)
        bundle.setcompression(compression, compopts)
        part = bundle.newpart(b'changegroup', data=cg.getchunks())
        part.addparam(b'version', cg.version)
        if b'clcount' in cg.extras:
            part.addparam(
                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
            )
        chunkiter = bundle.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != b'01':
            raise error.Abort(
                _(b'old bundle types only support v1 changegroups')
            )
        header, comp = bundletypes[bundletype]
        if comp not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % comp)
        compengine = util.compengines.forbundletype(comp)

        def chunkiter():
            yield header
            for chunk in compengine.compressstream(cg.getchunks(), compopts):
                yield chunk

        chunkiter = chunkiter()

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream
    return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)


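# Hypothetical usage sketch: writing an uncompressed bundle2 file from an
# existing changegroup ``cg``. A filename of None should make
# ``changegroup.writechunks`` create a temporary file.
def _sketchwritebundle(ui, cg):
    return writebundle(ui, cg, None, b'HG20')

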
def combinechangegroupresults(op):
    """logic to combine 0 or more addchangegroup results into one"""
    results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
    changedheads = 0
    result = 1
    for ret in results:
        # If any changegroup result is 0, return 0
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result


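# Worked sketch for ``combinechangegroupresults``: an addchangegroup return
# of 3 reads as "+2 heads" and 2 as "+1 head"; combined they report
# 1 + 2 + 1 = 4. ``fakeop`` is a hypothetical stand-in for a real operation.
def _sketchcombineresults():
    class fakeop(object):
        records = {b'changegroup': [{b'return': 3}, {b'return': 2}]}

    assert combinechangegroupresults(fakeop()) == 4

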
@parthandler(
    b'changegroup',
    (
        b'version',
        b'nbchanges',
        b'exp-sidedata',
        b'treemanifest',
        b'targetphase',
    ),
)
def handlechangegroup(op, inpart):
    """apply a changegroup part on the repo

1951 This is a very early implementation that will massive rework before being
1951 This is a very early implementation that will massive rework before being
1952 inflicted to any end-user.
1952 inflicted to any end-user.
1953 """
1953 """
1954 from . import localrepo
1954 from . import localrepo
1955
1955
1956 tr = op.gettransaction()
1956 tr = op.gettransaction()
1957 unpackerversion = inpart.params.get(b'version', b'01')
1957 unpackerversion = inpart.params.get(b'version', b'01')
1958 # We should raise an appropriate exception here
1958 # We should raise an appropriate exception here
1959 cg = changegroup.getunbundler(unpackerversion, inpart, None)
1959 cg = changegroup.getunbundler(unpackerversion, inpart, None)
1960 # the source and url passed here are overwritten by the one contained in
1960 # the source and url passed here are overwritten by the one contained in
1961 # the transaction.hookargs argument. So 'bundle2' is a placeholder
1961 # the transaction.hookargs argument. So 'bundle2' is a placeholder
1962 nbchangesets = None
1962 nbchangesets = None
1963 if b'nbchanges' in inpart.params:
1963 if b'nbchanges' in inpart.params:
1964 nbchangesets = int(inpart.params.get(b'nbchanges'))
1964 nbchangesets = int(inpart.params.get(b'nbchanges'))
1965 if (
1965 if (
1966 b'treemanifest' in inpart.params
1966 b'treemanifest' in inpart.params
1967 and b'treemanifest' not in op.repo.requirements
1967 and b'treemanifest' not in op.repo.requirements
1968 ):
1968 ):
1969 if len(op.repo.changelog) != 0:
1969 if len(op.repo.changelog) != 0:
1970 raise error.Abort(
1970 raise error.Abort(
1971 _(
1971 _(
1972 b"bundle contains tree manifests, but local repo is "
1972 b"bundle contains tree manifests, but local repo is "
1973 b"non-empty and does not use tree manifests"
1973 b"non-empty and does not use tree manifests"
1974 )
1974 )
1975 )
1975 )
1976 op.repo.requirements.add(b'treemanifest')
1976 op.repo.requirements.add(b'treemanifest')
1977 op.repo.svfs.options = localrepo.resolvestorevfsoptions(
1977 op.repo.svfs.options = localrepo.resolvestorevfsoptions(
1978 op.repo.ui, op.repo.requirements, op.repo.features
1978 op.repo.ui, op.repo.requirements, op.repo.features
1979 )
1979 )
1980 op.repo._writerequirements()
1980 op.repo._writerequirements()
1981
1981
1982 bundlesidedata = bool(b'exp-sidedata' in inpart.params)
1982 bundlesidedata = bool(b'exp-sidedata' in inpart.params)
1983 reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
1983 reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
1984 if reposidedata and not bundlesidedata:
1984 if reposidedata and not bundlesidedata:
1985 msg = b"repository is using sidedata but the bundle source do not"
1985 msg = b"repository is using sidedata but the bundle source do not"
1986 hint = b'this is currently unsupported'
1986 hint = b'this is currently unsupported'
1987 raise error.Abort(msg, hint=hint)
1987 raise error.Abort(msg, hint=hint)
1988
1988
1989 extrakwargs = {}
1989 extrakwargs = {}
1990 targetphase = inpart.params.get(b'targetphase')
1990 targetphase = inpart.params.get(b'targetphase')
1991 if targetphase is not None:
1991 if targetphase is not None:
1992 extrakwargs['targetphase'] = int(targetphase)
1992 extrakwargs['targetphase'] = int(targetphase)
1993 ret = _processchangegroup(
1993 ret = _processchangegroup(
1994 op,
1994 op,
1995 cg,
1995 cg,
1996 tr,
1996 tr,
1997 b'bundle2',
1997 b'bundle2',
1998 b'bundle2',
1998 b'bundle2',
1999 expectedtotal=nbchangesets,
1999 expectedtotal=nbchangesets,
2000 **extrakwargs
2000 **extrakwargs
2001 )
2001 )
2002 if op.reply is not None:
2002 if op.reply is not None:
2003 # This is definitely not the final form of this
2003 # This is definitely not the final form of this
2004 # return. But one need to start somewhere.
2004 # return. But one need to start somewhere.
2005 part = op.reply.newpart(b'reply:changegroup', mandatory=False)
2005 part = op.reply.newpart(b'reply:changegroup', mandatory=False)
2006 part.addparam(
2006 part.addparam(
2007 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2007 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2008 )
2008 )
2009 part.addparam(b'return', b'%i' % ret, mandatory=False)
2009 part.addparam(b'return', b'%i' % ret, mandatory=False)
2010 assert not inpart.read()
2010 assert not inpart.read()
2011
2011
2012
2012
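# Editorial sketch (not part of the original module): what a sender might do
# to emit a part this handler understands. `bundler`, `cgdata` and `expected`
# are hypothetical stand-ins; newpart()/addparam() are the bundle20 methods
# already used elsewhere in this file.
def _example_emit_changegroup_part(bundler, cgdata, expected):
    part = bundler.newpart(b'changegroup', data=cgdata)
    part.addparam(b'version', b'02')
    # advisory hint so receivers can show accurate progress totals
    part.addparam(b'nbchanges', b'%d' % expected, mandatory=False)
    return part

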
_remotechangegroupparams = tuple(
    [b'url', b'size', b'digests']
    + [b'digest:%s' % k for k in util.DIGESTS.keys()]
)


@parthandler(b'remote-changegroup', _remotechangegroupparams)
def handleremotechangegroup(op, inpart):
    """apply a bundle10 on the repo, given an url and validation information

    All the information about the remote bundle to import is given as
    parameters. The parameters include:
      - url: the url to the bundle10.
      - size: the bundle10 file size. It is used to validate what was
        retrieved by the client matches the server knowledge about the bundle.
      - digests: a space separated list of the digest types provided as
        parameters.
      - digest:<digest-type>: the hexadecimal representation of the digest with
        that name. Like the size, it is used to validate what was retrieved by
        the client matches what the server knows about the bundle.

    When multiple digest types are given, all of them are checked.
    """
    try:
        raw_url = inpart.params[b'url']
    except KeyError:
        raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
    parsed_url = util.url(raw_url)
    if parsed_url.scheme not in capabilities[b'remote-changegroup']:
        raise error.Abort(
            _(b'remote-changegroup does not support %s urls')
            % parsed_url.scheme
        )

    try:
        size = int(inpart.params[b'size'])
    except ValueError:
        raise error.Abort(
            _(b'remote-changegroup: invalid value for param "%s"') % b'size'
        )
    except KeyError:
        raise error.Abort(
            _(b'remote-changegroup: missing "%s" param') % b'size'
        )

    digests = {}
    for typ in inpart.params.get(b'digests', b'').split():
        param = b'digest:%s' % typ
        try:
            value = inpart.params[param]
        except KeyError:
            raise error.Abort(
                _(b'remote-changegroup: missing "%s" param') % param
            )
        digests[typ] = value

    real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)

    tr = op.gettransaction()
    from . import exchange

    cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
    if not isinstance(cg, changegroup.cg1unpacker):
        raise error.Abort(
            _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
        )
    ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
    if op.reply is not None:
        # This is definitely not the final form of this
        # return. But one needs to start somewhere.
        part = op.reply.newpart(b'reply:changegroup')
        part.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        part.addparam(b'return', b'%i' % ret, mandatory=False)
    try:
        real_part.validate()
    except error.Abort as e:
        raise error.Abort(
            _(b'bundle at %s is corrupted:\n%s')
            % (util.hidepassword(raw_url), bytes(e))
        )
    assert not inpart.read()


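# Editorial sketch (not part of the original module): the parameter layout
# this handler consumes. Values are hypothetical; with several digest types,
# b'digests' lists them space-separated and each value lives under its own
# b'digest:<type>' param.
def _example_remote_changegroup_params(url, size, sha1hex):
    return {
        b'url': url,
        b'size': b'%d' % size,
        b'digests': b'sha1',
        b'digest:sha1': sha1hex,
    }

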
@parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
def handlereplychangegroup(op, inpart):
    ret = int(inpart.params[b'return'])
    replyto = int(inpart.params[b'in-reply-to'])
    op.records.add(b'changegroup', {b'return': ret}, replyto)


@parthandler(b'check:bookmarks')
def handlecheckbookmarks(op, inpart):
    """check location of bookmarks

    This part is to be used to detect push races regarding bookmarks; it
    contains binary encoded (bookmark, node) tuples. If the local state does
    not match the ones in the part, a PushRaced exception is raised.
    """
    bookdata = bookmarks.binarydecode(inpart)

    msgstandard = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" moved from %s to %s)'
    )
    msgmissing = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" is missing, expected %s)'
    )
    msgexist = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" set on %s, expected missing)'
    )
    for book, node in bookdata:
        currentnode = op.repo._bookmarks.get(book)
        if currentnode != node:
            if node is None:
                finalmsg = msgexist % (book, nodemod.short(currentnode))
            elif currentnode is None:
                finalmsg = msgmissing % (book, nodemod.short(node))
            else:
                finalmsg = msgstandard % (
                    book,
                    nodemod.short(node),
                    nodemod.short(currentnode),
                )
            raise error.PushRaced(finalmsg)


@parthandler(b'check:heads')
def handlecheckheads(op, inpart):
    """check that the heads of the repo did not change

    This is used to detect a push race when using unbundle.
    This replaces the "heads" argument of unbundle."""
    h = inpart.read(20)
    heads = []
    while len(h) == 20:
        heads.append(h)
        h = inpart.read(20)
    assert not h
    # Trigger a transaction so that we are guaranteed to have the lock now.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    if sorted(heads) != sorted(op.repo.heads()):
        raise error.PushRaced(
            b'remote repository changed while pushing - please try again'
        )


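# Editorial sketch (not part of the original module): the fixed-width framing
# read above. Nodes are raw 20-byte values concatenated with no separator; a
# short (or empty) read marks the end of the payload. `readfn` is a
# hypothetical stand-in for inpart.read.
def _example_read_nodes(readfn):
    nodes = []
    chunk = readfn(20)
    while len(chunk) == 20:
        nodes.append(chunk)
        chunk = readfn(20)
    assert not chunk  # a trailing partial node would be a framing error
    return nodes

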
@parthandler(b'check:updated-heads')
def handlecheckupdatedheads(op, inpart):
    """check for races on the heads touched by a push

    This is similar to 'check:heads' but focuses on the heads actually
    updated during the push. If other activities happen on unrelated heads,
    they are ignored.

    This allows servers with high traffic to avoid push contention as long
    as only unrelated parts of the graph are involved."""
    h = inpart.read(20)
    heads = []
    while len(h) == 20:
        heads.append(h)
        h = inpart.read(20)
    assert not h
    # trigger a transaction so that we are guaranteed to have the lock now.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()

    currentheads = set()
    for ls in op.repo.branchmap().iterheads():
        currentheads.update(ls)

    for h in heads:
        if h not in currentheads:
            raise error.PushRaced(
                b'remote repository changed while pushing - '
                b'please try again'
            )


@parthandler(b'check:phases')
def handlecheckphases(op, inpart):
    """check that phase boundaries of the repository did not change

    This is used to detect a push race.
    """
    phasetonodes = phases.binarydecode(inpart)
    unfi = op.repo.unfiltered()
    cl = unfi.changelog
    phasecache = unfi._phasecache
    msg = (
        b'remote repository changed while pushing - please try again '
        b'(%s is %s expected %s)'
    )
    for expectedphase, nodes in enumerate(phasetonodes):
        for n in nodes:
            actualphase = phasecache.phase(unfi, cl.rev(n))
            if actualphase != expectedphase:
                finalmsg = msg % (
                    nodemod.short(n),
                    phases.phasenames[actualphase],
                    phases.phasenames[expectedphase],
                )
                raise error.PushRaced(finalmsg)


@parthandler(b'output')
def handleoutput(op, inpart):
    """forward output captured on the server to the client"""
    for line in inpart.read().splitlines():
        op.ui.status(_(b'remote: %s\n') % line)


@parthandler(b'replycaps')
def handlereplycaps(op, inpart):
    """Notify that a reply bundle should be created

    The payload contains the capabilities information for the reply"""
    caps = decodecaps(inpart.read())
    if op.reply is None:
        op.reply = bundle20(op.ui, caps)


class AbortFromPart(error.Abort):
    """Sub-class of Abort that denotes an error from a bundle2 part."""


@parthandler(b'error:abort', (b'message', b'hint'))
def handleerrorabort(op, inpart):
    """Used to transmit abort error over the wire"""
    raise AbortFromPart(
        inpart.params[b'message'], hint=inpart.params.get(b'hint')
    )


@parthandler(
    b'error:pushkey',
    (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
)
def handleerrorpushkey(op, inpart):
    """Used to transmit failure of a mandatory pushkey over the wire"""
    kwargs = {}
    for name in (b'namespace', b'key', b'new', b'old', b'ret'):
        value = inpart.params.get(name)
        if value is not None:
            kwargs[name] = value
    raise error.PushkeyFailed(
        inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs)
    )


@parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
def handleerrorunsupportedcontent(op, inpart):
    """Used to transmit unknown content error over the wire"""
    kwargs = {}
    parttype = inpart.params.get(b'parttype')
    if parttype is not None:
        kwargs[b'parttype'] = parttype
    params = inpart.params.get(b'params')
    if params is not None:
        kwargs[b'params'] = params.split(b'\0')

    raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))


@parthandler(b'error:pushraced', (b'message',))
def handleerrorpushraced(op, inpart):
    """Used to transmit push race error over the wire"""
    raise error.ResponseError(_(b'push failed:'), inpart.params[b'message'])


@parthandler(b'listkeys', (b'namespace',))
def handlelistkeys(op, inpart):
    """retrieve pushkey namespace content stored in a bundle2"""
    namespace = inpart.params[b'namespace']
    r = pushkey.decodekeys(inpart.read())
    op.records.add(b'listkeys', (namespace, r))


@parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
def handlepushkey(op, inpart):
    """process a pushkey request"""
    dec = pushkey.decode
    namespace = dec(inpart.params[b'namespace'])
    key = dec(inpart.params[b'key'])
    old = dec(inpart.params[b'old'])
    new = dec(inpart.params[b'new'])
    # Grab the transaction to ensure that we have the lock before performing
    # the pushkey.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    ret = op.repo.pushkey(namespace, key, old, new)
    record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
    op.records.add(b'pushkey', record)
    if op.reply is not None:
        rpart = op.reply.newpart(b'reply:pushkey')
        rpart.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        rpart.addparam(b'return', b'%i' % ret, mandatory=False)
    if inpart.mandatory and not ret:
        kwargs = {}
        for key in (b'namespace', b'key', b'new', b'old', b'ret'):
            if key in inpart.params:
                kwargs[key] = inpart.params[key]
        raise error.PushkeyFailed(
            partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
        )


@parthandler(b'bookmarks')
def handlebookmark(op, inpart):
    """transmit bookmark information

    The part contains binary encoded bookmark information.

    The exact behavior of this part can be controlled by the 'bookmarks' mode
    on the bundle operation.

    When mode is 'apply' (the default) the bookmark information is applied as
    is to the unbundling repository. Make sure a 'check:bookmarks' part is
    issued earlier to check for push races in such updates. This behavior is
    suitable for pushing.

    When mode is 'records', the information is recorded into the 'bookmarks'
    records of the bundle operation. This behavior is suitable for pulling.
    """
    changes = bookmarks.binarydecode(inpart)

    pushkeycompat = op.repo.ui.configbool(
        b'server', b'bookmarks-pushkey-compat'
    )
    bookmarksmode = op.modes.get(b'bookmarks', b'apply')

    if bookmarksmode == b'apply':
        tr = op.gettransaction()
        bookstore = op.repo._bookmarks
        if pushkeycompat:
            allhooks = []
            for book, node in changes:
                hookargs = tr.hookargs.copy()
                hookargs[b'pushkeycompat'] = b'1'
                hookargs[b'namespace'] = b'bookmarks'
                hookargs[b'key'] = book
                hookargs[b'old'] = nodemod.hex(bookstore.get(book, b''))
                hookargs[b'new'] = nodemod.hex(
                    node if node is not None else b''
                )
                allhooks.append(hookargs)

            for hookargs in allhooks:
                op.repo.hook(
                    b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
                )

        bookstore.applychanges(op.repo, op.gettransaction(), changes)

        if pushkeycompat:

            def runhook(unused_success):
                for hookargs in allhooks:
                    op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))

            op.repo._afterlock(runhook)

    elif bookmarksmode == b'records':
        for book, node in changes:
            record = {b'bookmark': book, b'node': node}
            op.records.add(b'bookmarks', record)
    else:
        raise error.ProgrammingError(
            b'unknown bookmark mode: %s' % bookmarksmode
        )


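# Editorial sketch (not part of the original module): how a puller might
# consume the 'records' mode above without touching the local bookmark store.
# `op` is a bundleoperation whose records were populated by this handler; the
# helper and the record-iteration pattern are assumptions for illustration.
def _example_read_bookmark_records(op):
    return [(r[b'bookmark'], r[b'node']) for r in op.records[b'bookmarks']]

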
@parthandler(b'phase-heads')
def handlephases(op, inpart):
    """apply phases from bundle part to repo"""
    headsbyphase = phases.binarydecode(inpart)
    phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)


@parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
def handlepushkeyreply(op, inpart):
    """retrieve the result of a pushkey request"""
    ret = int(inpart.params[b'return'])
    partid = int(inpart.params[b'in-reply-to'])
    op.records.add(b'pushkey', {b'return': ret}, partid)


@parthandler(b'obsmarkers')
def handleobsmarker(op, inpart):
    """add a stream of obsmarkers to the repo"""
    tr = op.gettransaction()
    markerdata = inpart.read()
    if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
        op.ui.writenoi18n(
            b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
        )
    # The mergemarkers call will crash if marker creation is not enabled.
    # we want to avoid this if the part is advisory.
    if not inpart.mandatory and op.repo.obsstore.readonly:
        op.repo.ui.debug(
            b'ignoring obsolescence markers, feature not enabled\n'
        )
        return
    new = op.repo.obsstore.mergemarkers(tr, markerdata)
    op.repo.invalidatevolatilesets()
    op.records.add(b'obsmarkers', {b'new': new})
    if op.reply is not None:
        rpart = op.reply.newpart(b'reply:obsmarkers')
        rpart.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        rpart.addparam(b'new', b'%i' % new, mandatory=False)


@parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
def handleobsmarkerreply(op, inpart):
    """retrieve the result of an obsmarkers request"""
    ret = int(inpart.params[b'new'])
    partid = int(inpart.params[b'in-reply-to'])
    op.records.add(b'obsmarkers', {b'new': ret}, partid)


@parthandler(b'hgtagsfnodes')
def handlehgtagsfnodes(op, inpart):
    """Applies .hgtags fnodes cache entries to the local repo.

    Payload is pairs of 20 byte changeset nodes and filenodes.
    """
    # Grab the transaction so we ensure that we have the lock at this point.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    cache = tags.hgtagsfnodescache(op.repo.unfiltered())

    count = 0
    while True:
        node = inpart.read(20)
        fnode = inpart.read(20)
        if len(node) < 20 or len(fnode) < 20:
            op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
            break
        cache.setfnode(node, fnode)
        count += 1

    cache.write()
    op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)


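# Editorial sketch (not part of the original module): the payload framing
# decoded above. Each entry is exactly 40 bytes: a 20-byte changeset node
# followed by the 20-byte filenode of .hgtags in that changeset.
def _example_hgtagsfnodes_payload(pairs):
    return b''.join(node + fnode for node, fnode in pairs)

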
rbcstruct = struct.Struct(b'>III')


@parthandler(b'cache:rev-branch-cache')
def handlerbc(op, inpart):
    """receive a rev-branch-cache payload and update the local cache

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    total = 0
    rawheader = inpart.read(rbcstruct.size)
    cache = op.repo.revbranchcache()
    cl = op.repo.unfiltered().changelog
    while rawheader:
        header = rbcstruct.unpack(rawheader)
        total += header[1] + header[2]
        utf8branch = inpart.read(header[0])
        branch = encoding.tolocal(utf8branch)
        for x in pycompat.xrange(header[1]):
            node = inpart.read(20)
            rev = cl.rev(node)
            cache.setdata(branch, rev, node, False)
        for x in pycompat.xrange(header[2]):
            node = inpart.read(20)
            rev = cl.rev(node)
            cache.setdata(branch, rev, node, True)
        rawheader = inpart.read(rbcstruct.size)
    cache.write()


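# Editorial sketch (not part of the original module): one record of the
# payload decoded above, built with the same rbcstruct. The big-endian
# header carries (name length, open-head count, closed-head count); the
# UTF-8 branch name and the 20-byte head nodes follow. Arguments are
# hypothetical.
def _example_rbc_record(utf8branch, openheads, closedheads):
    header = rbcstruct.pack(len(utf8branch), len(openheads), len(closedheads))
    return header + utf8branch + b''.join(openheads) + b''.join(closedheads)

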
@parthandler(b'pushvars')
def bundle2getvars(op, part):
    '''unbundle a bundle2 containing shellvars on the server'''
    # An option to disable unbundling on server-side for security reasons
    if op.ui.configbool(b'push', b'pushvars.server'):
        hookargs = {}
        for key, value in part.advisoryparams:
            key = key.upper()
            # We want pushed variables to have USERVAR_ prepended so we know
            # they came from the --pushvar flag.
            key = b"USERVAR_" + key
            hookargs[key] = value
        op.addhookargs(hookargs)


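# Editorial sketch (not part of the original module): the transformation
# performed above. A client-side '--pushvars KEY=VALUE' arrives as an
# advisory param and is exposed to hooks as USERVAR_KEY.
def _example_pushvar_hookargs(advisoryparams):
    return {b'USERVAR_' + key.upper(): value for key, value in advisoryparams}

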
@parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
def handlestreamv2bundle(op, part):

    requirements = urlreq.unquote(part.params[b'requirements']).split(b',')
    filecount = int(part.params[b'filecount'])
    bytecount = int(part.params[b'bytecount'])

    repo = op.repo
    if len(repo):
        msg = _(b'cannot apply stream clone to non-empty repository')
        raise error.Abort(msg)

    repo.ui.debug(b'applying stream bundle\n')
    streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)


def widen_bundle(
    bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
):
    """generates a bundle2 for widening a narrow clone

    bundler is the bundle to which data should be added
    repo is the localrepository instance
    oldmatcher matches what the client already has
    newmatcher matches what the client needs (including what it already has)
    common is the set of common heads between server and client
    known is a set of revs known on the client side (used in ellipses)
    cgversion is the changegroup version to send
    ellipses is a boolean telling whether to send ellipses data or not

    returns a bundle2 containing the data required for widening
    """
    commonnodes = set()
    cl = repo.changelog
    for r in repo.revs(b"::%ln", common):
        commonnodes.add(cl.node(r))
    if commonnodes:
        # XXX: we should only send the filelogs (and treemanifest). The user
        # already has the changelog and manifest
        packer = changegroup.getbundler(
            cgversion,
            repo,
            oldmatcher=oldmatcher,
            matcher=newmatcher,
            fullnodes=commonnodes,
        )
        cgdata = packer.generate(
            {nodemod.nullid},
            list(commonnodes),
            False,
            b'narrow_widen',
            changelog=False,
        )

        part = bundler.newpart(b'changegroup', data=cgdata)
        part.addparam(b'version', cgversion)
        if b'treemanifest' in repo.requirements:
            part.addparam(b'treemanifest', b'1')
        if b'exp-sidedata-flag' in repo.requirements:
            part.addparam(b'exp-sidedata', b'1')

    return bundler
@@ -1,1282 +1,1286 b''
# dispatch.py - command dispatching for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import difflib
import errno
import getopt
import os
import pdb
import re
import signal
import sys
import traceback


from .i18n import _
from .pycompat import getattr

from hgdemandimport import tracing

from . import (
    cmdutil,
    color,
    commands,
    demandimport,
    encoding,
    error,
    extensions,
    fancyopts,
    help,
    hg,
    hook,
    profiling,
    pycompat,
    rcutil,
    registrar,
    scmutil,
    ui as uimod,
    util,
)

from .utils import (
    procutil,
    stringutil,
)


class request(object):
    def __init__(
        self,
        args,
        ui=None,
        repo=None,
        fin=None,
        fout=None,
        ferr=None,
        fmsg=None,
        prereposetups=None,
    ):
        self.args = args
        self.ui = ui
        self.repo = repo

        # input/output/error streams
        self.fin = fin
        self.fout = fout
        self.ferr = ferr
        # separate stream for status/error messages
        self.fmsg = fmsg

        # remember options pre-parsed by _earlyparseopts()
        self.earlyoptions = {}

        # reposetups which run before extensions, useful for chg to pre-fill
        # low-level repo state (for example, changelog) before extensions.
        self.prereposetups = prereposetups or []

        # store the parsed and canonical command
        self.canonical_command = None

    def _runexithandlers(self):
        exc = None
        handlers = self.ui._exithandlers
        try:
            while handlers:
                func, args, kwargs = handlers.pop()
                try:
                    func(*args, **kwargs)
                except:  # re-raises below
                    if exc is None:
                        exc = sys.exc_info()[1]
                    self.ui.warnnoi18n(b'error in exit handlers:\n')
                    self.ui.traceback(force=True)
        finally:
            if exc is not None:
                raise exc


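# Editorial sketch (not part of the original module): registering an exit
# handler that request._runexithandlers above will pop LIFO. Handlers are
# (func, args, kwargs) triples; `ui` is a hypothetical ui instance and
# ui.write is only an illustrative callable.
def _example_register_exithandler(ui):
    ui._exithandlers.append((ui.write, (b'goodbye\n',), {}))

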
def run():
    """run the command in sys.argv"""
    initstdio()
    with tracing.log('parse args into request'):
        req = request(pycompat.sysargv[1:])
    err = None
    try:
        status = dispatch(req)
    except error.StdioError as e:
        err = e
        status = -1

    # In all cases we try to flush stdio streams.
    if util.safehasattr(req.ui, b'fout'):
        assert req.ui is not None  # help pytype
        assert req.ui.fout is not None  # help pytype
        try:
            req.ui.fout.flush()
        except IOError as e:
            err = e
            status = -1

    if util.safehasattr(req.ui, b'ferr'):
        assert req.ui is not None  # help pytype
        assert req.ui.ferr is not None  # help pytype
        try:
            if err is not None and err.errno != errno.EPIPE:
                req.ui.ferr.write(
                    b'abort: %s\n' % encoding.strtolocal(err.strerror)
                )
            req.ui.ferr.flush()
        # There's not much we can do about an I/O error here. So (possibly)
        # change the status code and move on.
        except IOError:
            status = -1

    _silencestdio()
    sys.exit(status & 255)


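# Editorial note (not part of the original module): Unix exit statuses are a
# single byte, so the masking above turns the -1 used for stdio failures
# into 255.
def _example_exit_code():
    return -1 & 255  # == 255

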
if pycompat.ispy3:

    def initstdio():
        pass

    def _silencestdio():
        for fp in (sys.stdout, sys.stderr):
            # Check if the file is okay
            try:
                fp.flush()
                continue
            except IOError:
                pass
            # Otherwise mark it as closed to silence "Exception ignored in"
            # message emitted by the interpreter finalizer. Be careful to
            # not close procutil.stdout, which may be a fdopen-ed file object
            # and its close() actually closes the underlying file descriptor.
            try:
                fp.close()
            except IOError:
                pass


else:

    def initstdio():
        for fp in (sys.stdin, sys.stdout, sys.stderr):
            procutil.setbinary(fp)

    def _silencestdio():
        pass


def _getsimilar(symbols, value):
    sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
    # The cutoff for similarity here is pretty arbitrary. It should
    # probably be investigated and tweaked.
    return [s for s in symbols if sim(s) > 0.6]


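# Editorial sketch (not part of the original module): what the 0.6 cutoff
# above means in practice. difflib's ratio is 2*M/T over the matched
# elements, so a near-miss like b'comit' vs b'commit' scores about 0.91 and
# is suggested, while an unrelated name falls below the threshold.
def _example_similar():
    return _getsimilar([b'commit', b'push'], b'comit')  # -> [b'commit']

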
def _reportsimilar(write, similar):
    if len(similar) == 1:
        write(_(b"(did you mean %s?)\n") % similar[0])
    elif similar:
        ss = b", ".join(sorted(similar))
        write(_(b"(did you mean one of %s?)\n") % ss)


def _formatparse(write, inst):
    similar = []
    if isinstance(inst, error.UnknownIdentifier):
        # make sure to check fileset first, as revset can invoke fileset
        similar = _getsimilar(inst.symbols, inst.function)
    if len(inst.args) > 1:
        write(
            _(b"hg: parse error at %s: %s\n")
            % (pycompat.bytestr(inst.args[1]), inst.args[0])
        )
        if inst.args[0].startswith(b' '):
            write(_(b"unexpected leading whitespace\n"))
    else:
        write(_(b"hg: parse error: %s\n") % inst.args[0])
        _reportsimilar(write, similar)
    if inst.hint:
        write(_(b"(%s)\n") % inst.hint)


def _formatargs(args):
    return b' '.join(procutil.shellquote(a) for a in args)


214 def dispatch(req):
def dispatch(req):
    """run the command specified in req.args; returns an integer status code"""
    with tracing.log('dispatch.dispatch'):
        if req.ferr:
            ferr = req.ferr
        elif req.ui:
            ferr = req.ui.ferr
        else:
            ferr = procutil.stderr

        try:
            if not req.ui:
                req.ui = uimod.ui.load()
            req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
            if req.earlyoptions[b'traceback']:
                req.ui.setconfig(b'ui', b'traceback', b'on', b'--traceback')

            # set ui streams from the request
            if req.fin:
                req.ui.fin = req.fin
            if req.fout:
                req.ui.fout = req.fout
            if req.ferr:
                req.ui.ferr = req.ferr
            if req.fmsg:
                req.ui.fmsg = req.fmsg
        except error.Abort as inst:
            ferr.write(_(b"abort: %s\n") % inst)
            if inst.hint:
                ferr.write(_(b"(%s)\n") % inst.hint)
            return -1
        except error.ParseError as inst:
            _formatparse(ferr.write, inst)
            return -1

        msg = _formatargs(req.args)
        starttime = util.timer()
        ret = 1  # default of Python exit code on unhandled exception
        try:
            ret = _runcatch(req) or 0
        except error.ProgrammingError as inst:
            req.ui.error(_(b'** ProgrammingError: %s\n') % inst)
            if inst.hint:
                req.ui.error(_(b'** (%s)\n') % inst.hint)
            raise
        except KeyboardInterrupt as inst:
            try:
                if isinstance(inst, error.SignalInterrupt):
                    msg = _(b"killed!\n")
                else:
                    msg = _(b"interrupted!\n")
                req.ui.error(msg)
            except error.SignalInterrupt:
                # maybe pager would quit without consuming all the output, and
                # SIGPIPE was raised. we cannot print anything in this case.
                pass
        except IOError as inst:
            if inst.errno != errno.EPIPE:
                raise
            ret = -1
        finally:
            duration = util.timer() - starttime
            req.ui.flush()
            if req.ui.logblockedtimes:
                req.ui._blockedtimes[b'command_duration'] = duration * 1000
                req.ui.log(
                    b'uiblocked',
                    b'ui blocked ms\n',
                    **pycompat.strkwargs(req.ui._blockedtimes)
                )
            return_code = ret & 255
            req.ui.log(
                b"commandfinish",
                b"%s exited %d after %0.2f seconds\n",
                msg,
                return_code,
                duration,
                return_code=return_code,
                duration=duration,
                canonical_command=req.canonical_command,
            )
            try:
                req._runexithandlers()
            except:  # exiting, so no re-raises
                ret = ret or -1
        return ret


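# Illustrative sketch (editorial, not part of the module): an embedder can
# drive dispatch() by building a request object (the request class is
# defined earlier in this module) and reading the integer status it returns:
#
#     from mercurial import dispatch as dispatchmod
#     req = dispatchmod.request([b'log', b'--limit', b'1'])
#     status = dispatchmod.dispatch(req)

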
def _runcatch(req):
    with tracing.log('dispatch._runcatch'):

        def catchterm(*args):
            raise error.SignalInterrupt

        ui = req.ui
        try:
            for name in b'SIGBREAK', b'SIGHUP', b'SIGTERM':
                num = getattr(signal, name, None)
                if num:
                    signal.signal(num, catchterm)
        except ValueError:
            pass  # happens if called in a thread

        def _runcatchfunc():
            realcmd = None
            try:
                cmdargs = fancyopts.fancyopts(
                    req.args[:], commands.globalopts, {}
                )
                cmd = cmdargs[0]
                aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
                realcmd = aliases[0]
            except (
                error.UnknownCommand,
                error.AmbiguousCommand,
                IndexError,
                getopt.GetoptError,
            ):
                # Don't handle this here. We know the command is
                # invalid, but all we're worried about for now is that
                # it's not a command that server operators expect to
                # be safe to offer to users in a sandbox.
                pass
            if realcmd == b'serve' and b'--stdio' in cmdargs:
                # We want to constrain 'hg serve --stdio' instances pretty
                # closely, as many shared-ssh access tools want to grant
                # access to run *only* 'hg -R $repo serve --stdio'. We
                # restrict to exactly that set of arguments, and prohibit
                # any repo name that starts with '--' to prevent
                # shenanigans wherein a user does something like pass
                # --debugger or --config=ui.debugger=1 as a repo
                # name. This used to actually run the debugger.
                if (
                    len(req.args) != 4
                    or req.args[0] != b'-R'
                    or req.args[1].startswith(b'--')
                    or req.args[2] != b'serve'
                    or req.args[3] != b'--stdio'
                ):
                    raise error.Abort(
                        _(b'potentially unsafe serve --stdio invocation: %s')
                        % (stringutil.pprint(req.args),)
                    )
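
            # Illustration (editorial sketch): the only invocation shape
            # that passes the check above is
            #     hg -R <repo> serve --stdio
            # while e.g. 'hg -R --debugger serve --stdio' is rejected.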

            try:
                debugger = b'pdb'
                debugtrace = {b'pdb': pdb.set_trace}
                debugmortem = {b'pdb': pdb.post_mortem}

                # read --config before doing anything else
                # (e.g. to change trust settings for reading .hg/hgrc)
                cfgs = _parseconfig(req.ui, req.earlyoptions[b'config'])

                if req.repo:
                    # copy configs that were passed on the cmdline (--config) to
                    # the repo ui
                    for sec, name, val in cfgs:
                        req.repo.ui.setconfig(
                            sec, name, val, source=b'--config'
                        )

                # developer config: ui.debugger
                debugger = ui.config(b"ui", b"debugger")
                debugmod = pdb
                if not debugger or ui.plain():
                    # if we are in HGPLAIN mode, then disable custom debugging
                    debugger = b'pdb'
                elif req.earlyoptions[b'debugger']:
                    # This import can be slow for fancy debuggers, so only
                    # do it when absolutely necessary, i.e. when actual
                    # debugging has been requested
                    with demandimport.deactivated():
                        try:
                            debugmod = __import__(debugger)
                        except ImportError:
                            pass  # Leave debugmod = pdb

                debugtrace[debugger] = debugmod.set_trace
                debugmortem[debugger] = debugmod.post_mortem

                # enter the debugger before command execution
                if req.earlyoptions[b'debugger']:
                    ui.warn(
                        _(
                            b"entering debugger - "
                            b"type c to continue starting hg or h for help\n"
                        )
                    )

                    if (
                        debugger != b'pdb'
                        and debugtrace[debugger] == debugtrace[b'pdb']
                    ):
                        ui.warn(
                            _(
                                b"%s debugger specified "
                                b"but its module was not found\n"
                            )
                            % debugger
                        )
                    with demandimport.deactivated():
                        debugtrace[debugger]()
                try:
                    return _dispatch(req)
                finally:
                    ui.flush()
            except:  # re-raises
                # enter the debugger when we hit an exception
                if req.earlyoptions[b'debugger']:
                    traceback.print_exc()
                    debugmortem[debugger](sys.exc_info()[2])
                raise

        return _callcatch(ui, _runcatchfunc)


def _callcatch(ui, func):
    """like scmutil.callcatch but handles more high-level exceptions about
    config parsing and commands. besides, use handlecommandexception to handle
    uncaught exceptions.
    """
    try:
        return scmutil.callcatch(ui, func)
    except error.AmbiguousCommand as inst:
        ui.warn(
            _(b"hg: command '%s' is ambiguous:\n    %s\n")
            % (inst.args[0], b" ".join(inst.args[1]))
        )
    except error.CommandError as inst:
        if inst.args[0]:
            ui.pager(b'help')
            msgbytes = pycompat.bytestr(inst.args[1])
            ui.warn(_(b"hg %s: %s\n") % (inst.args[0], msgbytes))
            commands.help_(ui, inst.args[0], full=False, command=True)
        else:
            ui.warn(_(b"hg: %s\n") % inst.args[1])
            ui.warn(_(b"(use 'hg help -v' for a list of global options)\n"))
    except error.ParseError as inst:
        _formatparse(ui.warn, inst)
        return -1
    except error.UnknownCommand as inst:
        nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.args[0]
        try:
            # check if the command is in a disabled extension
            # (but don't check for extensions themselves)
            formatted = help.formattedhelp(
                ui, commands, inst.args[0], unknowncmd=True
            )
            ui.warn(nocmdmsg)
            ui.write(formatted)
        except (error.UnknownCommand, error.Abort):
            suggested = False
            if len(inst.args) == 2:
                sim = _getsimilar(inst.args[1], inst.args[0])
                if sim:
                    ui.warn(nocmdmsg)
                    _reportsimilar(ui.warn, sim)
                    suggested = True
            if not suggested:
                ui.warn(nocmdmsg)
                ui.warn(_(b"(use 'hg help' for a list of commands)\n"))
    except IOError:
        raise
    except KeyboardInterrupt:
        raise
    except:  # probably re-raises
        if not handlecommandexception(ui):
            raise

    return -1


def aliasargs(fn, givenargs):
    args = []
    # only care about alias 'args', ignore 'args' set by extensions.wrapfunction
    if not util.safehasattr(fn, b'_origfunc'):
        args = getattr(fn, 'args', args)
    if args:
        cmd = b' '.join(map(procutil.shellquote, args))

        nums = []

        def replacer(m):
            num = int(m.group(1)) - 1
            nums.append(num)
            if num < len(givenargs):
                return givenargs[num]
            raise error.Abort(_(b'too few arguments for command alias'))

        cmd = re.sub(br'\$(\d+|\$)', replacer, cmd)
        givenargs = [x for i, x in enumerate(givenargs) if i not in nums]
        args = pycompat.shlexsplit(cmd)
    return args + givenargs


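# Illustrative sketch (editorial, hypothetical alias): with a config entry
#     [alias]
#     lastn = log --limit $1
# the replacer above substitutes $1 from the arguments given on the command
# line, so 'hg lastn 3' expands to 'hg log --limit 3', and the consumed
# positional argument is dropped from givenargs.

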
def aliasinterpolate(name, args, cmd):
    '''interpolate args into cmd for shell aliases

    This also handles $0, $@ and "$@".
    '''
    # util.interpolate can't deal with "$@" (with quotes) because it's only
    # built to match prefix + patterns.
    replacemap = dict((b'$%d' % (i + 1), arg) for i, arg in enumerate(args))
    replacemap[b'$0'] = name
    replacemap[b'$$'] = b'$'
    replacemap[b'$@'] = b' '.join(args)
    # Typical Unix shells interpolate "$@" (with quotes) as all the positional
    # parameters, separated out into words. Emulate the same behavior here by
    # quoting the arguments individually. POSIX shells will then typically
    # tokenize each argument into exactly one word.
    replacemap[b'"$@"'] = b' '.join(procutil.shellquote(arg) for arg in args)
    # escape '\$' for regex
    regex = b'|'.join(replacemap.keys()).replace(b'$', br'\$')
    r = re.compile(regex)
    return r.sub(lambda x: replacemap[x.group()], cmd)


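# Illustrative sketch (editorial, hypothetical alias): for a shell alias
#     [alias]
#     echoargs = !echo "$@"
# invoked as 'hg echoargs a "b c"', the '"$@"' entry above interpolates to
# individually shell-quoted words, roughly: echo 'a' 'b c'

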
class cmdalias(object):
    def __init__(self, ui, name, definition, cmdtable, source):
        self.name = self.cmd = name
        self.cmdname = b''
        self.definition = definition
        self.fn = None
        self.givenargs = []
        self.opts = []
        self.help = b''
        self.badalias = None
        self.unknowncmd = False
        self.source = source

        try:
            aliases, entry = cmdutil.findcmd(self.name, cmdtable)
            for alias, e in pycompat.iteritems(cmdtable):
                if e is entry:
                    self.cmd = alias
                    break
            self.shadows = True
        except error.UnknownCommand:
            self.shadows = False

        if not self.definition:
            self.badalias = _(b"no definition for alias '%s'") % self.name
            return

        if self.definition.startswith(b'!'):
            shdef = self.definition[1:]
            self.shell = True

            def fn(ui, *args):
                env = {b'HG_ARGS': b' '.join((self.name,) + args)}

                def _checkvar(m):
                    if m.groups()[0] == b'$':
                        return m.group()
                    elif int(m.groups()[0]) <= len(args):
                        return m.group()
                    else:
                        ui.debug(
                            b"No argument found for substitution "
                            b"of %i variable in alias '%s' definition.\n"
                            % (int(m.groups()[0]), self.name)
                        )
                        return b''

                cmd = re.sub(br'\$(\d+|\$)', _checkvar, shdef)
                cmd = aliasinterpolate(self.name, args, cmd)
                return ui.system(
                    cmd, environ=env, blockedtag=b'alias_%s' % self.name
                )

            self.fn = fn
            self.alias = True
            self._populatehelp(ui, name, shdef, self.fn)
            return

        try:
            args = pycompat.shlexsplit(self.definition)
        except ValueError as inst:
            self.badalias = _(b"error in definition for alias '%s': %s") % (
                self.name,
                stringutil.forcebytestr(inst),
            )
            return
        earlyopts, args = _earlysplitopts(args)
        if earlyopts:
            self.badalias = _(
                b"error in definition for alias '%s': %s may "
                b"only be given on the command line"
            ) % (self.name, b'/'.join(pycompat.ziplist(*earlyopts)[0]))
            return
        self.cmdname = cmd = args.pop(0)
        self.givenargs = args

        try:
            tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
            if len(tableentry) > 2:
                self.fn, self.opts, cmdhelp = tableentry
            else:
                self.fn, self.opts = tableentry
                cmdhelp = None

            self.alias = True
            self._populatehelp(ui, name, cmd, self.fn, cmdhelp)

        except error.UnknownCommand:
            self.badalias = _(
                b"alias '%s' resolves to unknown command '%s'"
            ) % (self.name, cmd,)
            self.unknowncmd = True
        except error.AmbiguousCommand:
            self.badalias = _(
                b"alias '%s' resolves to ambiguous command '%s'"
            ) % (self.name, cmd)

    def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
        # confine strings to be passed to i18n.gettext()
        cfg = {}
        for k in (b'doc', b'help', b'category'):
            v = ui.config(b'alias', b'%s:%s' % (name, k), None)
            if v is None:
                continue
            if not encoding.isasciistr(v):
                self.badalias = _(
                    b"non-ASCII character in alias definition '%s:%s'"
                ) % (name, k)
                return
            cfg[k] = v

        self.help = cfg.get(b'help', defaulthelp or b'')
        if self.help and self.help.startswith(b"hg " + cmd):
            # drop prefix in old-style help lines so hg shows the alias
            self.help = self.help[4 + len(cmd) :]

        self.owndoc = b'doc' in cfg
        doc = cfg.get(b'doc', pycompat.getdoc(fn))
        if doc is not None:
            doc = pycompat.sysstr(doc)
        self.__doc__ = doc

        self.helpcategory = cfg.get(
            b'category', registrar.command.CATEGORY_NONE
        )

    @property
    def args(self):
        args = pycompat.maplist(util.expandpath, self.givenargs)
        return aliasargs(self.fn, args)

    def __getattr__(self, name):
        adefaults = {
            'norepo': True,
            'intents': set(),
            'optionalrepo': False,
            'inferrepo': False,
        }
        if name not in adefaults:
            raise AttributeError(name)
        if self.badalias or util.safehasattr(self, b'shell'):
            return adefaults[name]
        return getattr(self.fn, name)

    def __call__(self, ui, *args, **opts):
        if self.badalias:
            hint = None
            if self.unknowncmd:
                try:
                    # check if the command is in a disabled extension
                    cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
                    hint = _(b"'%s' is provided by '%s' extension") % (cmd, ext)
                except error.UnknownCommand:
                    pass
            raise error.Abort(self.badalias, hint=hint)
        if self.shadows:
            ui.debug(
                b"alias '%s' shadows command '%s'\n" % (self.name, self.cmdname)
            )

        ui.log(
            b'commandalias',
            b"alias '%s' expands to '%s'\n",
            self.name,
            self.definition,
        )
        if util.safehasattr(self, b'shell'):
            return self.fn(ui, *args, **opts)
        else:
            try:
                return util.checksignature(self.fn)(ui, *args, **opts)
            except error.SignatureError:
                args = b' '.join([self.cmdname] + self.args)
                ui.debug(b"alias '%s' expands to '%s'\n" % (self.name, args))
                raise


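# Illustrative sketch (editorial, hypothetical aliases): cmdalias instances
# come from [alias] config entries, e.g.
#     [alias]
#     latest = log --limit 5
#     wip = !$HG log -r 'not public()'
# 'latest' resolves through cmdutil.findcmd() above, while 'wip' takes the
# shell-alias branch because its definition starts with '!'.

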
class lazyaliasentry(object):
    """like a typical command entry (func, opts, help), but is lazy"""

    def __init__(self, ui, name, definition, cmdtable, source):
        self.ui = ui
        self.name = name
        self.definition = definition
        self.cmdtable = cmdtable.copy()
        self.source = source
        self.alias = True

    @util.propertycache
    def _aliasdef(self):
        return cmdalias(
            self.ui, self.name, self.definition, self.cmdtable, self.source
        )

    def __getitem__(self, n):
        aliasdef = self._aliasdef
        if n == 0:
            return aliasdef
        elif n == 1:
            return aliasdef.opts
        elif n == 2:
            return aliasdef.help
        else:
            raise IndexError

    def __iter__(self):
        for i in range(3):
            yield self[i]

    def __len__(self):
        return 3


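# Illustrative sketch (editorial, hypothetical alias name): a lazyaliasentry
# unpacks like the (func, opts, help) tuples stored in the command table,
#     fn, opts, helptext = cmdtable[b'latest']
# but the underlying cmdalias is only built on first access, via the
# propertycache above.

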
def addaliases(ui, cmdtable):
    # aliases are processed after extensions have been loaded, so they
    # may use extension commands. Aliases can also use other alias definitions,
    # but only if they have been defined prior to the current definition.
    for alias, definition in ui.configitems(b'alias', ignoresub=True):
        try:
            if cmdtable[alias].definition == definition:
                continue
        except (KeyError, AttributeError):
            # definition might not exist or it might not be a cmdalias
            pass

        source = ui.configsource(b'alias', alias)
        entry = lazyaliasentry(ui, alias, definition, cmdtable, source)
        cmdtable[alias] = entry


def _parse(ui, args):
    options = {}
    cmdoptions = {}

    try:
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except getopt.GetoptError as inst:
        raise error.CommandError(None, stringutil.forcebytestr(inst))

    if args:
        cmd, args = args[0], args[1:]
        aliases, entry = cmdutil.findcmd(
            cmd, commands.table, ui.configbool(b"ui", b"strict")
        )
        cmd = aliases[0]
        args = aliasargs(entry[0], args)
        defaults = ui.config(b"defaults", cmd)
        if defaults:
            args = (
                pycompat.maplist(util.expandpath, pycompat.shlexsplit(defaults))
                + args
            )
        c = list(entry[1])
    else:
        cmd = None
        c = []

    # combine global options into local
    for o in commands.globalopts:
        c.append((o[0], o[1], options[o[1]], o[3]))

    try:
        args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
    except getopt.GetoptError as inst:
        raise error.CommandError(cmd, stringutil.forcebytestr(inst))

    # separate global options back out
    for o in commands.globalopts:
        n = o[1]
        options[n] = cmdoptions[n]
        del cmdoptions[n]

    return (cmd, cmd and entry[0] or None, args, options, cmdoptions)


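# Illustrative sketch (editorial): the [defaults] lookup above prepends
# configured options to the command's arguments, e.g. with
#     [defaults]
#     log = --graph
# 'hg log -l 3' is parsed as if 'hg log --graph -l 3' had been typed.

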
def _parseconfig(ui, config):
    """parse the --config options from the command line"""
    configs = []

    for cfg in config:
        try:
            name, value = [cfgelem.strip() for cfgelem in cfg.split(b'=', 1)]
            section, name = name.split(b'.', 1)
            if not section or not name:
                raise IndexError
            ui.setconfig(section, name, value, b'--config')
            configs.append((section, name, value))
        except (IndexError, ValueError):
            raise error.Abort(
                _(
                    b'malformed --config option: %r '
                    b'(use --config section.name=value)'
                )
                % pycompat.bytestr(cfg)
            )

    return configs


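# Illustrative sketch (editorial): given
#     hg --config ui.verbose=true status
# the loop above yields the tuple (b'ui', b'verbose', b'true'), while a
# value missing the 'section.name=value' shape, such as
# '--config verbose=true', triggers the Abort.

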
def _earlyparseopts(ui, args):
    options = {}
    fancyopts.fancyopts(
        args,
        commands.globalopts,
        options,
        gnu=not ui.plain(b'strictflags'),
        early=True,
        optaliases={b'repository': [b'repo']},
    )
    return options


def _earlysplitopts(args):
    """Split args into a list of possible early options and remainder args"""
    shortoptions = b'R:'
    # TODO: perhaps 'debugger' should be included
    longoptions = [b'cwd=', b'repository=', b'repo=', b'config=']
    return fancyopts.earlygetopt(
        args, shortoptions, longoptions, gnu=True, keepsep=True
    )


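# Illustrative sketch (editorial): an argument vector along the lines of
#     [b'-R', b'myrepo', b'log', b'-l', b'1']
# separates into the early ('-R', 'myrepo') option pair and the remaining
# [b'log', b'-l', b'1'] command arguments; the exact tuple shape is
# whatever fancyopts.earlygetopt returns.

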
def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
    # run pre-hook, and abort if it fails
    hook.hook(
        lui,
        repo,
        b"pre-%s" % cmd,
        True,
        args=b" ".join(fullargs),
        pats=cmdpats,
        opts=cmdoptions,
    )
    try:
        ret = _runcommand(ui, options, cmd, d)
        # run post-hook, passing command result
        hook.hook(
            lui,
            repo,
            b"post-%s" % cmd,
            False,
            args=b" ".join(fullargs),
            result=ret,
            pats=cmdpats,
            opts=cmdoptions,
        )
    except Exception:
        # run failure hook and re-raise
        hook.hook(
            lui,
            repo,
            b"fail-%s" % cmd,
            False,
            args=b" ".join(fullargs),
            pats=cmdpats,
            opts=cmdoptions,
        )
        raise
    return ret


def _getlocal(ui, rpath, wd=None):
    """Return (path, local ui object) for the given target path.

    Takes paths in [cwd]/.hg/hgrc into account.
    """
    if wd is None:
        try:
            wd = encoding.getcwd()
        except OSError as e:
            raise error.Abort(
                _(b"error getting current working directory: %s")
                % encoding.strtolocal(e.strerror)
            )

    path = cmdutil.findrepo(wd) or b""
    if not path:
        lui = ui
    else:
        lui = ui.copy()
        if rcutil.use_repo_hgrc():
            lui.readconfig(os.path.join(path, b".hg", b"hgrc"), path)

    if rpath:
        path = lui.expandpath(rpath)
        lui = ui.copy()
        if rcutil.use_repo_hgrc():
            lui.readconfig(os.path.join(path, b".hg", b"hgrc"), path)

    return path, lui


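# Note on the new guard above (editorial): rcutil.use_repo_hgrc() is the
# hook added by this change so that an environment can opt out of reading
# per-repository .hg/hgrc files entirely (exposed as HGRCSKIPREPO in
# current Mercurial; treat that name as an assumption, it is not shown in
# this diff).

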
def _checkshellalias(lui, ui, args):
    """Return the function to run the shell alias, if it is required"""
    options = {}

    try:
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except getopt.GetoptError:
        return

    if not args:
        return

    cmdtable = commands.table

    cmd = args[0]
    try:
        strict = ui.configbool(b"ui", b"strict")
        aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
    except (error.AmbiguousCommand, error.UnknownCommand):
        return

    cmd = aliases[0]
    fn = entry[0]

    if cmd and util.safehasattr(fn, b'shell'):
        # shell alias shouldn't receive early options which are consumed by hg
        _earlyopts, args = _earlysplitopts(args)
        d = lambda: fn(ui, *args[1:])
        return lambda: runcommand(
            lui, None, cmd, args[:1], ui, options, d, [], {}
        )


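# Illustrative sketch (editorial, hypothetical alias): given
#     [alias]
#     tip5 = !$HG log -l 5
# 'hg tip5' resolves here to a function carrying the 'shell' marker set by
# cmdalias, so it runs through this fast path before the full option
# parsing in _parse().

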
def _dispatch(req):
    args = req.args
    ui = req.ui

    # check for cwd
    cwd = req.earlyoptions[b'cwd']
    if cwd:
        os.chdir(cwd)

    rpath = req.earlyoptions[b'repository']
    path, lui = _getlocal(ui, rpath)

    uis = {ui, lui}

    if req.repo:
        uis.add(req.repo.ui)

    if (
        req.earlyoptions[b'verbose']
        or req.earlyoptions[b'debug']
        or req.earlyoptions[b'quiet']
    ):
        for opt in (b'verbose', b'debug', b'quiet'):
            val = pycompat.bytestr(bool(req.earlyoptions[opt]))
            for ui_ in uis:
                ui_.setconfig(b'ui', opt, val, b'--' + opt)

    if req.earlyoptions[b'profile']:
        for ui_ in uis:
            ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')

    profile = lui.configbool(b'profiling', b'enabled')
    with profiling.profile(lui, enabled=profile) as profiler:
        # Configure extensions in phases: uisetup, extsetup, cmdtable, and
        # reposetup
        extensions.loadall(lui)
        # Propagate any changes to lui.__class__ by extensions
        ui.__class__ = lui.__class__

        # (uisetup and extsetup are handled in extensions.loadall)

        # (reposetup is handled in hg.repository)

        addaliases(lui, commands.table)

        # All aliases and commands are completely defined, now.
        # Check abbreviation/ambiguity of shell alias.
        shellaliasfn = _checkshellalias(lui, ui, args)
        if shellaliasfn:
            # no additional configs will be set, set up the ui instances
            for ui_ in uis:
                extensions.populateui(ui_)
            return shellaliasfn()

        # check for fallback encoding
        fallback = lui.config(b'ui', b'fallbackencoding')
        if fallback:
            encoding.fallbackencoding = fallback

        fullargs = args
        cmd, func, args, options, cmdoptions = _parse(lui, args)

        # store the canonical command name in request object for later access
        req.canonical_command = cmd

        if options[b"config"] != req.earlyoptions[b"config"]:
            raise error.Abort(_(b"option --config may not be abbreviated!"))
        if options[b"cwd"] != req.earlyoptions[b"cwd"]:
            raise error.Abort(_(b"option --cwd may not be abbreviated!"))
        if options[b"repository"] != req.earlyoptions[b"repository"]:
            raise error.Abort(
                _(
                    b"option -R has to be separated from other options (e.g. not "
                    b"-qR) and --repository may only be abbreviated as --repo!"
                )
            )
        if options[b"debugger"] != req.earlyoptions[b"debugger"]:
            raise error.Abort(_(b"option --debugger may not be abbreviated!"))
        # don't validate --profile/--traceback, which can be enabled from now

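        # Illustration (editorial sketch): the checks above mean that
        # 'hg --repo foo log' and 'hg -R foo log' are accepted, while an
        # attached form such as 'hg -qR foo log' or an abbreviation like
        # 'hg --conf x.y=z' aborts with the messages above.
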
        if options[b"encoding"]:
            encoding.encoding = options[b"encoding"]
        if options[b"encodingmode"]:
            encoding.encodingmode = options[b"encodingmode"]
        if options[b"time"]:

            def get_times():
                t = os.times()
                if t[4] == 0.0:
                    # Windows leaves this as zero, so use time.perf_counter()
                    t = (t[0], t[1], t[2], t[3], util.timer())
                return t

            s = get_times()

            def print_time():
                t = get_times()
                ui.warn(
                    _(b"time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n")
                    % (
                        t[4] - s[4],
                        t[0] - s[0],
                        t[2] - s[2],
                        t[1] - s[1],
                        t[3] - s[3],
                    )
                )

            ui.atexit(print_time)
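            # Sample of the line printed at exit (editorial sketch):
            #     time: real 1.420 secs (user 0.970+0.000 sys 0.310+0.000)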
        if options[b"profile"]:
            profiler.start()

        # if abbreviated version of this were used, take them in account, now
        if options[b'verbose'] or options[b'debug'] or options[b'quiet']:
            for opt in (b'verbose', b'debug', b'quiet'):
                if options[opt] == req.earlyoptions[opt]:
                    continue
                val = pycompat.bytestr(bool(options[opt]))
                for ui_ in uis:
                    ui_.setconfig(b'ui', opt, val, b'--' + opt)

        if options[b'traceback']:
            for ui_ in uis:
                ui_.setconfig(b'ui', b'traceback', b'on', b'--traceback')

        if options[b'noninteractive']:
            for ui_ in uis:
                ui_.setconfig(b'ui', b'interactive', b'off', b'-y')

        if cmdoptions.get(b'insecure', False):
            for ui_ in uis:
                ui_.insecureconnections = True

        # setup color handling before pager, because setting up pager
        # might cause incorrect console information
        coloropt = options[b'color']
        for ui_ in uis:
            if coloropt:
                ui_.setconfig(b'ui', b'color', coloropt, b'--color')
            color.setup(ui_)

        if stringutil.parsebool(options[b'pager']):
            # ui.pager() expects 'internal-always-' prefix in this case
            ui.pager(b'internal-always-' + cmd)
        elif options[b'pager'] != b'auto':
            for ui_ in uis:
                ui_.disablepager()

        # configs are fully loaded, set up the ui instances
        for ui_ in uis:
            extensions.populateui(ui_)

        if options[b'version']:
            return commands.version_(ui)
        if options[b'help']:
            return commands.help_(ui, cmd, command=cmd is not None)
        elif not cmd:
            return commands.help_(ui, b'shortlist')

        repo = None
        cmdpats = args[:]
        assert func is not None  # help out pytype
        if not func.norepo:
            # use the repo from the request only if we don't have -R
            if not rpath and not cwd:
                repo = req.repo

            if repo:
                # set the descriptors of the repo ui to those of ui
                repo.ui.fin = ui.fin
                repo.ui.fout = ui.fout
                repo.ui.ferr = ui.ferr
                repo.ui.fmsg = ui.fmsg
            else:
                try:
                    repo = hg.repository(
                        ui,
                        path=path,
                        presetupfuncs=req.prereposetups,
                        intents=func.intents,
                    )
                    if not repo.local():
                        raise error.Abort(
                            _(b"repository '%s' is not local") % path
                        )
                    repo.ui.setconfig(
                        b"bundle", b"mainreporoot", repo.root, b'repo'
                    )
                except error.RequirementError:
                    raise
                except error.RepoError:
                    if rpath:  # invalid -R path
                        raise
                    if not func.optionalrepo:
                        if func.inferrepo and args and not path:
                            # try to infer -R from command args
                            repos = pycompat.maplist(cmdutil.findrepo, args)
                            guess = repos[0]
                            if guess and repos.count(guess) == len(repos):
                                req.args = [b'--repository', guess] + fullargs
                                req.earlyoptions[b'repository'] = guess
                                return _dispatch(req)
                        if not path:
                            raise error.RepoError(
                                _(
                                    b"no repository found in"
                                    b" '%s' (.hg not found)"
                                )
                                % encoding.getcwd()
                            )
                        raise
            if repo:
                ui = repo.ui
                if options[b'hidden']:
                    repo = repo.unfiltered()
                args.insert(0, repo)
        elif rpath:
            ui.warn(_(b"warning: --repository ignored\n"))

        msg = _formatargs(fullargs)
        ui.log(b"command", b'%s\n', msg)
        strcmdopt = pycompat.strkwargs(cmdoptions)
        d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
        try:
            return runcommand(
                lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions
            )
        finally:
            if repo and repo != req.repo:
                repo.close()


def _runcommand(ui, options, cmd, cmdfunc):
    """Run a command function, possibly with profiling enabled."""
    try:
        with tracing.log("Running %s command" % cmd):
            return cmdfunc()
    except error.SignatureError:
        raise error.CommandError(cmd, _(b'invalid arguments'))


def _exceptionwarning(ui):
    """Produce a warning message for the current active exception"""

    # For compatibility checking, we discard the portion of the hg
    # version after the + on the assumption that if a "normal
    # user" is running a build with a + in it the packager
    # probably built from fairly close to a tag and anyone with a
    # 'make local' copy of hg (where the version number can be out
    # of date) will be clueful enough to notice the implausible
    # version number and try updating.
    ct = util.versiontuple(n=2)
    worst = None, ct, b''
    if ui.config(b'ui', b'supportcontact') is None:
        for name, mod in extensions.extensions():
            # 'testedwith' should be bytes, but not all extensions are ported
            # to py3 and we don't want UnicodeException because of that.
            testedwith = stringutil.forcebytestr(
                getattr(mod, 'testedwith', b'')
            )
            report = getattr(mod, 'buglink', _(b'the extension author.'))
            if not testedwith.strip():
                # We found an untested extension. It's likely the culprit.
                worst = name, b'unknown', report
                break

            # Never blame on extensions bundled with Mercurial.
            if extensions.ismoduleinternal(mod):
                continue

            tested = [util.versiontuple(t, 2) for t in testedwith.split()]
            if ct in tested:
                continue

            lower = [t for t in tested if t < ct]
            nearest = max(lower or tested)
            if worst[0] is None or nearest < worst[1]:
                worst = name, nearest, report
    if worst[0] is not None:
        name, testedwith, report = worst
        if not isinstance(testedwith, (bytes, str)):
            testedwith = b'.'.join(
                [stringutil.forcebytestr(c) for c in testedwith]
            )
        warning = _(
            b'** Unknown exception encountered with '
            b'possibly-broken third-party extension %s\n'
            b'** which supports versions %s of Mercurial.\n'
            b'** Please disable %s and try your action again.\n'
            b'** If that fixes the bug please report it to %s\n'
        ) % (name, testedwith, name, stringutil.forcebytestr(report))
    else:
        bugtracker = ui.config(b'ui', b'supportcontact')
        if bugtracker is None:
            bugtracker = _(b"https://mercurial-scm.org/wiki/BugTracker")
        warning = (
            _(
                b"** unknown exception encountered, "
                b"please report by visiting\n** "
            )
            + bugtracker
            + b'\n'
        )
    sysversion = pycompat.sysbytes(sys.version).replace(b'\n', b'')
    warning += (
        (_(b"** Python %s\n") % sysversion)
        + (_(b"** Mercurial Distributed SCM (version %s)\n") % util.version())
        + (
            _(b"** Extensions loaded: %s\n")
            % b", ".join([x[0] for x in extensions.extensions()])
        )
    )
    return warning


1268 def handlecommandexception(ui):
1272 def handlecommandexception(ui):
1269 """Produce a warning message for broken commands
1273 """Produce a warning message for broken commands
1270
1274
1271 Called when handling an exception; the exception is reraised if
1275 Called when handling an exception; the exception is reraised if
1272 this function returns False, ignored otherwise.
1276 this function returns False, ignored otherwise.
1273 """
1277 """
1274 warning = _exceptionwarning(ui)
1278 warning = _exceptionwarning(ui)
1275 ui.log(
1279 ui.log(
1276 b"commandexception",
1280 b"commandexception",
1277 b"%s\n%s\n",
1281 b"%s\n%s\n",
1278 warning,
1282 warning,
1279 pycompat.sysbytes(traceback.format_exc()),
1283 pycompat.sysbytes(traceback.format_exc()),
1280 )
1284 )
1281 ui.warn(warning)
1285 ui.warn(warning)
1282 return False # re-raise the exception
1286 return False # re-raise the exception
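The 'worst' selection above keeps the extension whose nearest lower tested version lags furthest behind the running Mercurial. A minimal standalone sketch of that heuristic, assuming plain str versions instead of util.versiontuple over bytes; every name below is illustrative, not part of dispatch.py:

def versiontuple(v, n=2):
    # Simplified stand-in for util.versiontuple: first n numeric parts.
    parts = [int(''.join(ch for ch in piece if ch.isdigit()) or 0)
             for piece in v.split('.')[:n]]
    return tuple(parts + [0] * (n - len(parts)))

def worst_extension(current, testedwith_by_name):
    ct = versiontuple(current)
    worst = (None, ct)
    for name, testedwith in testedwith_by_name.items():
        if not testedwith.strip():
            return (name, 'unknown')   # untested extension: likely culprit
        tested = [versiontuple(t) for t in testedwith.split()]
        if ct in tested:
            continue                   # explicitly supports this version
        lower = [t for t in tested if t < ct]
        nearest = max(lower or tested)
        if worst[0] is None or nearest < worst[1]:
            worst = (name, nearest)
    return worst

print(worst_extension('5.3', {'good': '5.2 5.3', 'stale': '4.6 4.7'}))
# -> ('stale', (4, 7))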
@@ -1,579 +1,581 b''
1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import gc
11 import gc
12 import os
12 import os
13 import time
13 import time
14
14
15 from ..i18n import _
15 from ..i18n import _
16
16
17 from .common import (
17 from .common import (
18 ErrorResponse,
18 ErrorResponse,
19 HTTP_SERVER_ERROR,
19 HTTP_SERVER_ERROR,
20 cspvalues,
20 cspvalues,
21 get_contact,
21 get_contact,
22 get_mtime,
22 get_mtime,
23 ismember,
23 ismember,
24 paritygen,
24 paritygen,
25 staticfile,
25 staticfile,
26 statusmessage,
26 statusmessage,
27 )
27 )
28
28
29 from .. import (
29 from .. import (
30 configitems,
30 configitems,
31 encoding,
31 encoding,
32 error,
32 error,
33 extensions,
33 extensions,
34 hg,
34 hg,
35 pathutil,
35 pathutil,
36 profiling,
36 profiling,
37 pycompat,
37 pycompat,
38 rcutil,
38 registrar,
39 registrar,
39 scmutil,
40 scmutil,
40 templater,
41 templater,
41 templateutil,
42 templateutil,
42 ui as uimod,
43 ui as uimod,
43 util,
44 util,
44 )
45 )
45
46
46 from . import (
47 from . import (
47 hgweb_mod,
48 hgweb_mod,
48 request as requestmod,
49 request as requestmod,
49 webutil,
50 webutil,
50 wsgicgi,
51 wsgicgi,
51 )
52 )
52 from ..utils import dateutil
53 from ..utils import dateutil
53
54
54
55
55 def cleannames(items):
56 def cleannames(items):
56 return [(util.pconvert(name).strip(b'/'), path) for name, path in items]
57 return [(util.pconvert(name).strip(b'/'), path) for name, path in items]
57
58
58
59
59 def findrepos(paths):
60 def findrepos(paths):
60 repos = []
61 repos = []
61 for prefix, root in cleannames(paths):
62 for prefix, root in cleannames(paths):
62 roothead, roottail = os.path.split(root)
63 roothead, roottail = os.path.split(root)
63 # "foo = /bar/*" or "foo = /bar/**" lets every repo /bar/N in or below
64 # "foo = /bar/*" or "foo = /bar/**" lets every repo /bar/N in or below
64 # /bar/ be served as foo/N.
65 # /bar/ be served as foo/N.
65 # '*' will not search inside dirs with .hg (except .hg/patches),
66 # '*' will not search inside dirs with .hg (except .hg/patches),
66 # '**' will search inside dirs with .hg (and thus also find subrepos).
67 # '**' will search inside dirs with .hg (and thus also find subrepos).
67 try:
68 try:
68 recurse = {b'*': False, b'**': True}[roottail]
69 recurse = {b'*': False, b'**': True}[roottail]
69 except KeyError:
70 except KeyError:
70 repos.append((prefix, root))
71 repos.append((prefix, root))
71 continue
72 continue
72 roothead = os.path.normpath(os.path.abspath(roothead))
73 roothead = os.path.normpath(os.path.abspath(roothead))
73 paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse)
74 paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse)
74 repos.extend(urlrepos(prefix, roothead, paths))
75 repos.extend(urlrepos(prefix, roothead, paths))
75 return repos
76 return repos
76
77
77
78
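A toy illustration of the tail handling in findrepos above: a '*' or '**' suffix turns the entry into a walk rooted at the parent directory (shallow vs. recursive into .hg-carrying directories), while anything else is taken as a literal repository path. walkrepos itself is omitted; classify() is a hypothetical helper:

import os

def classify(prefix, root):
    roothead, roottail = os.path.split(root)
    try:
        # Same lookup as above: only '*' and '**' trigger a walk.
        recurse = {'*': False, '**': True}[roottail]
    except KeyError:
        return ('literal', root)
    return ('walk-recursive' if recurse else 'walk-shallow', roothead)

for spec in [('foo', '/bar/*'), ('foo', '/bar/**'), ('foo', '/bar/repo')]:
    print(spec, '->', classify(*spec))
# ('foo', '/bar/*') -> ('walk-shallow', '/bar')
# ('foo', '/bar/**') -> ('walk-recursive', '/bar')
# ('foo', '/bar/repo') -> ('literal', '/bar/repo')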
78 def urlrepos(prefix, roothead, paths):
79 def urlrepos(prefix, roothead, paths):
79 """yield url paths and filesystem paths from a list of repo paths
80 """yield url paths and filesystem paths from a list of repo paths
80
81
81 >>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq]
82 >>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq]
82 >>> conv(urlrepos(b'hg', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
83 >>> conv(urlrepos(b'hg', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
83 [('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')]
84 [('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')]
84 >>> conv(urlrepos(b'', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
85 >>> conv(urlrepos(b'', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
85 [('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')]
86 [('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')]
86 """
87 """
87 for path in paths:
88 for path in paths:
88 path = os.path.normpath(path)
89 path = os.path.normpath(path)
89 yield (
90 yield (
90 prefix + b'/' + util.pconvert(path[len(roothead) :]).lstrip(b'/')
91 prefix + b'/' + util.pconvert(path[len(roothead) :]).lstrip(b'/')
91 ).strip(b'/'), path
92 ).strip(b'/'), path
92
93
93
94
94 def readallowed(ui, req):
95 def readallowed(ui, req):
95 """Check allow_read and deny_read config options of a repo's ui object
96 """Check allow_read and deny_read config options of a repo's ui object
96 to determine user permissions. By default, with neither option set (or
97 to determine user permissions. By default, with neither option set (or
97 both empty), allow all users to read the repo. There are two ways a
98 both empty), allow all users to read the repo. There are two ways a
98 user can be denied read access: (1) deny_read is not empty, and the
99 user can be denied read access: (1) deny_read is not empty, and the
99 user is unauthenticated or deny_read contains user (or *), and (2)
100 user is unauthenticated or deny_read contains user (or *), and (2)
100 allow_read is not empty and the user is not in allow_read. Return True
101 allow_read is not empty and the user is not in allow_read. Return True
101 if user is allowed to read the repo, else return False."""
102 if user is allowed to read the repo, else return False."""
102
103
103 user = req.remoteuser
104 user = req.remoteuser
104
105
105 deny_read = ui.configlist(b'web', b'deny_read', untrusted=True)
106 deny_read = ui.configlist(b'web', b'deny_read', untrusted=True)
106 if deny_read and (not user or ismember(ui, user, deny_read)):
107 if deny_read and (not user or ismember(ui, user, deny_read)):
107 return False
108 return False
108
109
109 allow_read = ui.configlist(b'web', b'allow_read', untrusted=True)
110 allow_read = ui.configlist(b'web', b'allow_read', untrusted=True)
110 # by default, allow reading if no allow_read option has been set
111 # by default, allow reading if no allow_read option has been set
111 if not allow_read or ismember(ui, user, allow_read):
112 if not allow_read or ismember(ui, user, allow_read):
112 return True
113 return True
113
114
114 return False
115 return False
115
116
116
117
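A compact restatement of readallowed's deny-then-allow order, with ismember approximated by a membership test where '*' matches everyone; a sketch only, not hgweb's implementation:

def read_allowed(user, deny_read, allow_read):
    def member(u, lst):
        return '*' in lst or u in lst
    # Deny wins first: any non-empty deny list rejects unauthenticated
    # users outright, plus any user it matches.
    if deny_read and (not user or member(user, deny_read)):
        return False
    # An empty allow list means everyone may read.
    return not allow_read or member(user, allow_read)

assert read_allowed('alice', deny_read=[], allow_read=[])
assert not read_allowed(None, deny_read=['*'], allow_read=[])
assert not read_allowed('bob', deny_read=[], allow_read=['alice'])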
117 def rawindexentries(ui, repos, req, subdir=b''):
118 def rawindexentries(ui, repos, req, subdir=b''):
118 descend = ui.configbool(b'web', b'descend')
119 descend = ui.configbool(b'web', b'descend')
119 collapse = ui.configbool(b'web', b'collapse')
120 collapse = ui.configbool(b'web', b'collapse')
120 seenrepos = set()
121 seenrepos = set()
121 seendirs = set()
122 seendirs = set()
122 for name, path in repos:
123 for name, path in repos:
123
124
124 if not name.startswith(subdir):
125 if not name.startswith(subdir):
125 continue
126 continue
126 name = name[len(subdir) :]
127 name = name[len(subdir) :]
127 directory = False
128 directory = False
128
129
129 if b'/' in name:
130 if b'/' in name:
130 if not descend:
131 if not descend:
131 continue
132 continue
132
133
133 nameparts = name.split(b'/')
134 nameparts = name.split(b'/')
134 rootname = nameparts[0]
135 rootname = nameparts[0]
135
136
136 if not collapse:
137 if not collapse:
137 pass
138 pass
138 elif rootname in seendirs:
139 elif rootname in seendirs:
139 continue
140 continue
140 elif rootname in seenrepos:
141 elif rootname in seenrepos:
141 pass
142 pass
142 else:
143 else:
143 directory = True
144 directory = True
144 name = rootname
145 name = rootname
145
146
146 # redefine the path to refer to the directory
147 # redefine the path to refer to the directory
147 discarded = b'/'.join(nameparts[1:])
148 discarded = b'/'.join(nameparts[1:])
148
149
149 # remove name parts plus accompanying slash
150 # remove name parts plus accompanying slash
150 path = path[: -len(discarded) - 1]
151 path = path[: -len(discarded) - 1]
151
152
152 try:
153 try:
153 hg.repository(ui, path)
154 hg.repository(ui, path)
154 directory = False
155 directory = False
155 except (IOError, error.RepoError):
156 except (IOError, error.RepoError):
156 pass
157 pass
157
158
158 parts = [
159 parts = [
159 req.apppath.strip(b'/'),
160 req.apppath.strip(b'/'),
160 subdir.strip(b'/'),
161 subdir.strip(b'/'),
161 name.strip(b'/'),
162 name.strip(b'/'),
162 ]
163 ]
163 url = b'/' + b'/'.join(p for p in parts if p) + b'/'
164 url = b'/' + b'/'.join(p for p in parts if p) + b'/'
164
165
165 # show either a directory entry or a repository
166 # show either a directory entry or a repository
166 if directory:
167 if directory:
167 # get the directory's time information
168 # get the directory's time information
168 try:
169 try:
169 d = (get_mtime(path), dateutil.makedate()[1])
170 d = (get_mtime(path), dateutil.makedate()[1])
170 except OSError:
171 except OSError:
171 continue
172 continue
172
173
173 # add '/' to the name to make it obvious that
174 # add '/' to the name to make it obvious that
174 # the entry is a directory, not a regular repository
175 # the entry is a directory, not a regular repository
175 row = {
176 row = {
176 b'contact': b"",
177 b'contact': b"",
177 b'contact_sort': b"",
178 b'contact_sort': b"",
178 b'name': name + b'/',
179 b'name': name + b'/',
179 b'name_sort': name,
180 b'name_sort': name,
180 b'url': url,
181 b'url': url,
181 b'description': b"",
182 b'description': b"",
182 b'description_sort': b"",
183 b'description_sort': b"",
183 b'lastchange': d,
184 b'lastchange': d,
184 b'lastchange_sort': d[1] - d[0],
185 b'lastchange_sort': d[1] - d[0],
185 b'archives': templateutil.mappinglist([]),
186 b'archives': templateutil.mappinglist([]),
186 b'isdirectory': True,
187 b'isdirectory': True,
187 b'labels': templateutil.hybridlist([], name=b'label'),
188 b'labels': templateutil.hybridlist([], name=b'label'),
188 }
189 }
189
190
190 seendirs.add(name)
191 seendirs.add(name)
191 yield row
192 yield row
192 continue
193 continue
193
194
194 u = ui.copy()
195 u = ui.copy()
196 if rcutil.use_repo_hgrc():
195 try:
197 try:
196 u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
198 u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
197 except Exception as e:
199 except Exception as e:
198 u.warn(_(b'error reading %s/.hg/hgrc: %s\n') % (path, e))
200 u.warn(_(b'error reading %s/.hg/hgrc: %s\n') % (path, e))
199 continue
201 continue
200
202
201 def get(section, name, default=uimod._unset):
203 def get(section, name, default=uimod._unset):
202 return u.config(section, name, default, untrusted=True)
204 return u.config(section, name, default, untrusted=True)
203
205
204 if u.configbool(b"web", b"hidden", untrusted=True):
206 if u.configbool(b"web", b"hidden", untrusted=True):
205 continue
207 continue
206
208
207 if not readallowed(u, req):
209 if not readallowed(u, req):
208 continue
210 continue
209
211
210 # update time with local timezone
212 # update time with local timezone
211 try:
213 try:
212 r = hg.repository(ui, path)
214 r = hg.repository(ui, path)
213 except IOError:
215 except IOError:
214 u.warn(_(b'error accessing repository at %s\n') % path)
216 u.warn(_(b'error accessing repository at %s\n') % path)
215 continue
217 continue
216 except error.RepoError:
218 except error.RepoError:
217 u.warn(_(b'error accessing repository at %s\n') % path)
219 u.warn(_(b'error accessing repository at %s\n') % path)
218 continue
220 continue
219 try:
221 try:
220 d = (get_mtime(r.spath), dateutil.makedate()[1])
222 d = (get_mtime(r.spath), dateutil.makedate()[1])
221 except OSError:
223 except OSError:
222 continue
224 continue
223
225
224 contact = get_contact(get)
226 contact = get_contact(get)
225 description = get(b"web", b"description")
227 description = get(b"web", b"description")
226 seenrepos.add(name)
228 seenrepos.add(name)
227 name = get(b"web", b"name", name)
229 name = get(b"web", b"name", name)
228 labels = u.configlist(b'web', b'labels', untrusted=True)
230 labels = u.configlist(b'web', b'labels', untrusted=True)
229 row = {
231 row = {
230 b'contact': contact or b"unknown",
232 b'contact': contact or b"unknown",
231 b'contact_sort': contact.upper() or b"unknown",
233 b'contact_sort': contact.upper() or b"unknown",
232 b'name': name,
234 b'name': name,
233 b'name_sort': name,
235 b'name_sort': name,
234 b'url': url,
236 b'url': url,
235 b'description': description or b"unknown",
237 b'description': description or b"unknown",
236 b'description_sort': description.upper() or b"unknown",
238 b'description_sort': description.upper() or b"unknown",
237 b'lastchange': d,
239 b'lastchange': d,
238 b'lastchange_sort': d[1] - d[0],
240 b'lastchange_sort': d[1] - d[0],
239 b'archives': webutil.archivelist(u, b"tip", url),
241 b'archives': webutil.archivelist(u, b"tip", url),
240 b'isdirectory': None,
242 b'isdirectory': None,
241 b'labels': templateutil.hybridlist(labels, name=b'label'),
243 b'labels': templateutil.hybridlist(labels, name=b'label'),
242 }
244 }
243
245
244 yield row
246 yield row
245
247
246
248
247 def _indexentriesgen(
249 def _indexentriesgen(
248 context, ui, repos, req, stripecount, sortcolumn, descending, subdir
250 context, ui, repos, req, stripecount, sortcolumn, descending, subdir
249 ):
251 ):
250 rows = rawindexentries(ui, repos, req, subdir=subdir)
252 rows = rawindexentries(ui, repos, req, subdir=subdir)
251
253
252 sortdefault = None, False
254 sortdefault = None, False
253
255
254 if sortcolumn and sortdefault != (sortcolumn, descending):
256 if sortcolumn and sortdefault != (sortcolumn, descending):
255 sortkey = b'%s_sort' % sortcolumn
257 sortkey = b'%s_sort' % sortcolumn
256 rows = sorted(rows, key=lambda x: x[sortkey], reverse=descending)
258 rows = sorted(rows, key=lambda x: x[sortkey], reverse=descending)
257
259
258 for row, parity in zip(rows, paritygen(stripecount)):
260 for row, parity in zip(rows, paritygen(stripecount)):
259 row[b'parity'] = parity
261 row[b'parity'] = parity
260 yield row
262 yield row
261
263
262
264
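_indexentriesgen relies on two conventions: every sortable column has a precomputed '<name>_sort' sibling key, and paritygen hands out stripe indices for row shading. A hedged sketch with a stand-in paritygen (the real one lives in hgweb.common):

def paritygen(stripecount):
    # Stand-in: emit 0 for stripecount rows, then 1, and so on.
    parity, count = 0, 0
    while True:
        yield parity
        count += 1
        if count == stripecount:
            count, parity = 0, 1 - parity

def index_rows(rows, sortcolumn='', descending=False, stripecount=1):
    if sortcolumn:
        rows = sorted(rows, key=lambda r: r['%s_sort' % sortcolumn],
                      reverse=descending)
    for row, parity in zip(rows, paritygen(stripecount)):
        row['parity'] = parity
        yield row

rows = [{'name': 'b/', 'name_sort': 'b'}, {'name': 'a', 'name_sort': 'a'}]
print([(r['name'], r['parity']) for r in index_rows(rows, 'name')])
# -> [('a', 0), ('b/', 1)]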
263 def indexentries(
265 def indexentries(
264 ui, repos, req, stripecount, sortcolumn=b'', descending=False, subdir=b''
266 ui, repos, req, stripecount, sortcolumn=b'', descending=False, subdir=b''
265 ):
267 ):
266 args = (ui, repos, req, stripecount, sortcolumn, descending, subdir)
268 args = (ui, repos, req, stripecount, sortcolumn, descending, subdir)
267 return templateutil.mappinggenerator(_indexentriesgen, args=args)
269 return templateutil.mappinggenerator(_indexentriesgen, args=args)
268
270
269
271
270 class hgwebdir(object):
272 class hgwebdir(object):
271 """HTTP server for multiple repositories.
273 """HTTP server for multiple repositories.
272
274
273 Given a configuration, different repositories will be served depending
275 Given a configuration, different repositories will be served depending
274 on the request path.
276 on the request path.
275
277
276 Instances are typically used as WSGI applications.
278 Instances are typically used as WSGI applications.
277 """
279 """
278
280
279 def __init__(self, conf, baseui=None):
281 def __init__(self, conf, baseui=None):
280 self.conf = conf
282 self.conf = conf
281 self.baseui = baseui
283 self.baseui = baseui
282 self.ui = None
284 self.ui = None
283 self.lastrefresh = 0
285 self.lastrefresh = 0
284 self.motd = None
286 self.motd = None
285 self.refresh()
287 self.refresh()
286 if not baseui:
288 if not baseui:
287 # set up environment for new ui
289 # set up environment for new ui
288 extensions.loadall(self.ui)
290 extensions.loadall(self.ui)
289 extensions.populateui(self.ui)
291 extensions.populateui(self.ui)
290
292
291 def refresh(self):
293 def refresh(self):
292 if self.ui:
294 if self.ui:
293 refreshinterval = self.ui.configint(b'web', b'refreshinterval')
295 refreshinterval = self.ui.configint(b'web', b'refreshinterval')
294 else:
296 else:
295 item = configitems.coreitems[b'web'][b'refreshinterval']
297 item = configitems.coreitems[b'web'][b'refreshinterval']
296 refreshinterval = item.default
298 refreshinterval = item.default
297
299
298 # refreshinterval <= 0 means to always refresh.
300 # refreshinterval <= 0 means to always refresh.
299 if (
301 if (
300 refreshinterval > 0
302 refreshinterval > 0
301 and self.lastrefresh + refreshinterval > time.time()
303 and self.lastrefresh + refreshinterval > time.time()
302 ):
304 ):
303 return
305 return
304
306
305 if self.baseui:
307 if self.baseui:
306 u = self.baseui.copy()
308 u = self.baseui.copy()
307 else:
309 else:
308 u = uimod.ui.load()
310 u = uimod.ui.load()
309 u.setconfig(b'ui', b'report_untrusted', b'off', b'hgwebdir')
311 u.setconfig(b'ui', b'report_untrusted', b'off', b'hgwebdir')
310 u.setconfig(b'ui', b'nontty', b'true', b'hgwebdir')
312 u.setconfig(b'ui', b'nontty', b'true', b'hgwebdir')
311 # displaying a bundling progress bar while serving feels wrong and may
313 # displaying a bundling progress bar while serving feels wrong and may
312 # break some wsgi implementations.
314 # break some wsgi implementations.
313 u.setconfig(b'progress', b'disable', b'true', b'hgweb')
315 u.setconfig(b'progress', b'disable', b'true', b'hgweb')
314
316
315 if not isinstance(self.conf, (dict, list, tuple)):
317 if not isinstance(self.conf, (dict, list, tuple)):
316 map = {b'paths': b'hgweb-paths'}
318 map = {b'paths': b'hgweb-paths'}
317 if not os.path.exists(self.conf):
319 if not os.path.exists(self.conf):
318 raise error.Abort(_(b'config file %s not found!') % self.conf)
320 raise error.Abort(_(b'config file %s not found!') % self.conf)
319 u.readconfig(self.conf, remap=map, trust=True)
321 u.readconfig(self.conf, remap=map, trust=True)
320 paths = []
322 paths = []
321 for name, ignored in u.configitems(b'hgweb-paths'):
323 for name, ignored in u.configitems(b'hgweb-paths'):
322 for path in u.configlist(b'hgweb-paths', name):
324 for path in u.configlist(b'hgweb-paths', name):
323 paths.append((name, path))
325 paths.append((name, path))
324 elif isinstance(self.conf, (list, tuple)):
326 elif isinstance(self.conf, (list, tuple)):
325 paths = self.conf
327 paths = self.conf
326 elif isinstance(self.conf, dict):
328 elif isinstance(self.conf, dict):
327 paths = self.conf.items()
329 paths = self.conf.items()
328 extensions.populateui(u)
330 extensions.populateui(u)
329
331
330 repos = findrepos(paths)
332 repos = findrepos(paths)
331 for prefix, root in u.configitems(b'collections'):
333 for prefix, root in u.configitems(b'collections'):
332 prefix = util.pconvert(prefix)
334 prefix = util.pconvert(prefix)
333 for path in scmutil.walkrepos(root, followsym=True):
335 for path in scmutil.walkrepos(root, followsym=True):
334 repo = os.path.normpath(path)
336 repo = os.path.normpath(path)
335 name = util.pconvert(repo)
337 name = util.pconvert(repo)
336 if name.startswith(prefix):
338 if name.startswith(prefix):
337 name = name[len(prefix) :]
339 name = name[len(prefix) :]
338 repos.append((name.lstrip(b'/'), repo))
340 repos.append((name.lstrip(b'/'), repo))
339
341
340 self.repos = repos
342 self.repos = repos
341 self.ui = u
343 self.ui = u
342 encoding.encoding = self.ui.config(b'web', b'encoding')
344 encoding.encoding = self.ui.config(b'web', b'encoding')
343 self.style = self.ui.config(b'web', b'style')
345 self.style = self.ui.config(b'web', b'style')
344 self.templatepath = self.ui.config(
346 self.templatepath = self.ui.config(
345 b'web', b'templates', untrusted=False
347 b'web', b'templates', untrusted=False
346 )
348 )
347 self.stripecount = self.ui.config(b'web', b'stripes')
349 self.stripecount = self.ui.config(b'web', b'stripes')
348 if self.stripecount:
350 if self.stripecount:
349 self.stripecount = int(self.stripecount)
351 self.stripecount = int(self.stripecount)
350 prefix = self.ui.config(b'web', b'prefix')
352 prefix = self.ui.config(b'web', b'prefix')
351 if prefix.startswith(b'/'):
353 if prefix.startswith(b'/'):
352 prefix = prefix[1:]
354 prefix = prefix[1:]
353 if prefix.endswith(b'/'):
355 if prefix.endswith(b'/'):
354 prefix = prefix[:-1]
356 prefix = prefix[:-1]
355 self.prefix = prefix
357 self.prefix = prefix
356 self.lastrefresh = time.time()
358 self.lastrefresh = time.time()
357
359
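refresh() accepts three shapes for self.conf. A sketch of the normalization to (name, path) pairs; the config-file branch is omitted here because it needs Mercurial's own parser for the [hgweb-paths] section:

def paths_from_conf(conf):
    if isinstance(conf, dict):
        return list(conf.items())          # {name: path}
    if isinstance(conf, (list, tuple)):
        return list(conf)                  # already (name, path) pairs
    # A string would name a config file carrying [hgweb-paths].
    raise TypeError('expected a dict or a sequence of (name, path) pairs')

print(paths_from_conf({'hg': '/srv/repos/hg'}))
print(paths_from_conf([('docs', '/srv/repos/docs')]))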
358 def run(self):
360 def run(self):
359 if not encoding.environ.get(b'GATEWAY_INTERFACE', b'').startswith(
361 if not encoding.environ.get(b'GATEWAY_INTERFACE', b'').startswith(
360 b"CGI/1."
362 b"CGI/1."
361 ):
363 ):
362 raise RuntimeError(
364 raise RuntimeError(
363 b"This function is only intended to be "
365 b"This function is only intended to be "
364 b"called while running as a CGI script."
366 b"called while running as a CGI script."
365 )
367 )
366 wsgicgi.launch(self)
368 wsgicgi.launch(self)
367
369
368 def __call__(self, env, respond):
370 def __call__(self, env, respond):
369 baseurl = self.ui.config(b'web', b'baseurl')
371 baseurl = self.ui.config(b'web', b'baseurl')
370 req = requestmod.parserequestfromenv(env, altbaseurl=baseurl)
372 req = requestmod.parserequestfromenv(env, altbaseurl=baseurl)
371 res = requestmod.wsgiresponse(req, respond)
373 res = requestmod.wsgiresponse(req, respond)
372
374
373 return self.run_wsgi(req, res)
375 return self.run_wsgi(req, res)
374
376
375 def run_wsgi(self, req, res):
377 def run_wsgi(self, req, res):
376 profile = self.ui.configbool(b'profiling', b'enabled')
378 profile = self.ui.configbool(b'profiling', b'enabled')
377 with profiling.profile(self.ui, enabled=profile):
379 with profiling.profile(self.ui, enabled=profile):
378 try:
380 try:
379 for r in self._runwsgi(req, res):
381 for r in self._runwsgi(req, res):
380 yield r
382 yield r
381 finally:
383 finally:
382 # There are known cycles in localrepository that prevent
384 # There are known cycles in localrepository that prevent
383 # those objects (and tons of held references) from being
385 # those objects (and tons of held references) from being
384 # collected through normal refcounting. We mitigate those
386 # collected through normal refcounting. We mitigate those
385 # leaks by performing an explicit GC on every request.
387 # leaks by performing an explicit GC on every request.
386 # TODO remove this once leaks are fixed.
388 # TODO remove this once leaks are fixed.
387 # TODO only run this on requests that create localrepository
389 # TODO only run this on requests that create localrepository
388 # instances instead of every request.
390 # instances instead of every request.
389 gc.collect()
391 gc.collect()
390
392
391 def _runwsgi(self, req, res):
393 def _runwsgi(self, req, res):
392 try:
394 try:
393 self.refresh()
395 self.refresh()
394
396
395 csp, nonce = cspvalues(self.ui)
397 csp, nonce = cspvalues(self.ui)
396 if csp:
398 if csp:
397 res.headers[b'Content-Security-Policy'] = csp
399 res.headers[b'Content-Security-Policy'] = csp
398
400
399 virtual = req.dispatchpath.strip(b'/')
401 virtual = req.dispatchpath.strip(b'/')
400 tmpl = self.templater(req, nonce)
402 tmpl = self.templater(req, nonce)
401 ctype = tmpl.render(b'mimetype', {b'encoding': encoding.encoding})
403 ctype = tmpl.render(b'mimetype', {b'encoding': encoding.encoding})
402
404
403 # Global defaults. These can be overridden by any handler.
405 # Global defaults. These can be overridden by any handler.
404 res.status = b'200 Script output follows'
406 res.status = b'200 Script output follows'
405 res.headers[b'Content-Type'] = ctype
407 res.headers[b'Content-Type'] = ctype
406
408
407 # a static file
409 # a static file
408 if virtual.startswith(b'static/') or b'static' in req.qsparams:
410 if virtual.startswith(b'static/') or b'static' in req.qsparams:
409 if virtual.startswith(b'static/'):
411 if virtual.startswith(b'static/'):
410 fname = virtual[7:]
412 fname = virtual[7:]
411 else:
413 else:
412 fname = req.qsparams[b'static']
414 fname = req.qsparams[b'static']
413 static = self.ui.config(b"web", b"static", untrusted=False)
415 static = self.ui.config(b"web", b"static", untrusted=False)
414 if not static:
416 if not static:
415 tp = self.templatepath or templater.templatepaths()
417 tp = self.templatepath or templater.templatepaths()
416 if isinstance(tp, bytes):
418 if isinstance(tp, bytes):
417 tp = [tp]
419 tp = [tp]
418 static = [os.path.join(p, b'static') for p in tp]
420 static = [os.path.join(p, b'static') for p in tp]
419
421
420 staticfile(static, fname, res)
422 staticfile(static, fname, res)
421 return res.sendresponse()
423 return res.sendresponse()
422
424
423 # top-level index
425 # top-level index
424
426
425 repos = dict(self.repos)
427 repos = dict(self.repos)
426
428
427 if (not virtual or virtual == b'index') and virtual not in repos:
429 if (not virtual or virtual == b'index') and virtual not in repos:
428 return self.makeindex(req, res, tmpl)
430 return self.makeindex(req, res, tmpl)
429
431
430 # nested indexes and hgwebs
432 # nested indexes and hgwebs
431
433
432 if virtual.endswith(b'/index') and virtual not in repos:
434 if virtual.endswith(b'/index') and virtual not in repos:
433 subdir = virtual[: -len(b'index')]
435 subdir = virtual[: -len(b'index')]
434 if any(r.startswith(subdir) for r in repos):
436 if any(r.startswith(subdir) for r in repos):
435 return self.makeindex(req, res, tmpl, subdir)
437 return self.makeindex(req, res, tmpl, subdir)
436
438
437 def _virtualdirs():
439 def _virtualdirs():
438 # Check the full virtual path, and each parent
440 # Check the full virtual path, and each parent
439 yield virtual
441 yield virtual
440 for p in pathutil.finddirs(virtual):
442 for p in pathutil.finddirs(virtual):
441 yield p
443 yield p
442
444
443 for virtualrepo in _virtualdirs():
445 for virtualrepo in _virtualdirs():
444 real = repos.get(virtualrepo)
446 real = repos.get(virtualrepo)
445 if real:
447 if real:
446 # Re-parse the WSGI environment to take into account our
448 # Re-parse the WSGI environment to take into account our
447 # repository path component.
449 # repository path component.
448 uenv = req.rawenv
450 uenv = req.rawenv
449 if pycompat.ispy3:
451 if pycompat.ispy3:
450 uenv = {
452 uenv = {
451 k.decode('latin1'): v
453 k.decode('latin1'): v
452 for k, v in pycompat.iteritems(uenv)
454 for k, v in pycompat.iteritems(uenv)
453 }
455 }
454 req = requestmod.parserequestfromenv(
456 req = requestmod.parserequestfromenv(
455 uenv,
457 uenv,
456 reponame=virtualrepo,
458 reponame=virtualrepo,
457 altbaseurl=self.ui.config(b'web', b'baseurl'),
459 altbaseurl=self.ui.config(b'web', b'baseurl'),
458 # Reuse the wrapped body file object, otherwise state
460 # Reuse the wrapped body file object, otherwise state
459 # tracking can get confused.
461 # tracking can get confused.
460 bodyfh=req.bodyfh,
462 bodyfh=req.bodyfh,
461 )
463 )
462 try:
464 try:
463 # ensure caller gets private copy of ui
465 # ensure caller gets private copy of ui
464 repo = hg.repository(self.ui.copy(), real)
466 repo = hg.repository(self.ui.copy(), real)
465 return hgweb_mod.hgweb(repo).run_wsgi(req, res)
467 return hgweb_mod.hgweb(repo).run_wsgi(req, res)
466 except IOError as inst:
468 except IOError as inst:
467 msg = encoding.strtolocal(inst.strerror)
469 msg = encoding.strtolocal(inst.strerror)
468 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
470 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
469 except error.RepoError as inst:
471 except error.RepoError as inst:
470 raise ErrorResponse(HTTP_SERVER_ERROR, bytes(inst))
472 raise ErrorResponse(HTTP_SERVER_ERROR, bytes(inst))
471
473
472 # browse subdirectories
474 # browse subdirectories
473 subdir = virtual + b'/'
475 subdir = virtual + b'/'
474 if [r for r in repos if r.startswith(subdir)]:
476 if [r for r in repos if r.startswith(subdir)]:
475 return self.makeindex(req, res, tmpl, subdir)
477 return self.makeindex(req, res, tmpl, subdir)
476
478
477 # prefixes not found
479 # prefixes not found
478 res.status = b'404 Not Found'
480 res.status = b'404 Not Found'
479 res.setbodygen(tmpl.generate(b'notfound', {b'repo': virtual}))
481 res.setbodygen(tmpl.generate(b'notfound', {b'repo': virtual}))
480 return res.sendresponse()
482 return res.sendresponse()
481
483
482 except ErrorResponse as e:
484 except ErrorResponse as e:
483 res.status = statusmessage(e.code, pycompat.bytestr(e))
485 res.status = statusmessage(e.code, pycompat.bytestr(e))
484 res.setbodygen(
486 res.setbodygen(
485 tmpl.generate(b'error', {b'error': e.message or b''})
487 tmpl.generate(b'error', {b'error': e.message or b''})
486 )
488 )
487 return res.sendresponse()
489 return res.sendresponse()
488 finally:
490 finally:
489 del tmpl
491 del tmpl
490
492
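Repository lookup in _runwsgi tries the full request path first and then each parent, so a request for 'team/project/raw-file/tip' still reaches the 'team/project' repository. A standalone sketch of that walk (pathutil.finddirs yields the same parents, deepest first):

def virtualdirs(virtual):
    yield virtual
    parts = virtual.split('/')
    for i in range(len(parts) - 1, 0, -1):
        yield '/'.join(parts[:i])

repos = {'team/project': '/srv/team/project'}
for candidate in virtualdirs('team/project/raw-file/tip'):
    if candidate in repos:
        print('dispatch to', repos[candidate])
        break
# -> dispatch to /srv/team/project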
491 def makeindex(self, req, res, tmpl, subdir=b""):
493 def makeindex(self, req, res, tmpl, subdir=b""):
492 self.refresh()
494 self.refresh()
493 sortable = [b"name", b"description", b"contact", b"lastchange"]
495 sortable = [b"name", b"description", b"contact", b"lastchange"]
494 sortcolumn, descending = None, False
496 sortcolumn, descending = None, False
495 if b'sort' in req.qsparams:
497 if b'sort' in req.qsparams:
496 sortcolumn = req.qsparams[b'sort']
498 sortcolumn = req.qsparams[b'sort']
497 descending = sortcolumn.startswith(b'-')
499 descending = sortcolumn.startswith(b'-')
498 if descending:
500 if descending:
499 sortcolumn = sortcolumn[1:]
501 sortcolumn = sortcolumn[1:]
500 if sortcolumn not in sortable:
502 if sortcolumn not in sortable:
501 sortcolumn = b""
503 sortcolumn = b""
502
504
503 sort = [
505 sort = [
504 (
506 (
505 b"sort_%s" % column,
507 b"sort_%s" % column,
506 b"%s%s"
508 b"%s%s"
507 % (
509 % (
508 (not descending and column == sortcolumn) and b"-" or b"",
510 (not descending and column == sortcolumn) and b"-" or b"",
509 column,
511 column,
510 ),
512 ),
511 )
513 )
512 for column in sortable
514 for column in sortable
513 ]
515 ]
514
516
515 self.refresh()
517 self.refresh()
516
518
517 entries = indexentries(
519 entries = indexentries(
518 self.ui,
520 self.ui,
519 self.repos,
521 self.repos,
520 req,
522 req,
521 self.stripecount,
523 self.stripecount,
522 sortcolumn=sortcolumn,
524 sortcolumn=sortcolumn,
523 descending=descending,
525 descending=descending,
524 subdir=subdir,
526 subdir=subdir,
525 )
527 )
526
528
527 mapping = {
529 mapping = {
528 b'entries': entries,
530 b'entries': entries,
529 b'subdir': subdir,
531 b'subdir': subdir,
530 b'pathdef': hgweb_mod.makebreadcrumb(b'/' + subdir, self.prefix),
532 b'pathdef': hgweb_mod.makebreadcrumb(b'/' + subdir, self.prefix),
531 b'sortcolumn': sortcolumn,
533 b'sortcolumn': sortcolumn,
532 b'descending': descending,
534 b'descending': descending,
533 }
535 }
534 mapping.update(sort)
536 mapping.update(sort)
535 res.setbodygen(tmpl.generate(b'index', mapping))
537 res.setbodygen(tmpl.generate(b'index', mapping))
536 return res.sendresponse()
538 return res.sendresponse()
537
539
538 def templater(self, req, nonce):
540 def templater(self, req, nonce):
539 def config(section, name, default=uimod._unset, untrusted=True):
541 def config(section, name, default=uimod._unset, untrusted=True):
540 return self.ui.config(section, name, default, untrusted)
542 return self.ui.config(section, name, default, untrusted)
541
543
542 vars = {}
544 vars = {}
543 styles, (style, mapfile) = hgweb_mod.getstyle(
545 styles, (style, mapfile) = hgweb_mod.getstyle(
544 req, config, self.templatepath
546 req, config, self.templatepath
545 )
547 )
546 if style == styles[0]:
548 if style == styles[0]:
547 vars[b'style'] = style
549 vars[b'style'] = style
548
550
549 sessionvars = webutil.sessionvars(vars, b'?')
551 sessionvars = webutil.sessionvars(vars, b'?')
550 logourl = config(b'web', b'logourl')
552 logourl = config(b'web', b'logourl')
551 logoimg = config(b'web', b'logoimg')
553 logoimg = config(b'web', b'logoimg')
552 staticurl = (
554 staticurl = (
553 config(b'web', b'staticurl')
555 config(b'web', b'staticurl')
554 or req.apppath.rstrip(b'/') + b'/static/'
556 or req.apppath.rstrip(b'/') + b'/static/'
555 )
557 )
556 if not staticurl.endswith(b'/'):
558 if not staticurl.endswith(b'/'):
557 staticurl += b'/'
559 staticurl += b'/'
558
560
559 defaults = {
561 defaults = {
560 b"encoding": encoding.encoding,
562 b"encoding": encoding.encoding,
561 b"url": req.apppath + b'/',
563 b"url": req.apppath + b'/',
562 b"logourl": logourl,
564 b"logourl": logourl,
563 b"logoimg": logoimg,
565 b"logoimg": logoimg,
564 b"staticurl": staticurl,
566 b"staticurl": staticurl,
565 b"sessionvars": sessionvars,
567 b"sessionvars": sessionvars,
566 b"style": style,
568 b"style": style,
567 b"nonce": nonce,
569 b"nonce": nonce,
568 }
570 }
569 templatekeyword = registrar.templatekeyword(defaults)
571 templatekeyword = registrar.templatekeyword(defaults)
570
572
571 @templatekeyword(b'motd', requires=())
573 @templatekeyword(b'motd', requires=())
572 def motd(context, mapping):
574 def motd(context, mapping):
573 if self.motd is not None:
575 if self.motd is not None:
574 yield self.motd
576 yield self.motd
575 else:
577 else:
576 yield config(b'web', b'motd')
578 yield config(b'web', b'motd')
577
579
578 tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
580 tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
579 return tmpl
581 return tmpl
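The staticurl computation above reduces to a small normalization rule: prefer the configured URL, otherwise derive one from the application path, and guarantee exactly one trailing slash. A sketch with illustrative names:

def static_url(configured, apppath):
    url = configured or apppath.rstrip('/') + '/static/'
    if not url.endswith('/'):
        url += '/'
    return url

assert static_url(None, '/hg') == '/hg/static/'
assert static_url('https://cdn.example/hg-static', '/hg') == \
    'https://cdn.example/hg-static/'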
@@ -1,467 +1,467 b''
1 # linelog - efficient cache for annotate data
1 # linelog - efficient cache for annotate data
2 #
2 #
3 # Copyright 2018 Google LLC.
3 # Copyright 2018 Google LLC.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """linelog is an efficient cache for annotate data inspired by SCCS Weaves.
7 """linelog is an efficient cache for annotate data inspired by SCCS Weaves.
8
8
9 SCCS Weaves are an implementation of
9 SCCS Weaves are an implementation of
10 https://en.wikipedia.org/wiki/Interleaved_deltas. See
10 https://en.wikipedia.org/wiki/Interleaved_deltas. See
11 mercurial/helptext/internals/linelog.txt for an exploration of SCCS weaves
11 mercurial/helptext/internals/linelog.txt for an exploration of SCCS weaves
12 and how linelog works in detail.
12 and how linelog works in detail.
13
13
14 Here's a hacker's summary: a linelog is a program which is executed in
14 Here's a hacker's summary: a linelog is a program which is executed in
15 the context of a revision. Executing the program emits information
15 the context of a revision. Executing the program emits information
16 about lines, including the revision that introduced them and the line
16 about lines, including the revision that introduced them and the line
17 number in the file at the introducing revision. When an insertion or
17 number in the file at the introducing revision. When an insertion or
18 deletion is performed on the file, a jump instruction is used to patch
18 deletion is performed on the file, a jump instruction is used to patch
19 in a new body of annotate information.
19 in a new body of annotate information.
20 """
20 """
21 from __future__ import absolute_import, print_function
21 from __future__ import absolute_import, print_function
22
22
23 import abc
23 import abc
24 import struct
24 import struct
25
25
26 from .thirdparty import attr
26 from .thirdparty import attr
27 from . import pycompat
27 from . import pycompat
28
28
29 _llentry = struct.Struct(b'>II')
29 _llentry = struct.Struct(b'>II')
30
30
31
31
32 class LineLogError(Exception):
32 class LineLogError(Exception):
33 """Error raised when something bad happens internally in linelog."""
33 """Error raised when something bad happens internally in linelog."""
34
34
35
35
36 @attr.s
36 @attr.s
37 class lineinfo(object):
37 class lineinfo(object):
38 # Introducing revision of this line.
38 # Introducing revision of this line.
39 rev = attr.ib()
39 rev = attr.ib()
40 # Line number for this line in its introducing revision.
40 # Line number for this line in its introducing revision.
41 linenum = attr.ib()
41 linenum = attr.ib()
42 # Private. Offset in the linelog program of this line. Used internally.
42 # Private. Offset in the linelog program of this line. Used internally.
43 _offset = attr.ib()
43 _offset = attr.ib()
44
44
45
45
46 @attr.s
46 @attr.s
47 class annotateresult(object):
47 class annotateresult(object):
48 rev = attr.ib()
48 rev = attr.ib()
49 lines = attr.ib()
49 lines = attr.ib()
50 _eof = attr.ib()
50 _eof = attr.ib()
51
51
52 def __iter__(self):
52 def __iter__(self):
53 return iter(self.lines)
53 return iter(self.lines)
54
54
55
55
56 class _llinstruction(object): # pytype: disable=ignored-metaclass
56 class _llinstruction(object): # pytype: disable=ignored-metaclass
57
57
58 __metaclass__ = abc.ABCMeta
58 __metaclass__ = abc.ABCMeta
59
59
60 @abc.abstractmethod
60 @abc.abstractmethod
61 def __init__(self, op1, op2):
61 def __init__(self, op1, op2):
62 pass
62 pass
63
63
64 @abc.abstractmethod
64 @abc.abstractmethod
65 def __str__(self):
65 def __str__(self):
66 pass
66 pass
67
67
68 def __repr__(self):
68 def __repr__(self):
69 return str(self)
69 return str(self)
70
70
71 @abc.abstractmethod
71 @abc.abstractmethod
72 def __eq__(self, other):
72 def __eq__(self, other):
73 pass
73 pass
74
74
75 @abc.abstractmethod
75 @abc.abstractmethod
76 def encode(self):
76 def encode(self):
77 """Encode this instruction to the binary linelog format."""
77 """Encode this instruction to the binary linelog format."""
78
78
79 @abc.abstractmethod
79 @abc.abstractmethod
80 def execute(self, rev, pc, emit):
80 def execute(self, rev, pc, emit):
81 """Execute this instruction.
81 """Execute this instruction.
82
82
83 Args:
83 Args:
84 rev: The revision we're annotating.
84 rev: The revision we're annotating.
85 pc: The current offset in the linelog program.
85 pc: The current offset in the linelog program.
86 emit: A function that accepts a single lineinfo object.
86 emit: A function that accepts a single lineinfo object.
87
87
88 Returns:
88 Returns:
89 The new value of pc. Returns None if execution should stop
89 The new value of pc. Returns None if execution should stop
90 (that is, we've found the end of the file.)
90 (that is, we've found the end of the file.)
91 """
91 """
92
92
93
93
94 class _jge(_llinstruction):
94 class _jge(_llinstruction):
95 """If the current rev is greater than or equal to op1, jump to op2."""
95 """If the current rev is greater than or equal to op1, jump to op2."""
96
96
97 def __init__(self, op1, op2):
97 def __init__(self, op1, op2):
98 self._cmprev = op1
98 self._cmprev = op1
99 self._target = op2
99 self._target = op2
100
100
101 def __str__(self):
101 def __str__(self):
102 return 'JGE %d %d' % (self._cmprev, self._target)
102 return 'JGE %d %d' % (self._cmprev, self._target)
103
103
104 def __eq__(self, other):
104 def __eq__(self, other):
105 return (
105 return (
106 type(self) == type(other)
106 type(self) == type(other)
107 and self._cmprev == other._cmprev
107 and self._cmprev == other._cmprev
108 and self._target == other._target
108 and self._target == other._target
109 )
109 )
110
110
111 def encode(self):
111 def encode(self):
112 return _llentry.pack(self._cmprev << 2, self._target)
112 return _llentry.pack(self._cmprev << 2, self._target)
113
113
114 def execute(self, rev, pc, emit):
114 def execute(self, rev, pc, emit):
115 if rev >= self._cmprev:
115 if rev >= self._cmprev:
116 return self._target
116 return self._target
117 return pc + 1
117 return pc + 1
118
118
119
119
120 class _jump(_llinstruction):
120 class _jump(_llinstruction):
121 """Unconditional jumps are expressed as a JGE with op1 set to 0."""
121 """Unconditional jumps are expressed as a JGE with op1 set to 0."""
122
122
123 def __init__(self, op1, op2):
123 def __init__(self, op1, op2):
124 if op1 != 0:
124 if op1 != 0:
125 raise LineLogError(b"malformed JUMP, op1 must be 0, got %d" % op1)
125 raise LineLogError(b"malformed JUMP, op1 must be 0, got %d" % op1)
126 self._target = op2
126 self._target = op2
127
127
128 def __str__(self):
128 def __str__(self):
129 return 'JUMP %d' % (self._target)
129 return 'JUMP %d' % (self._target)
130
130
131 def __eq__(self, other):
131 def __eq__(self, other):
132 return type(self) == type(other) and self._target == other._target
132 return type(self) == type(other) and self._target == other._target
133
133
134 def encode(self):
134 def encode(self):
135 return _llentry.pack(0, self._target)
135 return _llentry.pack(0, self._target)
136
136
137 def execute(self, rev, pc, emit):
137 def execute(self, rev, pc, emit):
138 return self._target
138 return self._target
139
139
140
140
141 class _eof(_llinstruction):
141 class _eof(_llinstruction):
142 """EOF is expressed as a JGE that always jumps to 0."""
142 """EOF is expressed as a JGE that always jumps to 0."""
143
143
144 def __init__(self, op1, op2):
144 def __init__(self, op1, op2):
145 if op1 != 0:
145 if op1 != 0:
146 raise LineLogError(b"malformed EOF, op1 must be 0, got %d" % op1)
146 raise LineLogError(b"malformed EOF, op1 must be 0, got %d" % op1)
147 if op2 != 0:
147 if op2 != 0:
148 raise LineLogError(b"malformed EOF, op2 must be 0, got %d" % op2)
148 raise LineLogError(b"malformed EOF, op2 must be 0, got %d" % op2)
149
149
150 def __str__(self):
150 def __str__(self):
151 return r'EOF'
151 return r'EOF'
152
152
153 def __eq__(self, other):
153 def __eq__(self, other):
154 return type(self) == type(other)
154 return type(self) == type(other)
155
155
156 def encode(self):
156 def encode(self):
157 return _llentry.pack(0, 0)
157 return _llentry.pack(0, 0)
158
158
159 def execute(self, rev, pc, emit):
159 def execute(self, rev, pc, emit):
160 return None
160 return None
161
161
162
162
163 class _jl(_llinstruction):
163 class _jl(_llinstruction):
164 """If the current rev is less than op1, jump to op2."""
164 """If the current rev is less than op1, jump to op2."""
165
165
166 def __init__(self, op1, op2):
166 def __init__(self, op1, op2):
167 self._cmprev = op1
167 self._cmprev = op1
168 self._target = op2
168 self._target = op2
169
169
170 def __str__(self):
170 def __str__(self):
171 return 'JL %d %d' % (self._cmprev, self._target)
171 return 'JL %d %d' % (self._cmprev, self._target)
172
172
173 def __eq__(self, other):
173 def __eq__(self, other):
174 return (
174 return (
175 type(self) == type(other)
175 type(self) == type(other)
176 and self._cmprev == other._cmprev
176 and self._cmprev == other._cmprev
177 and self._target == other._target
177 and self._target == other._target
178 )
178 )
179
179
180 def encode(self):
180 def encode(self):
181 return _llentry.pack(1 | (self._cmprev << 2), self._target)
181 return _llentry.pack(1 | (self._cmprev << 2), self._target)
182
182
183 def execute(self, rev, pc, emit):
183 def execute(self, rev, pc, emit):
184 if rev < self._cmprev:
184 if rev < self._cmprev:
185 return self._target
185 return self._target
186 return pc + 1
186 return pc + 1
187
187
188
188
189 class _line(_llinstruction):
189 class _line(_llinstruction):
190 """Emit a line."""
190 """Emit a line."""
191
191
192 def __init__(self, op1, op2):
192 def __init__(self, op1, op2):
193 # This line was introduced by this revision number.
193 # This line was introduced by this revision number.
194 self._rev = op1
194 self._rev = op1
195 # This line had the specified line number in the introducing revision.
195 # This line had the specified line number in the introducing revision.
196 self._origlineno = op2
196 self._origlineno = op2
197
197
198 def __str__(self):
198 def __str__(self):
199 return 'LINE %d %d' % (self._rev, self._origlineno)
199 return 'LINE %d %d' % (self._rev, self._origlineno)
200
200
201 def __eq__(self, other):
201 def __eq__(self, other):
202 return (
202 return (
203 type(self) == type(other)
203 type(self) == type(other)
204 and self._rev == other._rev
204 and self._rev == other._rev
205 and self._origlineno == other._origlineno
205 and self._origlineno == other._origlineno
206 )
206 )
207
207
208 def encode(self):
208 def encode(self):
209 return _llentry.pack(2 | (self._rev << 2), self._origlineno)
209 return _llentry.pack(2 | (self._rev << 2), self._origlineno)
210
210
211 def execute(self, rev, pc, emit):
211 def execute(self, rev, pc, emit):
212 emit(lineinfo(self._rev, self._origlineno, pc))
212 emit(lineinfo(self._rev, self._origlineno, pc))
213 return pc + 1
213 return pc + 1
214
214
215
215
216 def _decodeone(data, offset):
216 def _decodeone(data, offset):
217 """Decode a single linelog instruction from an offset in a buffer."""
217 """Decode a single linelog instruction from an offset in a buffer."""
218 try:
218 try:
219 op1, op2 = _llentry.unpack_from(data, offset)
219 op1, op2 = _llentry.unpack_from(data, offset)
220 except struct.error as e:
220 except struct.error as e:
221 raise LineLogError(b'reading an instruction failed: %r' % e)
221 raise LineLogError(b'reading an instruction failed: %r' % e)
222 opcode = op1 & 0b11
222 opcode = op1 & 0b11
223 op1 = op1 >> 2
223 op1 = op1 >> 2
224 if opcode == 0:
224 if opcode == 0:
225 if op1 == 0:
225 if op1 == 0:
226 if op2 == 0:
226 if op2 == 0:
227 return _eof(op1, op2)
227 return _eof(op1, op2)
228 return _jump(op1, op2)
228 return _jump(op1, op2)
229 return _jge(op1, op2)
229 return _jge(op1, op2)
230 elif opcode == 1:
230 elif opcode == 1:
231 return _jl(op1, op2)
231 return _jl(op1, op2)
232 elif opcode == 2:
232 elif opcode == 2:
233 return _line(op1, op2)
233 return _line(op1, op2)
234 raise NotImplementedError(b'Unimplemented opcode %r' % opcode)
234 raise NotImplementedError(b'Unimplemented opcode %r' % opcode)
235
235
236
236
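Each instruction above packs into two big-endian 32-bit words ('>II'), with the opcode in the low two bits of the first word (0 = JGE/JUMP/EOF, 1 = JL, 2 = LINE) and op1 in the remaining thirty. A round-trip sketch of just that packing:

import struct

llentry = struct.Struct('>II')

def encode_entry(opcode, op1, op2):
    return llentry.pack((op1 << 2) | opcode, op2)

def decode_entry(data, offset=0):
    word1, op2 = llentry.unpack_from(data, offset)
    return word1 & 0b11, word1 >> 2, op2

buf = encode_entry(2, 7, 42)           # a LINE with rev=7, linenum=42
assert decode_entry(buf) == (2, 7, 42)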
237 class linelog(object):
237 class linelog(object):
238 """Efficient cache for per-line history information."""
238 """Efficient cache for per-line history information."""
239
239
240 def __init__(self, program=None, maxrev=0):
240 def __init__(self, program=None, maxrev=0):
241 if program is None:
241 if program is None:
242 # We pad the program with an extra leading EOF so that our
242 # We pad the program with an extra leading EOF so that our
243 # offsets will match the C code exactly. This means we can
243 # offsets will match the C code exactly. This means we can
244 # interoperate with the C code.
244 # interoperate with the C code.
245 program = [_eof(0, 0), _eof(0, 0)]
245 program = [_eof(0, 0), _eof(0, 0)]
246 self._program = program
246 self._program = program
247 self._lastannotate = None
247 self._lastannotate = None
248 self._maxrev = maxrev
248 self._maxrev = maxrev
249
249
250 def __eq__(self, other):
250 def __eq__(self, other):
251 return (
251 return (
252 type(self) == type(other)
252 type(self) == type(other)
253 and self._program == other._program
253 and self._program == other._program
254 and self._maxrev == other._maxrev
254 and self._maxrev == other._maxrev
255 )
255 )
256
256
257 def __repr__(self):
257 def __repr__(self):
258 return b'<linelog at %s: maxrev=%d size=%d>' % (
258 return '<linelog at %s: maxrev=%d size=%d>' % (
259 hex(id(self)),
259 hex(id(self)),
260 self._maxrev,
260 self._maxrev,
261 len(self._program),
261 len(self._program),
262 )
262 )
263
263
264 def debugstr(self):
264 def debugstr(self):
265 fmt = '%%%dd %%s' % len(str(len(self._program)))
265 fmt = '%%%dd %%s' % len(str(len(self._program)))
266 return pycompat.sysstr(b'\n').join(
266 return pycompat.sysstr(b'\n').join(
267 fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1)
267 fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1)
268 )
268 )
269
269
270 @classmethod
270 @classmethod
271 def fromdata(cls, buf):
271 def fromdata(cls, buf):
272 if len(buf) % _llentry.size != 0:
272 if len(buf) % _llentry.size != 0:
273 raise LineLogError(
273 raise LineLogError(
274 b"invalid linelog buffer size %d (must be a multiple of %d)"
274 b"invalid linelog buffer size %d (must be a multiple of %d)"
275 % (len(buf), _llentry.size)
275 % (len(buf), _llentry.size)
276 )
276 )
277 expected = len(buf) / _llentry.size
277 expected = len(buf) / _llentry.size
278 fakejge = _decodeone(buf, 0)
278 fakejge = _decodeone(buf, 0)
279 if isinstance(fakejge, _jump):
279 if isinstance(fakejge, _jump):
280 maxrev = 0
280 maxrev = 0
281 elif isinstance(fakejge, (_jge, _jl)):
281 elif isinstance(fakejge, (_jge, _jl)):
282 maxrev = fakejge._cmprev
282 maxrev = fakejge._cmprev
283 else:
283 else:
284 raise LineLogError(
284 raise LineLogError(
285 'Expected one of _jump, _jge, or _jl. Got %s.'
285 'Expected one of _jump, _jge, or _jl. Got %s.'
286 % type(fakejge).__name__
286 % type(fakejge).__name__
287 )
287 )
288 assert isinstance(fakejge, (_jump, _jge, _jl)) # help pytype
288 assert isinstance(fakejge, (_jump, _jge, _jl)) # help pytype
289 numentries = fakejge._target
289 numentries = fakejge._target
290 if expected != numentries:
290 if expected != numentries:
291 raise LineLogError(
291 raise LineLogError(
292 b"corrupt linelog data: claimed"
292 b"corrupt linelog data: claimed"
293 b" %d entries but given data for %d entries"
293 b" %d entries but given data for %d entries"
294 % (expected, numentries)
294 % (expected, numentries)
295 )
295 )
296 instructions = [_eof(0, 0)]
296 instructions = [_eof(0, 0)]
297 for offset in pycompat.xrange(1, numentries):
297 for offset in pycompat.xrange(1, numentries):
298 instructions.append(_decodeone(buf, offset * _llentry.size))
298 instructions.append(_decodeone(buf, offset * _llentry.size))
299 return cls(instructions, maxrev=maxrev)
299 return cls(instructions, maxrev=maxrev)
300
300
301 def encode(self):
301 def encode(self):
302 hdr = _jge(self._maxrev, len(self._program)).encode()
302 hdr = _jge(self._maxrev, len(self._program)).encode()
303 return hdr + b''.join(i.encode() for i in self._program[1:])
303 return hdr + b''.join(i.encode() for i in self._program[1:])
304
304
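encode() writes the header as a fake JGE whose comparison revision is maxrev and whose jump target is the total entry count, which is exactly what fromdata() decodes back out of the first entry. A sketch of that header round trip:

import struct

llentry = struct.Struct('>II')

def read_header(buf):
    word1, numentries = llentry.unpack_from(buf, 0)
    return word1 >> 2, numentries      # opcode bits are 0 for a JGE

hdr = llentry.pack(5 << 2, 12)         # maxrev=5, 12 entries claimed
assert read_header(hdr) == (5, 12)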
305 def clear(self):
305 def clear(self):
306 self._program = []
306 self._program = []
307 self._maxrev = 0
307 self._maxrev = 0
308 self._lastannotate = None
308 self._lastannotate = None
309
309
310 def replacelines_vec(self, rev, a1, a2, blines):
310 def replacelines_vec(self, rev, a1, a2, blines):
311 return self.replacelines(
311 return self.replacelines(
312 rev, a1, a2, 0, len(blines), _internal_blines=blines
312 rev, a1, a2, 0, len(blines), _internal_blines=blines
313 )
313 )
314
314
315 def replacelines(self, rev, a1, a2, b1, b2, _internal_blines=None):
315 def replacelines(self, rev, a1, a2, b1, b2, _internal_blines=None):
316 """Replace lines [a1, a2) with lines [b1, b2)."""
316 """Replace lines [a1, a2) with lines [b1, b2)."""
317 if self._lastannotate:
317 if self._lastannotate:
318 # TODO(augie): make replacelines() accept a revision at
318 # TODO(augie): make replacelines() accept a revision at
319 # which we're editing as well as a revision to mark
319 # which we're editing as well as a revision to mark
320 # responsible for the edits. In hg-experimental it's
320 # responsible for the edits. In hg-experimental it's
321 # stateful like this, so we're doing the same thing to
321 # stateful like this, so we're doing the same thing to
322 # retain compatibility with absorb until that's imported.
322 # retain compatibility with absorb until that's imported.
323 ar = self._lastannotate
323 ar = self._lastannotate
324 else:
324 else:
325 ar = self.annotate(rev)
325 ar = self.annotate(rev)
326 # ar = self.annotate(self._maxrev)
327 if a1 > len(ar.lines):
328 raise LineLogError(
329 b'%d contains %d lines, tried to access line %d'
330 % (rev, len(ar.lines), a1)
331 )
332 elif a1 == len(ar.lines):
333 # Simulated EOF instruction since we're at EOF, which
334 # doesn't have a "real" line.
335 a1inst = _eof(0, 0)
336 a1info = lineinfo(0, 0, ar._eof)
337 else:
338 a1info = ar.lines[a1]
339 a1inst = self._program[a1info._offset]
340 programlen = self._program.__len__
341 oldproglen = programlen()
342 appendinst = self._program.append
343
344 # insert
345 blineinfos = []
346 bappend = blineinfos.append
347 if b1 < b2:
348 # Determine the jump target for the JGE at the start of
349 # the new block.
350 tgt = oldproglen + (b2 - b1 + 1)
351 # Jump to skip the insert if we're at an older revision.
352 appendinst(_jl(rev, tgt))
353 for linenum in pycompat.xrange(b1, b2):
354 if _internal_blines is None:
355 bappend(lineinfo(rev, linenum, programlen()))
356 appendinst(_line(rev, linenum))
357 else:
358 newrev, newlinenum = _internal_blines[linenum]
359 bappend(lineinfo(newrev, newlinenum, programlen()))
360 appendinst(_line(newrev, newlinenum))
361 # delete
362 if a1 < a2:
363 if a2 > len(ar.lines):
364 raise LineLogError(
365 b'%d contains %d lines, tried to access line %d'
366 % (rev, len(ar.lines), a2)
367 )
368 elif a2 == len(ar.lines):
369 endaddr = ar._eof
370 else:
371 endaddr = ar.lines[a2]._offset
372 if a2 > 0 and rev < self._maxrev:
373 # If we're here, we're deleting a chunk of an old
374 # commit, so we need to be careful and not touch
375 # invisible lines between a2-1 and a2 (IOW, lines that
376 # are added later).
377 endaddr = ar.lines[a2 - 1]._offset + 1
378 appendinst(_jge(rev, endaddr))
379 # copy instruction from a1
380 a1instpc = programlen()
381 appendinst(a1inst)
382 # if a1inst isn't a jump or EOF, then we need to add an unconditional
383 # jump back into the program here.
384 if not isinstance(a1inst, (_jump, _eof)):
385 appendinst(_jump(0, a1info._offset + 1))
386 # Patch instruction at a1, which makes our patch live.
387 self._program[a1info._offset] = _jump(0, oldproglen)
388
389 # Update self._lastannotate in place. This serves as a cache to avoid
390 # expensive "self.annotate" in this function, when "replacelines" is
391 # used continuously.
392 if len(self._lastannotate.lines) > a1:
393 self._lastannotate.lines[a1]._offset = a1instpc
394 else:
395 assert isinstance(a1inst, _eof)
396 self._lastannotate._eof = a1instpc
397 self._lastannotate.lines[a1:a2] = blineinfos
398 self._lastannotate.rev = max(self._lastannotate.rev, rev)
399
400 if rev > self._maxrev:
401 self._maxrev = rev
402
403 def annotate(self, rev):
404 pc = 1
405 lines = []
406 executed = 0
407 # Sanity check: if instructions executed exceeds len(program), we
408 # hit an infinite loop in the linelog program somehow and we
409 # should stop.
410 while pc is not None and executed < len(self._program):
411 inst = self._program[pc]
412 lastpc = pc
413 pc = inst.execute(rev, pc, lines.append)
414 executed += 1
415 if pc is not None:
416 raise LineLogError(
417 r'Probably hit an infinite loop in linelog. Program:\n'
418 + self.debugstr()
419 )
420 ar = annotateresult(rev, lines, lastpc)
421 self._lastannotate = ar
422 return ar
423
424 @property
425 def maxrev(self):
426 return self._maxrev
427
428 # Stateful methods which depend on the value of the last
429 # annotation run. This API is for compatibility with the original
430 # linelog, and we should probably consider refactoring it.
431 @property
432 def annotateresult(self):
433 """Return the last annotation result. C linelog code exposed this."""
434 return [(l.rev, l.linenum) for l in self._lastannotate.lines]
435
436 def getoffset(self, line):
437 return self._lastannotate.lines[line]._offset
438
439 def getalllines(self, start=0, end=0):
440 """Get all lines that ever occurred in [start, end).
441
442 Passing start == end == 0 means "all lines ever".
443
444 This works in terms of *internal* program offsets, not line numbers.
445 """
446 pc = start or 1
447 lines = []
448 # only take as many steps as there are instructions in the
449 # program - if we don't find an EOF or our stop-line before
450 # then, something is badly broken.
451 for step in pycompat.xrange(len(self._program)):
452 inst = self._program[pc]
453 nextpc = pc + 1
454 if isinstance(inst, _jump):
455 nextpc = inst._target
456 elif isinstance(inst, _eof):
457 return lines
458 elif isinstance(inst, (_jl, _jge)):
459 pass
460 elif isinstance(inst, _line):
461 lines.append((inst._rev, inst._origlineno))
462 else:
463 raise LineLogError(b"Illegal instruction %r" % inst)
464 if nextpc == end:
465 return lines
466 pc = nextpc
467 raise LineLogError(b"Failed to perform getalllines")
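The replacelines/annotate pair above is easiest to follow as a tiny bytecode interpreter: conditional jumps keyed on revision numbers decide which LINE instructions are visible to a given revision. Below is a minimal, self-contained sketch of that execution model; the tuple-based instruction encoding and all names are invented for illustration and are not linelog's actual format.

    # A minimal sketch of a linelog-style interpreter loop. The encoding
    # (JGE/JL/JUMP/LINE/EOF tuples keyed by pc) is hypothetical.
    def annotate_sketch(program, rev):
        """Run a tiny linelog-style program and collect visible lines."""
        lines = []
        pc, executed = 1, 0
        while executed < len(program):  # guard against jump loops
            op = program[pc]
            kind = op[0]
            if kind == 'EOF':
                return lines
            elif kind == 'LINE':        # emit (introducing rev, line number)
                _, linerev, linenum = op
                lines.append((linerev, linenum))
                pc += 1
            elif kind == 'JGE':         # skip ahead for revs >= operand
                _, oprev, target = op
                pc = target if rev >= oprev else pc + 1
            elif kind == 'JL':          # skip ahead for revs < operand
                _, oprev, target = op
                pc = target if rev < oprev else pc + 1
            elif kind == 'JUMP':        # unconditional jump
                pc = op[1]
            executed += 1
        raise RuntimeError('infinite loop in program')

    # One line introduced in rev 1, with a second line inserted above it
    # in rev 2 (the JL skips the insert block for older revisions):
    prog = {1: ('JL', 2, 4),
            2: ('LINE', 2, 0),
            3: ('JUMP', 4),
            4: ('LINE', 1, 0),
            5: ('EOF',)}
    assert annotate_sketch(prog, 1) == [(1, 0)]
    assert annotate_sketch(prog, 2) == [(2, 0), (1, 0)]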
@@ -1,3786 +1,3787 b''
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import errno
11 import os
12 import random
13 import sys
14 import time
15 import weakref
16
17 from .i18n import _
18 from .node import (
19 bin,
20 hex,
21 nullid,
22 nullrev,
23 short,
24 )
25 from .pycompat import (
26 delattr,
27 getattr,
28 )
29 from . import (
30 bookmarks,
31 branchmap,
32 bundle2,
33 changegroup,
34 color,
35 context,
36 dirstate,
37 dirstateguard,
38 discovery,
39 encoding,
40 error,
41 exchange,
42 extensions,
43 filelog,
44 hook,
45 lock as lockmod,
46 match as matchmod,
47 merge as mergemod,
48 mergeutil,
49 namespaces,
50 narrowspec,
51 obsolete,
52 pathutil,
53 phases,
54 pushkey,
55 pycompat,
56 + rcutil,
57 repoview,
58 revset,
59 revsetlang,
60 scmutil,
61 sparse,
62 store as storemod,
63 subrepoutil,
64 tags as tagsmod,
65 transaction,
66 txnutil,
67 util,
68 vfs as vfsmod,
69 )
70
71 from .interfaces import (
72 repository,
73 util as interfaceutil,
74 )
75
76 from .utils import (
77 hashutil,
78 procutil,
79 stringutil,
80 )
81
82 from .revlogutils import constants as revlogconst
83
84 release = lockmod.release
85 urlerr = util.urlerr
86 urlreq = util.urlreq
87
88 # set of (path, vfs-location) tuples. vfs-location is:
89 # - 'plain' for vfs relative paths
90 # - '' for svfs relative paths
91 _cachedfiles = set()
92
93
94 class _basefilecache(scmutil.filecache):
95 """All filecache usage on repo is done for logic that should be unfiltered
96 """
97
98 def __get__(self, repo, type=None):
99 if repo is None:
100 return self
101 # proxy to unfiltered __dict__ since filtered repo has no entry
102 unfi = repo.unfiltered()
103 try:
104 return unfi.__dict__[self.sname]
105 except KeyError:
106 pass
107 return super(_basefilecache, self).__get__(unfi, type)
108
109 def set(self, repo, value):
110 return super(_basefilecache, self).set(repo.unfiltered(), value)
111
112
113 class repofilecache(_basefilecache):
114 """filecache for files in .hg but outside of .hg/store"""
115
116 def __init__(self, *paths):
117 super(repofilecache, self).__init__(*paths)
118 for path in paths:
119 _cachedfiles.add((path, b'plain'))
120
121 def join(self, obj, fname):
122 return obj.vfs.join(fname)
123
124
125 class storecache(_basefilecache):
126 """filecache for files in the store"""
127
128 def __init__(self, *paths):
129 super(storecache, self).__init__(*paths)
130 for path in paths:
131 _cachedfiles.add((path, b''))
132
133 def join(self, obj, fname):
134 return obj.sjoin(fname)
135
136
137 class mixedrepostorecache(_basefilecache):
138 """filecache for a mix of files in .hg/store and outside"""
139
140 def __init__(self, *pathsandlocations):
141 # scmutil.filecache only uses the path for passing back into our
142 # join(), so we can safely pass a list of paths and locations
143 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 _cachedfiles.update(pathsandlocations)
145
146 def join(self, obj, fnameandlocation):
147 fname, location = fnameandlocation
148 if location == b'plain':
149 return obj.vfs.join(fname)
150 else:
151 if location != b'':
152 raise error.ProgrammingError(
153 b'unexpected location: %s' % location
154 )
155 return obj.sjoin(fname)
156
157
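The three filecache subclasses above differ only in how join() resolves a path; the interesting behavior is _basefilecache.__get__ proxying every lookup to the unfiltered repository, so all filtered views share one cache. A minimal sketch of that descriptor pattern, with invented Repo/View stand-ins rather than Mercurial's classes:

    # Sketch of a "proxy to the unfiltered object" caching descriptor.
    class unfilteredcached(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, repo, owner=None):
            if repo is None:
                return self
            unfi = repo.unfiltered()       # always resolve on the base object
            try:
                return unfi.__dict__[self.name]
            except KeyError:
                value = self.func(unfi)
                unfi.__dict__[self.name] = value   # cache on the unfiltered repo
                return value

    class Repo(object):
        def unfiltered(self):
            return self

        @unfilteredcached
        def expensive(self):
            print('computed once')
            return 42

    class View(Repo):
        def __init__(self, base):
            self._base = base

        def unfiltered(self):
            return self._base

    base = Repo()
    view = View(base)
    print(view.expensive)   # computed once -> 42
    print(base.expensive)   # 42, served from the shared cache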
158 def isfilecached(repo, name):
159 """check if a repo already has the "name" filecache-ed property cached
160
161 This returns (cachedobj-or-None, iscached) tuple.
162 """
163 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 if not cacheentry:
165 return None, False
166 return cacheentry.obj, True
167
168
169 class unfilteredpropertycache(util.propertycache):
170 """propertycache that applies to the unfiltered repo only"""
171
172 def __get__(self, repo, type=None):
173 unfi = repo.unfiltered()
174 if unfi is repo:
175 return super(unfilteredpropertycache, self).__get__(unfi)
176 return getattr(unfi, self.name)
177
178
179 class filteredpropertycache(util.propertycache):
180 """propertycache that must take filtering into account"""
181
182 def cachevalue(self, obj, value):
183 object.__setattr__(obj, self.name, value)
184
185
186 def hasunfilteredcache(repo, name):
187 """check if a repo has an unfilteredpropertycache value for <name>"""
188 return name in vars(repo.unfiltered())
189
190
191 def unfilteredmethod(orig):
192 """decorate a method that always needs to be run on the unfiltered version"""
193
194 def wrapper(repo, *args, **kwargs):
195 return orig(repo.unfiltered(), *args, **kwargs)
196
197 return wrapper
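For illustration, a hedged sketch of how such a decorator is applied; the class and method here are stand-ins (though upstream methods such as destroyed() are wrapped this way):

    # Hypothetical usage of an unfiltered-only decorator like the one above.
    def unfilteredmethod_sketch(orig):
        def wrapper(repo, *args, **kwargs):
            return orig(repo.unfiltered(), *args, **kwargs)
        return wrapper

    class examplerepo(object):
        def unfiltered(self):
            return self            # a real repo returns its unfiltered twin

        @unfilteredmethod_sketch
        def destroyed(self):
            return self            # always the unfiltered repository

    repo = examplerepo()
    assert repo.destroyed() is repo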
198
199
200 moderncaps = {
201 b'lookup',
202 b'branchmap',
203 b'pushkey',
204 b'known',
205 b'getbundle',
206 b'unbundle',
207 }
208 legacycaps = moderncaps.union({b'changegroupsubset'})
209
210
211 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 class localcommandexecutor(object):
213 def __init__(self, peer):
214 self._peer = peer
215 self._sent = False
216 self._closed = False
217
218 def __enter__(self):
219 return self
220
221 def __exit__(self, exctype, excvalue, exctb):
222 self.close()
223
224 def callcommand(self, command, args):
225 if self._sent:
226 raise error.ProgrammingError(
227 b'callcommand() cannot be used after sendcommands()'
228 )
229
230 if self._closed:
231 raise error.ProgrammingError(
232 b'callcommand() cannot be used after close()'
233 )
234
235 # We don't need to support anything fancy. Just call the named
236 # method on the peer and return a resolved future.
237 fn = getattr(self._peer, pycompat.sysstr(command))
238
239 f = pycompat.futures.Future()
240
241 try:
242 result = fn(**pycompat.strkwargs(args))
243 except Exception:
244 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 else:
246 f.set_result(result)
247
248 return f
249
250 def sendcommands(self):
251 self._sent = True
252
253 def close(self):
254 self._closed = True
255
256
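Because the peer is local, callcommand() never schedules work: it runs the command synchronously and hands back a Future that is already resolved, so callers written against the asynchronous executor API work unchanged. A sketch of the same pattern using plain concurrent.futures instead of the pycompat shims (the fakepeer class is invented):

    # Sketch of the pre-resolved Future pattern used by callcommand().
    import concurrent.futures
    import sys

    class fakepeer(object):
        def heads(self):
            return [b'abc123']

    def callcommand(peer, command, args):
        fn = getattr(peer, command)
        f = concurrent.futures.Future()
        try:
            result = fn(**args)
        except Exception:
            f.set_exception(sys.exc_info()[1])
        else:
            f.set_result(result)     # resolved before the caller sees it
        return f

    fut = callcommand(fakepeer(), 'heads', {})
    print(fut.result())              # [b'abc123'] -- already done, no blocking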
257 @interfaceutil.implementer(repository.ipeercommands)
258 class localpeer(repository.peer):
259 '''peer for a local repo; reflects only the most recent API'''
260
261 def __init__(self, repo, caps=None):
262 super(localpeer, self).__init__()
263
264 if caps is None:
265 caps = moderncaps.copy()
266 self._repo = repo.filtered(b'served')
267 self.ui = repo.ui
268 self._caps = repo._restrictcapabilities(caps)
269
270 # Begin of _basepeer interface.
271
272 def url(self):
273 return self._repo.url()
274
275 def local(self):
276 return self._repo
277
278 def peer(self):
279 return self
280
281 def canpush(self):
282 return True
283
284 def close(self):
285 self._repo.close()
286
287 # End of _basepeer interface.
288
289 # Begin of _basewirecommands interface.
290
291 def branchmap(self):
292 return self._repo.branchmap()
293
294 def capabilities(self):
295 return self._caps
296
297 def clonebundles(self):
298 return self._repo.tryread(b'clonebundles.manifest')
299
300 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 """Used to test argument passing over the wire"""
302 return b"%s %s %s %s %s" % (
303 one,
304 two,
305 pycompat.bytestr(three),
306 pycompat.bytestr(four),
307 pycompat.bytestr(five),
308 )
309
310 def getbundle(
311 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 ):
313 chunks = exchange.getbundlechunks(
314 self._repo,
315 source,
316 heads=heads,
317 common=common,
318 bundlecaps=bundlecaps,
319 **kwargs
320 )[1]
321 cb = util.chunkbuffer(chunks)
322
323 if exchange.bundle2requested(bundlecaps):
324 # When requesting a bundle2, getbundle returns a stream to make the
325 # wire level function happier. We need to build a proper object
326 # from it in local peer.
327 return bundle2.getunbundler(self.ui, cb)
328 else:
329 return changegroup.getunbundler(b'01', cb, None)
330
331 def heads(self):
332 return self._repo.heads()
333
334 def known(self, nodes):
335 return self._repo.known(nodes)
336
337 def listkeys(self, namespace):
338 return self._repo.listkeys(namespace)
339
340 def lookup(self, key):
341 return self._repo.lookup(key)
342
343 def pushkey(self, namespace, key, old, new):
344 return self._repo.pushkey(namespace, key, old, new)
345
346 def stream_out(self):
347 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348
349 def unbundle(self, bundle, heads, url):
350 """apply a bundle on a repo
351
352 This function handles the repo locking itself."""
353 try:
354 try:
355 bundle = exchange.readbundle(self.ui, bundle, None)
356 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 if util.safehasattr(ret, b'getchunks'):
358 # This is a bundle20 object, turn it into an unbundler.
359 # This little dance should be dropped eventually when the
360 # API is finally improved.
361 stream = util.chunkbuffer(ret.getchunks())
362 ret = bundle2.getunbundler(self.ui, stream)
363 return ret
364 except Exception as exc:
365 # If the exception contains output salvaged from a bundle2
366 # reply, we need to make sure it is printed before continuing
367 # to fail. So we build a bundle2 with such output and consume
368 # it directly.
369 #
370 # This is not very elegant but allows a "simple" solution for
371 # issue4594
372 output = getattr(exc, '_bundle2salvagedoutput', ())
373 if output:
374 bundler = bundle2.bundle20(self._repo.ui)
375 for out in output:
376 bundler.addpart(out)
377 stream = util.chunkbuffer(bundler.getchunks())
378 b = bundle2.getunbundler(self.ui, stream)
379 bundle2.processbundle(self._repo, b)
380 raise
381 except error.PushRaced as exc:
382 raise error.ResponseError(
383 _(b'push failed:'), stringutil.forcebytestr(exc)
384 )
385
386 # End of _basewirecommands interface.
387
388 # Begin of peer interface.
389
390 def commandexecutor(self):
391 return localcommandexecutor(self)
392
393 # End of peer interface.
394
395
396 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 class locallegacypeer(localpeer):
398 '''peer extension which implements legacy methods too; used for tests with
399 restricted capabilities'''
400
401 def __init__(self, repo):
402 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403
404 # Begin of baselegacywirecommands interface.
405
406 def between(self, pairs):
407 return self._repo.between(pairs)
408
409 def branches(self, nodes):
410 return self._repo.branches(nodes)
411
412 def changegroup(self, nodes, source):
413 outgoing = discovery.outgoing(
414 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 )
416 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417
418 def changegroupsubset(self, bases, heads, source):
419 outgoing = discovery.outgoing(
420 self._repo, missingroots=bases, missingheads=heads
421 )
422 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423
424 # End of baselegacywirecommands interface.
425
426
427 # Increment the sub-version when the revlog v2 format changes to lock out old
428 # clients.
429 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430
431 # A repository with the sparserevlog feature will have delta chains that
432 # can spread over a larger span. Sparse reading cuts these large spans into
433 # pieces, so that each piece isn't too big.
434 # Without the sparserevlog capability, reading from the repository could use
435 # huge amounts of memory, because the whole span would be read at once,
436 # including all the intermediate revisions that aren't pertinent for the chain.
437 # This is why once a repository has enabled sparse-read, it becomes required.
438 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439
440 # A repository with the sidedataflag requirement will allow storing extra
441 # information for revisions without altering their original hashes.
442 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443
444 # A repository with the copies-sidedata-changeset requirement will store
445 # copies related information in changeset's sidedata.
446 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447
448 # Functions receiving (ui, features) that extensions can register to impact
449 # the ability to load repositories with custom requirements. Only
450 # functions defined in loaded extensions are called.
451 #
452 # The function receives a set of requirement strings that the repository
453 # is capable of opening. Functions will typically add elements to the
454 # set to reflect that the extension knows how to handle those requirements.
455 featuresetupfuncs = set()
456
457
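As a sketch of how an extension might hook in (the extension module and requirement string are hypothetical; only featuresetupfuncs itself comes from this file):

    # In a hypothetical extension module, registered at ui setup time.
    from mercurial import localrepo

    def featuresetup(ui, supported):
        # advertise that this extension can open repos with our requirement
        supported.add(b'exp-myextension-storage')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)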
458 def makelocalrepository(baseui, path, intents=None):
459 """Create a local repository object.
460
461 Given arguments needed to construct a local repository, this function
462 performs various early repository loading functionality (such as
463 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
464 the repository can be opened, derives a type suitable for representing
465 that repository, and returns an instance of it.
466
467 The returned object conforms to the ``repository.completelocalrepository``
468 interface.
469
470 The repository type is derived by calling a series of factory functions
471 for each aspect/interface of the final repository. These are defined by
472 ``REPO_INTERFACES``.
473
474 Each factory function is called to produce a type implementing a specific
475 interface. The cumulative list of returned types will be combined into a
476 new type and that type will be instantiated to represent the local
477 repository.
478
479 The factory functions each receive various state that may be consulted
480 as part of deriving a type.
481
482 Extensions should wrap these factory functions to customize repository type
483 creation. Note that an extension's wrapped function may be called even if
484 that extension is not loaded for the repo being constructed. Extensions
485 should check if their ``__name__`` appears in the
486 ``extensionmodulenames`` set passed to the factory function and no-op if
487 not.
488 """
489 ui = baseui.copy()
490 # Prevent copying repo configuration.
491 ui.copy = baseui.copy
492
493 # Working directory VFS rooted at repository root.
494 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
495
496 # Main VFS for .hg/ directory.
497 hgpath = wdirvfs.join(b'.hg')
498 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
499
500 # The .hg/ path should exist and should be a directory. All other
501 # cases are errors.
502 if not hgvfs.isdir():
503 try:
504 hgvfs.stat()
505 except OSError as e:
506 if e.errno != errno.ENOENT:
507 raise
508
509 raise error.RepoError(_(b'repository %s not found') % path)
510
511 # .hg/requires file contains a newline-delimited list of
512 # features/capabilities the opener (us) must have in order to use
513 # the repository. This file was introduced in Mercurial 0.9.2,
514 # which means very old repositories may not have one. We assume
515 # a missing file translates to no requirements.
516 try:
517 requirements = set(hgvfs.read(b'requires').splitlines())
518 except IOError as e:
519 if e.errno != errno.ENOENT:
520 raise
521 requirements = set()
522
523 # The .hg/hgrc file may load extensions or contain config options
524 # that influence repository construction. Attempt to load it and
525 # process any new extensions that it may have pulled in.
526 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
527 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
528 extensions.loadall(ui)
529 extensions.populateui(ui)
530
531 # Set of module names of extensions loaded for this repository.
532 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
533
534 supportedrequirements = gathersupportedrequirements(ui)
535
536 # We first validate the requirements are known.
537 ensurerequirementsrecognized(requirements, supportedrequirements)
538
539 # Then we validate that the known set is reasonable to use together.
540 ensurerequirementscompatible(ui, requirements)
541
542 # TODO there are unhandled edge cases related to opening repositories with
543 # shared storage. If storage is shared, we should also test for requirements
544 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
545 # that repo, as that repo may load extensions needed to open it. This is a
546 # bit complicated because we don't want the other hgrc to overwrite settings
547 # in this hgrc.
548 #
549 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
550 # file when sharing repos. But if a requirement is added after the share is
551 # performed, thereby introducing a new requirement for the opener, we may
552 # not see that and could encounter a run-time error interacting with
553 # that shared store since it has an unknown-to-us requirement.
554
555 # At this point, we know we should be capable of opening the repository.
556 # Now get on with doing that.
557
558 features = set()
559
560 # The "store" part of the repository holds versioned data. How it is
561 # accessed is determined by various requirements. The ``shared`` or
562 # ``relshared`` requirements indicate the store lives in the path contained
563 # in the ``.hg/sharedpath`` file. This is an absolute path for
564 # ``shared`` and relative to ``.hg/`` for ``relshared``.
565 if b'shared' in requirements or b'relshared' in requirements:
566 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
567 if b'relshared' in requirements:
568 sharedpath = hgvfs.join(sharedpath)
569
570 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
571
572 if not sharedvfs.exists():
573 raise error.RepoError(
574 _(b'.hg/sharedpath points to nonexistent directory %s')
575 % sharedvfs.base
576 )
577
578 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
579
580 storebasepath = sharedvfs.base
581 cachepath = sharedvfs.join(b'cache')
582 else:
583 storebasepath = hgvfs.base
584 cachepath = hgvfs.join(b'cache')
585 wcachepath = hgvfs.join(b'wcache')
586
587 # The store has changed over time and the exact layout is dictated by
588 # requirements. The store interface abstracts differences across all
589 # of them.
590 store = makestore(
591 requirements,
592 storebasepath,
593 lambda base: vfsmod.vfs(base, cacheaudited=True),
594 )
595 hgvfs.createmode = store.createmode
596
597 storevfs = store.vfs
598 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
599
600 # The cache vfs is used to manage cache files.
601 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
602 cachevfs.createmode = store.createmode
603 # The cache vfs is used to manage cache files related to the working copy
604 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
605 wcachevfs.createmode = store.createmode
606
607 # Now resolve the type for the repository object. We do this by repeatedly
608 # calling a factory function to produce types for specific aspects of the
609 # repo's operation. The aggregate returned types are used as base classes
610 # for a dynamically-derived type, which will represent our new repository.
611
612 bases = []
613 extrastate = {}
614
615 for iface, fn in REPO_INTERFACES:
616 # We pass all potentially useful state to give extensions tons of
617 # flexibility.
618 typ = fn()(
619 ui=ui,
620 intents=intents,
621 requirements=requirements,
622 features=features,
623 wdirvfs=wdirvfs,
624 hgvfs=hgvfs,
625 store=store,
626 storevfs=storevfs,
627 storeoptions=storevfs.options,
628 cachevfs=cachevfs,
629 wcachevfs=wcachevfs,
630 extensionmodulenames=extensionmodulenames,
631 extrastate=extrastate,
632 baseclasses=bases,
633 )
634
635 if not isinstance(typ, type):
636 raise error.ProgrammingError(
637 b'unable to construct type for %s' % iface
638 )
639
640 bases.append(typ)
641
642 # type() allows you to use characters in type names that wouldn't be
643 # recognized as Python symbols in source code. We abuse that to add
644 # rich information about our constructed repo.
645 name = pycompat.sysstr(
646 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
647 )
648
649 cls = type(name, tuple(bases), {})
650
651 return cls(
652 baseui=baseui,
653 ui=ui,
654 origroot=path,
655 wdirvfs=wdirvfs,
656 hgvfs=hgvfs,
657 requirements=requirements,
658 supportedrequirements=supportedrequirements,
659 sharedpath=storebasepath,
660 store=store,
661 cachevfs=cachevfs,
662 wcachevfs=wcachevfs,
663 features=features,
664 intents=intents,
665 )
666
667
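The loop above collects one class per interface and fuses them with type(), embedding the repo path and requirements in the class name for debugging. A minimal sketch of that composition trick, with invented interface classes standing in for the real factory output:

    # Sketch of dynamic class composition as done in makelocalrepository().
    class revlogstorage(object):
        def commit(self):
            return 'stored via revlogs'

    class largefilessupport(object):
        def bigfile(self):
            return 'largefiles enabled'

    bases = [revlogstorage, largefilessupport]
    requirements = {'revlogv1', 'store', 'largefiles'}

    # Characters like ':' and '<' are illegal in source-level class names,
    # but type() accepts them, which makes repr() far more informative.
    name = 'derivedrepo:/some/repo<%s>' % ','.join(sorted(requirements))
    cls = type(name, tuple(bases), {})

    repo = cls()
    print(repo.commit())        # stored via revlogs
    print(repo.bigfile())       # largefiles enabled
    print(type(repo).__name__)  # derivedrepo:/some/repo<largefiles,revlogv1,store>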
668 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
669 """Load hgrc files/content into a ui instance.
670
671 This is called during repository opening to load any additional
672 config files or settings relevant to the current repository.
673
674 Returns a bool indicating whether any additional configs were loaded.
675
676 Extensions should monkeypatch this function to modify how per-repo
677 configs are loaded. For example, an extension may wish to pull in
678 configs from alternate files or sources.
679 """
680 - if b'HGRCSKIPREPO' in encoding.environ:
680 + if not rcutil.use_repo_hgrc():
681 return False
682 try:
683 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
684 return True
685 except IOError:
686 return False
687
688
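A hedged sketch of the monkeypatching the docstring describes, using extensions.wrapfunction (Mercurial's standard wrapping helper); the extra config file read here is hypothetical:

    # In a hypothetical extension: also honor .hg/teamrc alongside .hg/hgrc.
    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            ui.readconfig(hgvfs.join(b'teamrc'), root=wdirvfs.base)
            loaded = True
        except IOError:
            pass
        return loaded

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)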
689 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
690 """Perform additional actions after .hg/hgrc is loaded.
691
692 This function is called during repository loading immediately after
693 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
694
695 The function can be used to validate configs, automatically add
696 options (including extensions) based on requirements, etc.
697 """
698
699 # Map of requirements to list of extensions to load automatically when
700 # requirement is present.
701 autoextensions = {
702 b'largefiles': [b'largefiles'],
703 b'lfs': [b'lfs'],
704 }
705
706 for requirement, names in sorted(autoextensions.items()):
707 if requirement not in requirements:
708 continue
709
710 for name in names:
711 if not ui.hasconfig(b'extensions', name):
712 ui.setconfig(b'extensions', name, b'', source=b'autoload')
713
714
715 def gathersupportedrequirements(ui):
716 """Determine the complete set of recognized requirements."""
717 # Start with all requirements supported by this file.
718 supported = set(localrepository._basesupported)
719
720 # Execute ``featuresetupfuncs`` entries if they belong to an extension
721 # relevant to this ui instance.
722 modules = {m.__name__ for n, m in extensions.extensions(ui)}
723
724 for fn in featuresetupfuncs:
725 if fn.__module__ in modules:
726 fn(ui, supported)
727
728 # Add derived requirements from registered compression engines.
729 for name in util.compengines:
730 engine = util.compengines[name]
731 if engine.available() and engine.revlogheader():
732 supported.add(b'exp-compression-%s' % name)
733 if engine.name() == b'zstd':
734 supported.add(b'revlog-compression-zstd')
735
736 return supported
737
738
739 def ensurerequirementsrecognized(requirements, supported):
740 """Validate that a set of local requirements is recognized.
741
742 Receives a set of requirements. Raises an ``error.RepoError`` if there
743 exists any requirement in that set that currently loaded code doesn't
744 recognize.
745
746 Returns nothing on success; problems are reported by raising.
747 """
748 missing = set()
749
750 for requirement in requirements:
751 if requirement in supported:
752 continue
753
754 if not requirement or not requirement[0:1].isalnum():
755 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
756
757 missing.add(requirement)
758
759 if missing:
760 raise error.RequirementError(
761 _(b'repository requires features unknown to this Mercurial: %s')
762 % b' '.join(sorted(missing)),
763 hint=_(
764 b'see https://mercurial-scm.org/wiki/MissingRequirement '
765 b'for more information'
766 ),
767 )
768
769
770 def ensurerequirementscompatible(ui, requirements):
771 """Validates that a set of recognized requirements is mutually compatible.
772
773 Some requirements may not be compatible with others or require
774 config options that aren't enabled. This function is called during
775 repository opening to ensure that the set of requirements needed
776 to open a repository is sane and compatible with config options.
777
778 Extensions can monkeypatch this function to perform additional
779 checking.
780
781 ``error.RepoError`` should be raised on failure.
782 """
783 if b'exp-sparse' in requirements and not sparse.enabled:
784 raise error.RepoError(
785 _(
786 b'repository is using sparse feature but '
787 b'sparse is not enabled; enable the '
788 b'"sparse" extension to access'
789 )
790 )
791
792
793 def makestore(requirements, path, vfstype):
794 """Construct a storage object for a repository."""
795 if b'store' in requirements:
796 if b'fncache' in requirements:
797 return storemod.fncachestore(
798 path, vfstype, b'dotencode' in requirements
799 )
800
801 return storemod.encodedstore(path, vfstype)
802
803 return storemod.basicstore(path, vfstype)
804
805
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True

    return options


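# A worked example of the requirement parsing above:
# ``b'revlog-compression-zstd'.split(b'-', 2)`` yields
# ``[b'revlog', b'compression', b'zstd']``, so ``options[b'compengine']``
# becomes ``b'zstd'``; the legacy ``b'exp-compression-zstd'`` spelling
# splits the same way.

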
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


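# A simplified sketch of how ``makelocalrepository()`` (defined elsewhere in
# this module) consumes REPO_INTERFACES; the real code adds interface
# validation and passes more arguments to each factory:
#
#     bases = []
#     for iface, fn in REPO_INTERFACES:
#         bases.append(fn()(requirements=requirements, features=features))
#     cls = type('derivedrepo', tuple(bases), {})

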
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

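    # Illustration of the "no nesting" behavior documented above; both
    # expressions produce a "served" view, whatever view ``repo`` uses:
    #
    #     repo.filtered(b'served')
    #     repo.filtered(b'visible').filtered(b'served')
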
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

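    # A minimal sketch of the extension point named in the docstring above,
    # using the conventional ``reposetup`` wrapping pattern (class name
    # invented for illustration):
    #
    #     def reposetup(ui, repo):
    #         class wrappedrepo(repo.__class__):
    #             def _makedirstate(self):
    #                 ds = super(wrappedrepo, self)._makedirstate()
    #                 ...  # decorate or observe ds here
    #                 return ds
    #
    #         repo.__class__ = wrappedrepo
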
    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

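    # Illustrative use of the intersection described above (the pattern and
    # matcher arguments are assumptions for the example):
    #
    #     m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #     narrowed = repo.narrowmatch(m)  # paths must satisfy both matchers
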
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

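    # Illustrative usage of ``revs()``/``set()`` (hypothetical caller code,
    # not part of this module; assumes ``repo`` and ``ui`` objects in scope):
    #
    #     for ctx in repo.set(b'heads(branch(%s))', b'default'):
    #         ui.write(b'%s\n' % ctx.hex())
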
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
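
    # Illustrative usage (hypothetical extension code; the hook name
    # ``myext-postop`` is made up for the example):
    #
    #     repo.hook(b'myext-postop', throw=False, node=hex(node))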

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, b'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = dict(
            [
                (encoding.tolocal(name), value)
                for (name, value) in pycompat.iteritems(tagtypes)
            ]
        )
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
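
    # Illustrative usage (hypothetical caller code):
    #
    #     for tag, node in repo.tagslist():
    #         ui.write(b'%s -> %s\n' % (tag, hex(node)))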

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

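    # Illustrative usage (hypothetical caller code):
    #
    #     tip = repo.branchtip(b'stable', ignoremissing=True)
    #     if tip is None:
    #         ui.warn(b"no 'stable' branch in this repository\n")
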
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
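
    # ``known()`` reports, for each node, whether it is present locally and
    # not filtered; discovery over the wire protocol relies on this answer.
    # Illustrative call (``node1``/``node2`` are hypothetical):
    #
    #     flags = repo.known([node1, node2])   # e.g. [True, False]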

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
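
    # The filter patterns come from the hgrc section named after the filter;
    # a hypothetical configuration (see the [encode]/[decode] sections in
    # ``hg help config``):
    #
    #     [encode]
    #     **.txt = pipe: some-encoder
    #
    # Command prefixes such as ``pipe:`` and ``tempfile:`` are handled by
    # ``procutil.filter``; in-process filters are the ones registered via
    # ``adddatafilter()`` below.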

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
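
    # Note: ``flags`` is the manifest flag string; b'l' marks a symlink (the
    # data becomes the link target) and b'x' marks the file as executable.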

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
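
    # Illustrative use of the transaction machinery (hypothetical caller
    # code; a transaction requires the store lock to be held):
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # mutate the repository; an exception rolls back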

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track their movement from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition the
        # details of these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tag was
        # touched in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

2133 def validate(tr2):
2134 def validate(tr2):
2134 """will run pre-closing hooks"""
2135 """will run pre-closing hooks"""
2135 # XXX the transaction API is a bit lacking here so we take a hacky
2136 # XXX the transaction API is a bit lacking here so we take a hacky
2136 # path for now
2137 # path for now
2137 #
2138 #
2138 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2139 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2139 # dict is copied before these run. In addition we needs the data
2140 # dict is copied before these run. In addition we needs the data
2140 # available to in memory hooks too.
2141 # available to in memory hooks too.
2141 #
2142 #
2142 # Moreover, we also need to make sure this runs before txnclose
2143 # Moreover, we also need to make sure this runs before txnclose
2143 # hooks and there is no "pending" mechanism that would execute
2144 # hooks and there is no "pending" mechanism that would execute
2144 # logic only if hooks are about to run.
2145 # logic only if hooks are about to run.
2145 #
2146 #
2146 # Fixing this limitation of the transaction is also needed to track
2147 # Fixing this limitation of the transaction is also needed to track
2147 # other families of changes (bookmarks, phases, obsolescence).
2148 # other families of changes (bookmarks, phases, obsolescence).
2148 #
2149 #
2149 # This will have to be fixed before we remove the experimental
2150 # This will have to be fixed before we remove the experimental
2150 # gating.
2151 # gating.
2151 tracktags(tr2)
2152 tracktags(tr2)
2152 repo = reporef()
2153 repo = reporef()
2153
2154
2154 singleheadopt = (b'experimental', b'single-head-per-branch')
2155 singleheadopt = (b'experimental', b'single-head-per-branch')
2155 singlehead = repo.ui.configbool(*singleheadopt)
2156 singlehead = repo.ui.configbool(*singleheadopt)
2156 if singlehead:
2157 if singlehead:
2157 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2158 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2158 accountclosed = singleheadsub.get(
2159 accountclosed = singleheadsub.get(
2159 b"account-closed-heads", False
2160 b"account-closed-heads", False
2160 )
2161 )
2161 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2162 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2162 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2163 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2163 for name, (old, new) in sorted(
2164 for name, (old, new) in sorted(
2164 tr.changes[b'bookmarks'].items()
2165 tr.changes[b'bookmarks'].items()
2165 ):
2166 ):
2166 args = tr.hookargs.copy()
2167 args = tr.hookargs.copy()
2167 args.update(bookmarks.preparehookargs(name, old, new))
2168 args.update(bookmarks.preparehookargs(name, old, new))
2168 repo.hook(
2169 repo.hook(
2169 b'pretxnclose-bookmark',
2170 b'pretxnclose-bookmark',
2170 throw=True,
2171 throw=True,
2171 **pycompat.strkwargs(args)
2172 **pycompat.strkwargs(args)
2172 )
2173 )
2173 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2174 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2174 cl = repo.unfiltered().changelog
2175 cl = repo.unfiltered().changelog
2175 for rev, (old, new) in tr.changes[b'phases'].items():
2176 for rev, (old, new) in tr.changes[b'phases'].items():
2176 args = tr.hookargs.copy()
2177 args = tr.hookargs.copy()
2177 node = hex(cl.node(rev))
2178 node = hex(cl.node(rev))
2178 args.update(phases.preparehookargs(node, old, new))
2179 args.update(phases.preparehookargs(node, old, new))
2179 repo.hook(
2180 repo.hook(
2180 b'pretxnclose-phase',
2181 b'pretxnclose-phase',
2181 throw=True,
2182 throw=True,
2182 **pycompat.strkwargs(args)
2183 **pycompat.strkwargs(args)
2183 )
2184 )
2184
2185
2185 repo.hook(
2186 repo.hook(
2186 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2187 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2187 )
2188 )
2188
2189
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the transaction
                # closes if tr.addfilegenerator (via dirstate.write or so)
                # wasn't invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = {}
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes[b'phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'txnclose-phase',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
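
    # On a successful transaction close, ``aftertrans(renames)`` (registered
    # in ``transaction()`` above) renames each ``journal.*`` file to its
    # ``undo.*`` counterpart (see ``undoname``), which is what ``rollback()``
    # later consumes.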

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False
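
    # ``hg recover`` ends up here: it rolls the store back using the
    # ``journal`` file left behind by an interrupted transaction (see the
    # abandoned-transaction check in ``transaction()`` above).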

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing the fnode cache warms it
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't
            # ever have been written, even if they're a subset of another
            # kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

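    # A hedged usage sketch (not in the original source): the 'full' branch
    # above is what a cache-warming maintenance pass would exercise, on an
    # assumed repository object 'repo':
    #
    #   repo.updatecaches(full=True)   # warm every cache this repo knows of
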
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of the store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being of a higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

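    # Illustrative sketch (not part of the original source): an extension
    # that wants to act only once every lock is gone could register a
    # callback here. The boolean handed to the callback reports whether the
    # locked operation succeeded; the names below are hypothetical.
    #
    #   def _notify(success):
    #       if success:
    #           repo.ui.debug(b'repository fully unlocked\n')
    #   repo._afterlock(_notify)
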
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

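    # A minimal usage sketch (not in the original source) of the documented
    # lock ordering: take the working-copy lock before the store lock.
    # 'repo' is an assumed repository object.
    #
    #   with repo.wlock(), repo.lock():
    #       ...  # mutate the working copy and/or the store safely
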
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

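    # Hedged example (not in the original source): the ordering warning
    # emitted above can be surfaced during development via the devel
    # config section, e.g. in an hgrc:
    #
    #   [devel]
    #   check-locks = yes
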
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

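    # Illustrative note (not from the original source): when copy metadata is
    # written to the filelog, the 'meta' dict built above ends up looking
    # roughly like this for a file copied from 'foo' at some node 'cnode':
    #
    #   meta = {
    #       b"copy": b"foo",          # the copy source path
    #       b"copyrev": hex(cnode),   # source filelog node, as hex
    #   }
    #
    # with fparent1 set to nullid so readers know to consult the copy data.
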
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # the temporary commit got stripped before the hook was released
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

3010 def commitctx(self, ctx, error=False, origctx=None):
3011 def commitctx(self, ctx, error=False, origctx=None):
3011 """Add a new revision to current repository.
3012 """Add a new revision to current repository.
3012 Revision information is passed via the context argument.
3013 Revision information is passed via the context argument.
3013
3014
3014 ctx.files() should list all files involved in this commit, i.e.
3015 ctx.files() should list all files involved in this commit, i.e.
3015 modified/added/removed files. On merge, it may be wider than the
3016 modified/added/removed files. On merge, it may be wider than the
3016 ctx.files() to be committed, since any file nodes derived directly
3017 ctx.files() to be committed, since any file nodes derived directly
3017 from p1 or p2 are excluded from the committed ctx.files().
3018 from p1 or p2 are excluded from the committed ctx.files().
3018
3019
3019 origctx is for convert to work around the problem that bug
3020 origctx is for convert to work around the problem that bug
3020 fixes to the files list in changesets change hashes. For
3021 fixes to the files list in changesets change hashes. For
3021 convert to be the identity, it can pass an origctx and this
3022 convert to be the identity, it can pass an origctx and this
3022 function will use the same files list when it makes sense to
3023 function will use the same files list when it makes sense to
3023 do so.
3024 do so.
3024 """
3025 """
3025
3026
3026 p1, p2 = ctx.p1(), ctx.p2()
3027 p1, p2 = ctx.p1(), ctx.p2()
3027 user = ctx.user()
3028 user = ctx.user()
3028
3029
3029 if self.filecopiesmode == b'changeset-sidedata':
3030 if self.filecopiesmode == b'changeset-sidedata':
3030 writechangesetcopy = True
3031 writechangesetcopy = True
3031 writefilecopymeta = True
3032 writefilecopymeta = True
3032 writecopiesto = None
3033 writecopiesto = None
3033 else:
3034 else:
3034 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3035 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3035 writefilecopymeta = writecopiesto != b'changeset-only'
3036 writefilecopymeta = writecopiesto != b'changeset-only'
3036 writechangesetcopy = writecopiesto in (
3037 writechangesetcopy = writecopiesto in (
3037 b'changeset-only',
3038 b'changeset-only',
3038 b'compatibility',
3039 b'compatibility',
3039 )
3040 )
3040 p1copies, p2copies = None, None
3041 p1copies, p2copies = None, None
3041 if writechangesetcopy:
3042 if writechangesetcopy:
3042 p1copies = ctx.p1copies()
3043 p1copies = ctx.p1copies()
3043 p2copies = ctx.p2copies()
3044 p2copies = ctx.p2copies()
3044 filesadded, filesremoved = None, None
3045 filesadded, filesremoved = None, None
3045 with self.lock(), self.transaction(b"commit") as tr:
3046 with self.lock(), self.transaction(b"commit") as tr:
3046 trp = weakref.proxy(tr)
3047 trp = weakref.proxy(tr)
3047
3048
3048 if ctx.manifestnode():
3049 if ctx.manifestnode():
3049 # reuse an existing manifest revision
3050 # reuse an existing manifest revision
3050 self.ui.debug(b'reusing known manifest\n')
3051 self.ui.debug(b'reusing known manifest\n')
3051 mn = ctx.manifestnode()
3052 mn = ctx.manifestnode()
3052 files = ctx.files()
3053 files = ctx.files()
3053 if writechangesetcopy:
3054 if writechangesetcopy:
3054 filesadded = ctx.filesadded()
3055 filesadded = ctx.filesadded()
3055 filesremoved = ctx.filesremoved()
3056 filesremoved = ctx.filesremoved()
3056 elif ctx.files():
3057 elif ctx.files():
3057 m1ctx = p1.manifestctx()
3058 m1ctx = p1.manifestctx()
3058 m2ctx = p2.manifestctx()
3059 m2ctx = p2.manifestctx()
3059 mctx = m1ctx.copy()
3060 mctx = m1ctx.copy()
3060
3061
3061 m = mctx.read()
3062 m = mctx.read()
3062 m1 = m1ctx.read()
3063 m1 = m1ctx.read()
3063 m2 = m2ctx.read()
3064 m2 = m2ctx.read()
3064
3065
3065 # check in files
3066 # check in files
3066 added = []
3067 added = []
3067 changed = []
3068 changed = []
3068 removed = list(ctx.removed())
3069 removed = list(ctx.removed())
3069 linkrev = len(self)
3070 linkrev = len(self)
3070 self.ui.note(_(b"committing files:\n"))
3071 self.ui.note(_(b"committing files:\n"))
3071 uipathfn = scmutil.getuipathfn(self)
3072 uipathfn = scmutil.getuipathfn(self)
3072 for f in sorted(ctx.modified() + ctx.added()):
3073 for f in sorted(ctx.modified() + ctx.added()):
3073 self.ui.note(uipathfn(f) + b"\n")
3074 self.ui.note(uipathfn(f) + b"\n")
3074 try:
3075 try:
3075 fctx = ctx[f]
3076 fctx = ctx[f]
3076 if fctx is None:
3077 if fctx is None:
3077 removed.append(f)
3078 removed.append(f)
3078 else:
3079 else:
3079 added.append(f)
3080 added.append(f)
3080 m[f] = self._filecommit(
3081 m[f] = self._filecommit(
3081 fctx,
3082 fctx,
3082 m1,
3083 m1,
3083 m2,
3084 m2,
3084 linkrev,
3085 linkrev,
3085 trp,
3086 trp,
3086 changed,
3087 changed,
3087 writefilecopymeta,
3088 writefilecopymeta,
3088 )
3089 )
3089 m.setflag(f, fctx.flags())
3090 m.setflag(f, fctx.flags())
3090 except OSError:
3091 except OSError:
3091 self.ui.warn(
3092 self.ui.warn(
3092 _(b"trouble committing %s!\n") % uipathfn(f)
3093 _(b"trouble committing %s!\n") % uipathfn(f)
3093 )
3094 )
3094 raise
3095 raise
3095 except IOError as inst:
3096 except IOError as inst:
3096 errcode = getattr(inst, 'errno', errno.ENOENT)
3097 errcode = getattr(inst, 'errno', errno.ENOENT)
3097 if error or errcode and errcode != errno.ENOENT:
3098 if error or errcode and errcode != errno.ENOENT:
3098 self.ui.warn(
3099 self.ui.warn(
3099 _(b"trouble committing %s!\n") % uipathfn(f)
3100 _(b"trouble committing %s!\n") % uipathfn(f)
3100 )
3101 )
3101 raise
3102 raise
3102
3103
3103 # update manifest
3104 # update manifest
3104 removed = [f for f in removed if f in m1 or f in m2]
3105 removed = [f for f in removed if f in m1 or f in m2]
3105 drop = sorted([f for f in removed if f in m])
3106 drop = sorted([f for f in removed if f in m])
3106 for f in drop:
3107 for f in drop:
3107 del m[f]
3108 del m[f]
3108 if p2.rev() != nullrev:
3109 if p2.rev() != nullrev:
3109
3110
3110 @util.cachefunc
3111 @util.cachefunc
3111 def mas():
3112 def mas():
3112 p1n = p1.node()
3113 p1n = p1.node()
3113 p2n = p2.node()
3114 p2n = p2.node()
3114 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3115 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3115 if not cahs:
3116 if not cahs:
3116 cahs = [nullrev]
3117 cahs = [nullrev]
3117 return [self[r].manifest() for r in cahs]
3118 return [self[r].manifest() for r in cahs]
3118
3119
3119 def deletionfromparent(f):
3120 def deletionfromparent(f):
3120 # When a file is removed relative to p1 in a merge, this
3121 # When a file is removed relative to p1 in a merge, this
3121 # function determines whether the absence is due to a
3122 # function determines whether the absence is due to a
3122 # deletion from a parent, or whether the merge commit
3123 # deletion from a parent, or whether the merge commit
3123 # itself deletes the file. We decide this by doing a
3124 # itself deletes the file. We decide this by doing a
3124 # simplified three way merge of the manifest entry for
3125 # simplified three way merge of the manifest entry for
3125 # the file. There are two ways we decide the merge
3126 # the file. There are two ways we decide the merge
3126 # itself didn't delete a file:
3127 # itself didn't delete a file:
3127 # - neither parent (nor the merge) contain the file
3128 # - neither parent (nor the merge) contain the file
3128 # - exactly one parent contains the file, and that
3129 # - exactly one parent contains the file, and that
3129 # parent has the same filelog entry as the merge
3130 # parent has the same filelog entry as the merge
3130 # ancestor (or all of them if there two). In other
3131 # ancestor (or all of them if there two). In other
3131 # words, that parent left the file unchanged while the
3132 # words, that parent left the file unchanged while the
3132 # other one deleted it.
3133 # other one deleted it.
3133 # One way to think about this is that deleting a file is
3134 # One way to think about this is that deleting a file is
3134 # similar to emptying it, so the list of changed files
3135 # similar to emptying it, so the list of changed files
3135 # should be similar either way. The computation
3136 # should be similar either way. The computation
3136 # described above is not done directly in _filecommit
3137 # described above is not done directly in _filecommit
3137 # when creating the list of changed files, however
3138 # when creating the list of changed files, however
3138 # it does something very similar by comparing filelog
3139 # it does something very similar by comparing filelog
3139 # nodes.
3140 # nodes.
3140 if f in m1:
3141 if f in m1:
3141 return f not in m2 and all(
3142 return f not in m2 and all(
3142 f in ma and ma.find(f) == m1.find(f)
3143 f in ma and ma.find(f) == m1.find(f)
3143 for ma in mas()
3144 for ma in mas()
3144 )
3145 )
3145 elif f in m2:
3146 elif f in m2:
3146 return all(
3147 return all(
3147 f in ma and ma.find(f) == m2.find(f)
3148 f in ma and ma.find(f) == m2.find(f)
3148 for ma in mas()
3149 for ma in mas()
3149 )
3150 )
3150 else:
3151 else:
3151 return True
3152 return True
3152
3153
3153 removed = [f for f in removed if not deletionfromparent(f)]
3154 removed = [f for f in removed if not deletionfromparent(f)]
3154
3155
3155 files = changed + removed
3156 files = changed + removed
3156 md = None
3157 md = None
3157 if not files:
3158 if not files:
3158 # if no "files" actually changed in terms of the changelog,
3159 # if no "files" actually changed in terms of the changelog,
3159 # try hard to detect unmodified manifest entry so that the
3160 # try hard to detect unmodified manifest entry so that the
3160 # exact same commit can be reproduced later on convert.
3161 # exact same commit can be reproduced later on convert.
3161 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3162 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3162 if not files and md:
3163 if not files and md:
3163 self.ui.debug(
3164 self.ui.debug(
3164 b'not reusing manifest (no file change in '
3165 b'not reusing manifest (no file change in '
3165 b'changelog, but manifest differs)\n'
3166 b'changelog, but manifest differs)\n'
3166 )
3167 )
3167 if files or md:
3168 if files or md:
3168 self.ui.note(_(b"committing manifest\n"))
3169 self.ui.note(_(b"committing manifest\n"))
3169 # we're using narrowmatch here since it's already applied at
3170 # we're using narrowmatch here since it's already applied at
3170 # other stages (such as dirstate.walk), so we're already
3171 # other stages (such as dirstate.walk), so we're already
3171 # ignoring things outside of narrowspec in most cases. The
3172 # ignoring things outside of narrowspec in most cases. The
3172 # one case where we might have files outside the narrowspec
3173 # one case where we might have files outside the narrowspec
3173 # at this point is merges, and we already error out in the
3174 # at this point is merges, and we already error out in the
3174 # case where the merge has files outside of the narrowspec,
3175 # case where the merge has files outside of the narrowspec,
3175 # so this is safe.
3176 # so this is safe.
3176 mn = mctx.write(
3177 mn = mctx.write(
3177 trp,
3178 trp,
3178 linkrev,
3179 linkrev,
3179 p1.manifestnode(),
3180 p1.manifestnode(),
3180 p2.manifestnode(),
3181 p2.manifestnode(),
3181 added,
3182 added,
3182 drop,
3183 drop,
3183 match=self.narrowmatch(),
3184 match=self.narrowmatch(),
3184 )
3185 )
3185
3186
3186 if writechangesetcopy:
3187 if writechangesetcopy:
3187 filesadded = [
3188 filesadded = [
3188 f for f in changed if not (f in m1 or f in m2)
3189 f for f in changed if not (f in m1 or f in m2)
3189 ]
3190 ]
3190 filesremoved = removed
3191 filesremoved = removed
3191 else:
3192 else:
3192 self.ui.debug(
3193 self.ui.debug(
3193 b'reusing manifest from p1 (listed files '
3194 b'reusing manifest from p1 (listed files '
3194 b'actually unchanged)\n'
3195 b'actually unchanged)\n'
3195 )
3196 )
3196 mn = p1.manifestnode()
3197 mn = p1.manifestnode()
3197 else:
3198 else:
3198 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3199 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3199 mn = p1.manifestnode()
3200 mn = p1.manifestnode()
3200 files = []
3201 files = []
3201
3202
3202 if writecopiesto == b'changeset-only':
3203 if writecopiesto == b'changeset-only':
3203 # If writing only to changeset extras, use None to indicate that
3204 # If writing only to changeset extras, use None to indicate that
3204 # no entry should be written. If writing to both, write an empty
3205 # no entry should be written. If writing to both, write an empty
3205 # entry to prevent the reader from falling back to reading
3206 # entry to prevent the reader from falling back to reading
3206 # filelogs.
3207 # filelogs.
3207 p1copies = p1copies or None
3208 p1copies = p1copies or None
3208 p2copies = p2copies or None
3209 p2copies = p2copies or None
3209 filesadded = filesadded or None
3210 filesadded = filesadded or None
3210 filesremoved = filesremoved or None
3211 filesremoved = filesremoved or None
3211
3212
3212 if origctx and origctx.manifestnode() == mn:
3213 if origctx and origctx.manifestnode() == mn:
3213 files = origctx.files()
3214 files = origctx.files()
3214
3215
3215 # update changelog
3216 # update changelog
3216 self.ui.note(_(b"committing changelog\n"))
3217 self.ui.note(_(b"committing changelog\n"))
3217 self.changelog.delayupdate(tr)
3218 self.changelog.delayupdate(tr)
3218 n = self.changelog.add(
3219 n = self.changelog.add(
3219 mn,
3220 mn,
3220 files,
3221 files,
3221 ctx.description(),
3222 ctx.description(),
3222 trp,
3223 trp,
3223 p1.node(),
3224 p1.node(),
3224 p2.node(),
3225 p2.node(),
3225 user,
3226 user,
3226 ctx.date(),
3227 ctx.date(),
3227 ctx.extra().copy(),
3228 ctx.extra().copy(),
3228 p1copies,
3229 p1copies,
3229 p2copies,
3230 p2copies,
3230 filesadded,
3231 filesadded,
3231 filesremoved,
3232 filesremoved,
3232 )
3233 )
3233 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3234 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3234 self.hook(
3235 self.hook(
3235 b'pretxncommit',
3236 b'pretxncommit',
3236 throw=True,
3237 throw=True,
3237 node=hex(n),
3238 node=hex(n),
3238 parent1=xp1,
3239 parent1=xp1,
3239 parent2=xp2,
3240 parent2=xp2,
3240 )
3241 )
3241 # set the new commit is proper phase
3242 # set the new commit is proper phase
3242 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3243 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3243 if targetphase:
3244 if targetphase:
3244 # retract boundary do not alter parent changeset.
3245 # retract boundary do not alter parent changeset.
3245 # if a parent have higher the resulting phase will
3246 # if a parent have higher the resulting phase will
3246 # be compliant anyway
3247 # be compliant anyway
3247 #
3248 #
3248 # if minimal phase was 0 we don't need to retract anything
3249 # if minimal phase was 0 we don't need to retract anything
3249 phases.registernew(self, tr, targetphase, [n])
3250 phases.registernew(self, tr, targetphase, [n])
3250 return n
3251 return n
3251
3252
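    # Editorial sketch (not in the original source): the copy-metadata policy
    # above is driven by configuration. With hypothetical hgrc contents such
    # as:
    #
    #   [experimental]
    #   copies.write-to = changeset-only
    #
    # p1copies/p2copies are recorded on the changeset and the per-file copy
    # metadata in the filelogs is skipped.
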
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

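    # Illustrative sketch (not from the original source): a hypothetical
    # extension registering a post-status fixup; the callback receives the
    # working context and the status object, as documented above.
    #
    #   def _afterstatus(wctx, status):
    #       wctx.repo().ui.debug(
    #           b'%d modified files seen\n' % len(status.modified)
    #       )
    #   repo.addpostdsstatus(_afterstatus)
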
3339 def postdsstatus(self):
3340 def postdsstatus(self):
3340 """Used by workingctx to get the list of post-dirstate-status hooks."""
3341 """Used by workingctx to get the list of post-dirstate-status hooks."""
3341 return self._postdsstatus
3342 return self._postdsstatus
3342
3343
3343 def clearpostdsstatus(self):
3344 def clearpostdsstatus(self):
3344 """Used by workingctx to clear post-dirstate-status hooks."""
3345 """Used by workingctx to clear post-dirstate-status hooks."""
3345 del self._postdsstatus[:]
3346 del self._postdsstatus[:]
3346
3347
3347 def heads(self, start=None):
3348 def heads(self, start=None):
3348 if start is None:
3349 if start is None:
3349 cl = self.changelog
3350 cl = self.changelog
3350 headrevs = reversed(cl.headrevs())
3351 headrevs = reversed(cl.headrevs())
3351 return [cl.node(rev) for rev in headrevs]
3352 return [cl.node(rev) for rev in headrevs]
3352
3353
3353 heads = self.changelog.heads(start)
3354 heads = self.changelog.heads(start)
3354 # sort the output in rev descending order
3355 # sort the output in rev descending order
3355 return sorted(heads, key=self.changelog.rev, reverse=True)
3356 return sorted(heads, key=self.changelog.rev, reverse=True)
3356
3357
3357 def branchheads(self, branch=None, start=None, closed=False):
3358 def branchheads(self, branch=None, start=None, closed=False):
3358 '''return a (possibly filtered) list of heads for the given branch
3359 '''return a (possibly filtered) list of heads for the given branch
3359
3360
3360 Heads are returned in topological order, from newest to oldest.
3361 Heads are returned in topological order, from newest to oldest.
3361 If branch is None, use the dirstate branch.
3362 If branch is None, use the dirstate branch.
3362 If start is not None, return only heads reachable from start.
3363 If start is not None, return only heads reachable from start.
3363 If closed is True, return heads that are marked as closed as well.
3364 If closed is True, return heads that are marked as closed as well.
3364 '''
3365 '''
3365 if branch is None:
3366 if branch is None:
3366 branch = self[None].branch()
3367 branch = self[None].branch()
3367 branches = self.branchmap()
3368 branches = self.branchmap()
3368 if not branches.hasbranch(branch):
3369 if not branches.hasbranch(branch):
3369 return []
3370 return []
3370 # the cache returns heads ordered lowest to highest
3371 # the cache returns heads ordered lowest to highest
3371 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3372 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3372 if start is not None:
3373 if start is not None:
3373 # filter out the heads that cannot be reached from startrev
3374 # filter out the heads that cannot be reached from startrev
3374 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3375 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3375 bheads = [h for h in bheads if h in fbheads]
3376 bheads = [h for h in bheads if h in fbheads]
3376 return bheads
3377 return bheads
3377
3378
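# -- editorial sketch, not part of the diff: branchheads() usage with an
# assumed open repository; returns binary nodeids, newest first, and an
# empty list for an unknown branch.
def _example_branchheads(repo):
    return repo.branchheads(b'default', closed=True)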
3378 def branches(self, nodes):
3379 def branches(self, nodes):
3379 if not nodes:
3380 if not nodes:
3380 nodes = [self.changelog.tip()]
3381 nodes = [self.changelog.tip()]
3381 b = []
3382 b = []
3382 for n in nodes:
3383 for n in nodes:
3383 t = n
3384 t = n
3384 while True:
3385 while True:
3385 p = self.changelog.parents(n)
3386 p = self.changelog.parents(n)
3386 if p[1] != nullid or p[0] == nullid:
3387 if p[1] != nullid or p[0] == nullid:
3387 b.append((t, n, p[0], p[1]))
3388 b.append((t, n, p[0], p[1]))
3388 break
3389 break
3389 n = p[0]
3390 n = p[0]
3390 return b
3391 return b
3391
3392
3392 def between(self, pairs):
3393 def between(self, pairs):
3393 r = []
3394 r = []
3394
3395
3395 for top, bottom in pairs:
3396 for top, bottom in pairs:
3396 n, l, i = top, [], 0
3397 n, l, i = top, [], 0
3397 f = 1
3398 f = 1
3398
3399
3399 while n != bottom and n != nullid:
3400 while n != bottom and n != nullid:
3400 p = self.changelog.parents(n)[0]
3401 p = self.changelog.parents(n)[0]
3401 if i == f:
3402 if i == f:
3402 l.append(n)
3403 l.append(n)
3403 f = f * 2
3404 f = f * 2
3404 n = p
3405 n = p
3405 i += 1
3406 i += 1
3406
3407
3407 r.append(l)
3408 r.append(l)
3408
3409
3409 return r
3410 return r
3410
3411
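# -- editorial note, not part of the diff: for each (top, bottom) pair,
# between() walks first parents from top and records the nodes reached
# after 1, 2, 4, 8, ... steps (append when i == f, then f doubles), giving
# a logarithmically spaced sample of the chain for the legacy wire protocol.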
3411 def checkpush(self, pushop):
3412 def checkpush(self, pushop):
3412 """Extensions can override this function if additional checks have
3413 """Extensions can override this function if additional checks have
3413 to be performed before pushing, or call it if they override the push
3414 to be performed before pushing, or call it if they override the push
3414 command.
3415 command.
3415 """
3416 """
3416
3417
3417 @unfilteredpropertycache
3418 @unfilteredpropertycache
3418 def prepushoutgoinghooks(self):
3419 def prepushoutgoinghooks(self):
3419 """Return util.hooks consists of a pushop with repo, remote, outgoing
3420 """Return util.hooks consists of a pushop with repo, remote, outgoing
3420 methods, which are called before pushing changesets.
3421 methods, which are called before pushing changesets.
3421 """
3422 """
3422 return util.hooks()
3423 return util.hooks()
3423
3424
3424 def pushkey(self, namespace, key, old, new):
3425 def pushkey(self, namespace, key, old, new):
3425 try:
3426 try:
3426 tr = self.currenttransaction()
3427 tr = self.currenttransaction()
3427 hookargs = {}
3428 hookargs = {}
3428 if tr is not None:
3429 if tr is not None:
3429 hookargs.update(tr.hookargs)
3430 hookargs.update(tr.hookargs)
3430 hookargs = pycompat.strkwargs(hookargs)
3431 hookargs = pycompat.strkwargs(hookargs)
3431 hookargs['namespace'] = namespace
3432 hookargs['namespace'] = namespace
3432 hookargs['key'] = key
3433 hookargs['key'] = key
3433 hookargs['old'] = old
3434 hookargs['old'] = old
3434 hookargs['new'] = new
3435 hookargs['new'] = new
3435 self.hook(b'prepushkey', throw=True, **hookargs)
3436 self.hook(b'prepushkey', throw=True, **hookargs)
3436 except error.HookAbort as exc:
3437 except error.HookAbort as exc:
3437 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3438 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3438 if exc.hint:
3439 if exc.hint:
3439 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3440 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3440 return False
3441 return False
3441 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3442 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3442 ret = pushkey.push(self, namespace, key, old, new)
3443 ret = pushkey.push(self, namespace, key, old, new)
3443
3444
3444 def runhook(unused_success):
3445 def runhook(unused_success):
3445 self.hook(
3446 self.hook(
3446 b'pushkey',
3447 b'pushkey',
3447 namespace=namespace,
3448 namespace=namespace,
3448 key=key,
3449 key=key,
3449 old=old,
3450 old=old,
3450 new=new,
3451 new=new,
3451 ret=ret,
3452 ret=ret,
3452 )
3453 )
3453
3454
3454 self._afterlock(runhook)
3455 self._afterlock(runhook)
3455 return ret
3456 return ret
3456
3457
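# -- editorial sketch, not part of the diff: driving pushkey() for the
# standard 'bookmarks' namespace. `repo` is assumed; `newnode` must be a
# 40-character hex bytestring, and old == b'' means "create".
def _example_pushkey(repo, newnode):
    # returns False when a prepushkey hook aborts the update
    return repo.pushkey(b'bookmarks', b'my-book', b'', newnode)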
3457 def listkeys(self, namespace):
3458 def listkeys(self, namespace):
3458 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3459 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3459 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3460 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3460 values = pushkey.list(self, namespace)
3461 values = pushkey.list(self, namespace)
3461 self.hook(b'listkeys', namespace=namespace, values=values)
3462 self.hook(b'listkeys', namespace=namespace, values=values)
3462 return values
3463 return values
3463
3464
3464 def debugwireargs(self, one, two, three=None, four=None, five=None):
3465 def debugwireargs(self, one, two, three=None, four=None, five=None):
3465 '''used to test argument passing over the wire'''
3466 '''used to test argument passing over the wire'''
3466 return b"%s %s %s %s %s" % (
3467 return b"%s %s %s %s %s" % (
3467 one,
3468 one,
3468 two,
3469 two,
3469 pycompat.bytestr(three),
3470 pycompat.bytestr(three),
3470 pycompat.bytestr(four),
3471 pycompat.bytestr(four),
3471 pycompat.bytestr(five),
3472 pycompat.bytestr(five),
3472 )
3473 )
3473
3474
3474 def savecommitmessage(self, text):
3475 def savecommitmessage(self, text):
3475 fp = self.vfs(b'last-message.txt', b'wb')
3476 fp = self.vfs(b'last-message.txt', b'wb')
3476 try:
3477 try:
3477 fp.write(text)
3478 fp.write(text)
3478 finally:
3479 finally:
3479 fp.close()
3480 fp.close()
3480 return self.pathto(fp.name[len(self.root) + 1 :])
3481 return self.pathto(fp.name[len(self.root) + 1 :])
3481
3482
3482
3483
3483 # used to avoid circular references so destructors work
3484 # used to avoid circular references so destructors work
3484 def aftertrans(files):
3485 def aftertrans(files):
3485 renamefiles = [tuple(t) for t in files]
3486 renamefiles = [tuple(t) for t in files]
3486
3487
3487 def a():
3488 def a():
3488 for vfs, src, dest in renamefiles:
3489 for vfs, src, dest in renamefiles:
3489 # if src and dest refer to the same file, vfs.rename is a no-op,
3490 # if src and dest refer to the same file, vfs.rename is a no-op,
3490 # leaving both src and dest on disk. delete dest to make sure
3491 # leaving both src and dest on disk. delete dest to make sure
3491 # the rename cannot be such a no-op.
3492 # the rename cannot be such a no-op.
3492 vfs.tryunlink(dest)
3493 vfs.tryunlink(dest)
3493 try:
3494 try:
3494 vfs.rename(src, dest)
3495 vfs.rename(src, dest)
3495 except OSError: # journal file does not yet exist
3496 except OSError: # journal file does not yet exist
3496 pass
3497 pass
3497
3498
3498 return a
3499 return a
3499
3500
3500
3501
3501 def undoname(fn):
3502 def undoname(fn):
3502 base, name = os.path.split(fn)
3503 base, name = os.path.split(fn)
3503 assert name.startswith(b'journal')
3504 assert name.startswith(b'journal')
3504 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3505 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3505
3506
3506
3507
3507 def instance(ui, path, create, intents=None, createopts=None):
3508 def instance(ui, path, create, intents=None, createopts=None):
3508 localpath = util.urllocalpath(path)
3509 localpath = util.urllocalpath(path)
3509 if create:
3510 if create:
3510 createrepository(ui, localpath, createopts=createopts)
3511 createrepository(ui, localpath, createopts=createopts)
3511
3512
3512 return makelocalrepository(ui, localpath, intents=intents)
3513 return makelocalrepository(ui, localpath, intents=intents)
3513
3514
3514
3515
3515 def islocal(path):
3516 def islocal(path):
3516 return True
3517 return True
3517
3518
3518
3519
3519 def defaultcreateopts(ui, createopts=None):
3520 def defaultcreateopts(ui, createopts=None):
3520 """Populate the default creation options for a repository.
3521 """Populate the default creation options for a repository.
3521
3522
3522 A dictionary of explicitly requested creation options can be passed
3523 A dictionary of explicitly requested creation options can be passed
3523 in. Missing keys will be populated.
3524 in. Missing keys will be populated.
3524 """
3525 """
3525 createopts = dict(createopts or {})
3526 createopts = dict(createopts or {})
3526
3527
3527 if b'backend' not in createopts:
3528 if b'backend' not in createopts:
3528 # experimental config: storage.new-repo-backend
3529 # experimental config: storage.new-repo-backend
3529 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3530 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3530
3531
3531 return createopts
3532 return createopts
3532
3533
3533
3534
3534 def newreporequirements(ui, createopts):
3535 def newreporequirements(ui, createopts):
3535 """Determine the set of requirements for a new local repository.
3536 """Determine the set of requirements for a new local repository.
3536
3537
3537 Extensions can wrap this function to specify custom requirements for
3538 Extensions can wrap this function to specify custom requirements for
3538 new repositories.
3539 new repositories.
3539 """
3540 """
3540 # If the repo is being created from a shared repository, we copy
3541 # If the repo is being created from a shared repository, we copy
3541 # its requirements.
3542 # its requirements.
3542 if b'sharedrepo' in createopts:
3543 if b'sharedrepo' in createopts:
3543 requirements = set(createopts[b'sharedrepo'].requirements)
3544 requirements = set(createopts[b'sharedrepo'].requirements)
3544 if createopts.get(b'sharedrelative'):
3545 if createopts.get(b'sharedrelative'):
3545 requirements.add(b'relshared')
3546 requirements.add(b'relshared')
3546 else:
3547 else:
3547 requirements.add(b'shared')
3548 requirements.add(b'shared')
3548
3549
3549 return requirements
3550 return requirements
3550
3551
3551 if b'backend' not in createopts:
3552 if b'backend' not in createopts:
3552 raise error.ProgrammingError(
3553 raise error.ProgrammingError(
3553 b'backend key not present in createopts; '
3554 b'backend key not present in createopts; '
3554 b'was defaultcreateopts() called?'
3555 b'was defaultcreateopts() called?'
3555 )
3556 )
3556
3557
3557 if createopts[b'backend'] != b'revlogv1':
3558 if createopts[b'backend'] != b'revlogv1':
3558 raise error.Abort(
3559 raise error.Abort(
3559 _(
3560 _(
3560 b'unable to determine repository requirements for '
3561 b'unable to determine repository requirements for '
3561 b'storage backend: %s'
3562 b'storage backend: %s'
3562 )
3563 )
3563 % createopts[b'backend']
3564 % createopts[b'backend']
3564 )
3565 )
3565
3566
3566 requirements = {b'revlogv1'}
3567 requirements = {b'revlogv1'}
3567 if ui.configbool(b'format', b'usestore'):
3568 if ui.configbool(b'format', b'usestore'):
3568 requirements.add(b'store')
3569 requirements.add(b'store')
3569 if ui.configbool(b'format', b'usefncache'):
3570 if ui.configbool(b'format', b'usefncache'):
3570 requirements.add(b'fncache')
3571 requirements.add(b'fncache')
3571 if ui.configbool(b'format', b'dotencode'):
3572 if ui.configbool(b'format', b'dotencode'):
3572 requirements.add(b'dotencode')
3573 requirements.add(b'dotencode')
3573
3574
3574 compengine = ui.config(b'format', b'revlog-compression')
3575 compengine = ui.config(b'format', b'revlog-compression')
3575 if compengine not in util.compengines:
3576 if compengine not in util.compengines:
3576 raise error.Abort(
3577 raise error.Abort(
3577 _(
3578 _(
3578 b'compression engine %s defined by '
3579 b'compression engine %s defined by '
3579 b'format.revlog-compression not available'
3580 b'format.revlog-compression not available'
3580 )
3581 )
3581 % compengine,
3582 % compengine,
3582 hint=_(
3583 hint=_(
3583 b'run "hg debuginstall" to list available '
3584 b'run "hg debuginstall" to list available '
3584 b'compression engines'
3585 b'compression engines'
3585 ),
3586 ),
3586 )
3587 )
3587
3588
3588 # zlib is the historical default and doesn't need an explicit requirement.
3589 # zlib is the historical default and doesn't need an explicit requirement.
3589 elif compengine == b'zstd':
3590 elif compengine == b'zstd':
3590 requirements.add(b'revlog-compression-zstd')
3591 requirements.add(b'revlog-compression-zstd')
3591 elif compengine != b'zlib':
3592 elif compengine != b'zlib':
3592 requirements.add(b'exp-compression-%s' % compengine)
3593 requirements.add(b'exp-compression-%s' % compengine)
3593
3594
3594 if scmutil.gdinitconfig(ui):
3595 if scmutil.gdinitconfig(ui):
3595 requirements.add(b'generaldelta')
3596 requirements.add(b'generaldelta')
3596 if ui.configbool(b'format', b'sparse-revlog'):
3597 if ui.configbool(b'format', b'sparse-revlog'):
3597 requirements.add(SPARSEREVLOG_REQUIREMENT)
3598 requirements.add(SPARSEREVLOG_REQUIREMENT)
3598
3599
3599 # experimental config: format.exp-use-side-data
3600 # experimental config: format.exp-use-side-data
3600 if ui.configbool(b'format', b'exp-use-side-data'):
3601 if ui.configbool(b'format', b'exp-use-side-data'):
3601 requirements.add(SIDEDATA_REQUIREMENT)
3602 requirements.add(SIDEDATA_REQUIREMENT)
3602 # experimental config: format.exp-use-copies-side-data-changeset
3603 # experimental config: format.exp-use-copies-side-data-changeset
3603 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3604 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3604 requirements.add(SIDEDATA_REQUIREMENT)
3605 requirements.add(SIDEDATA_REQUIREMENT)
3605 requirements.add(COPIESSDC_REQUIREMENT)
3606 requirements.add(COPIESSDC_REQUIREMENT)
3606 if ui.configbool(b'experimental', b'treemanifest'):
3607 if ui.configbool(b'experimental', b'treemanifest'):
3607 requirements.add(b'treemanifest')
3608 requirements.add(b'treemanifest')
3608
3609
3609 revlogv2 = ui.config(b'experimental', b'revlogv2')
3610 revlogv2 = ui.config(b'experimental', b'revlogv2')
3610 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3611 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3611 requirements.remove(b'revlogv1')
3612 requirements.remove(b'revlogv1')
3612 # generaldelta is implied by revlogv2.
3613 # generaldelta is implied by revlogv2.
3613 requirements.discard(b'generaldelta')
3614 requirements.discard(b'generaldelta')
3614 requirements.add(REVLOGV2_REQUIREMENT)
3615 requirements.add(REVLOGV2_REQUIREMENT)
3615 # experimental config: format.internal-phase
3616 # experimental config: format.internal-phase
3616 if ui.configbool(b'format', b'internal-phase'):
3617 if ui.configbool(b'format', b'internal-phase'):
3617 requirements.add(b'internal-phase')
3618 requirements.add(b'internal-phase')
3618
3619
3619 if createopts.get(b'narrowfiles'):
3620 if createopts.get(b'narrowfiles'):
3620 requirements.add(repository.NARROW_REQUIREMENT)
3621 requirements.add(repository.NARROW_REQUIREMENT)
3621
3622
3622 if createopts.get(b'lfs'):
3623 if createopts.get(b'lfs'):
3623 requirements.add(b'lfs')
3624 requirements.add(b'lfs')
3624
3625
3625 if ui.configbool(b'format', b'bookmarks-in-store'):
3626 if ui.configbool(b'format', b'bookmarks-in-store'):
3626 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3627 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3627
3628
3628 return requirements
3629 return requirements
3629
3630
3630
3631
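# -- editorial note, not part of the diff: with stock format.* defaults
# (usestore, usefncache, dotencode, generaldelta, sparse-revlog on; zlib
# compression) newreporequirements() yields approximately:
#   {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#    SPARSEREVLOG_REQUIREMENT}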
3631 def filterknowncreateopts(ui, createopts):
3632 def filterknowncreateopts(ui, createopts):
3632 """Filters a dict of repo creation options against options that are known.
3633 """Filters a dict of repo creation options against options that are known.
3633
3634
3634 Receives a dict of repo creation options and returns a dict of those
3635 Receives a dict of repo creation options and returns a dict of those
3635 options that we don't know how to handle.
3636 options that we don't know how to handle.
3636
3637
3637 This function is called as part of repository creation. If the
3638 This function is called as part of repository creation. If the
3638 returned dict contains any items, repository creation will not
3639 returned dict contains any items, repository creation will not
3639 be allowed, as it means there was a request to create a repository
3640 be allowed, as it means there was a request to create a repository
3640 with options not recognized by loaded code.
3641 with options not recognized by loaded code.
3641
3642
3642 Extensions can wrap this function to filter out creation options
3643 Extensions can wrap this function to filter out creation options
3643 they know how to handle.
3644 they know how to handle.
3644 """
3645 """
3645 known = {
3646 known = {
3646 b'backend',
3647 b'backend',
3647 b'lfs',
3648 b'lfs',
3648 b'narrowfiles',
3649 b'narrowfiles',
3649 b'sharedrepo',
3650 b'sharedrepo',
3650 b'sharedrelative',
3651 b'sharedrelative',
3651 b'shareditems',
3652 b'shareditems',
3652 b'shallowfilestore',
3653 b'shallowfilestore',
3653 }
3654 }
3654
3655
3655 return {k: v for k, v in createopts.items() if k not in known}
3656 return {k: v for k, v in createopts.items() if k not in known}
3656
3657
3657
3658
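# -- editorial sketch, not part of the diff: an extension claiming its own
# creation option by wrapping filterknowncreateopts(). The 'myfeature'
# key and the wrapfunction call are illustrative.
def _example_filteropts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop(b'myfeature', None)  # this extension handles 'myfeature'
    return unknown
# extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                         _example_filteropts)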
3658 def createrepository(ui, path, createopts=None):
3659 def createrepository(ui, path, createopts=None):
3659 """Create a new repository in a vfs.
3660 """Create a new repository in a vfs.
3660
3661
3661 ``path`` path to the new repo's working directory.
3662 ``path`` path to the new repo's working directory.
3662 ``createopts`` options for the new repository.
3663 ``createopts`` options for the new repository.
3663
3664
3664 The following keys for ``createopts`` are recognized:
3665 The following keys for ``createopts`` are recognized:
3665
3666
3666 backend
3667 backend
3667 The storage backend to use.
3668 The storage backend to use.
3668 lfs
3669 lfs
3669 Repository will be created with ``lfs`` requirement. The lfs extension
3670 Repository will be created with ``lfs`` requirement. The lfs extension
3670 will automatically be loaded when the repository is accessed.
3671 will automatically be loaded when the repository is accessed.
3671 narrowfiles
3672 narrowfiles
3672 Set up repository to support narrow file storage.
3673 Set up repository to support narrow file storage.
3673 sharedrepo
3674 sharedrepo
3674 Repository object from which storage should be shared.
3675 Repository object from which storage should be shared.
3675 sharedrelative
3676 sharedrelative
3676 Boolean indicating if the path to the shared repo should be
3677 Boolean indicating if the path to the shared repo should be
3677 stored as relative. By default, the pointer to the "parent" repo
3678 stored as relative. By default, the pointer to the "parent" repo
3678 is stored as an absolute path.
3679 is stored as an absolute path.
3679 shareditems
3680 shareditems
3680 Set of items to share to the new repository (in addition to storage).
3681 Set of items to share to the new repository (in addition to storage).
3681 shallowfilestore
3682 shallowfilestore
3682 Indicates that storage for files should be shallow (not all ancestor
3683 Indicates that storage for files should be shallow (not all ancestor
3683 revisions are known).
3684 revisions are known).
3684 """
3685 """
3685 createopts = defaultcreateopts(ui, createopts=createopts)
3686 createopts = defaultcreateopts(ui, createopts=createopts)
3686
3687
3687 unknownopts = filterknowncreateopts(ui, createopts)
3688 unknownopts = filterknowncreateopts(ui, createopts)
3688
3689
3689 if not isinstance(unknownopts, dict):
3690 if not isinstance(unknownopts, dict):
3690 raise error.ProgrammingError(
3691 raise error.ProgrammingError(
3691 b'filterknowncreateopts() did not return a dict'
3692 b'filterknowncreateopts() did not return a dict'
3692 )
3693 )
3693
3694
3694 if unknownopts:
3695 if unknownopts:
3695 raise error.Abort(
3696 raise error.Abort(
3696 _(
3697 _(
3697 b'unable to create repository because of unknown '
3698 b'unable to create repository because of unknown '
3698 b'creation option: %s'
3699 b'creation option: %s'
3699 )
3700 )
3700 % b', '.join(sorted(unknownopts)),
3701 % b', '.join(sorted(unknownopts)),
3701 hint=_(b'is a required extension not loaded?'),
3702 hint=_(b'is a required extension not loaded?'),
3702 )
3703 )
3703
3704
3704 requirements = newreporequirements(ui, createopts=createopts)
3705 requirements = newreporequirements(ui, createopts=createopts)
3705
3706
3706 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3707 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3707
3708
3708 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3709 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3709 if hgvfs.exists():
3710 if hgvfs.exists():
3710 raise error.RepoError(_(b'repository %s already exists') % path)
3711 raise error.RepoError(_(b'repository %s already exists') % path)
3711
3712
3712 if b'sharedrepo' in createopts:
3713 if b'sharedrepo' in createopts:
3713 sharedpath = createopts[b'sharedrepo'].sharedpath
3714 sharedpath = createopts[b'sharedrepo'].sharedpath
3714
3715
3715 if createopts.get(b'sharedrelative'):
3716 if createopts.get(b'sharedrelative'):
3716 try:
3717 try:
3717 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3718 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3718 except (IOError, ValueError) as e:
3719 except (IOError, ValueError) as e:
3719 # ValueError is raised on Windows if the drive letters differ
3720 # ValueError is raised on Windows if the drive letters differ
3720 # on each path.
3721 # on each path.
3721 raise error.Abort(
3722 raise error.Abort(
3722 _(b'cannot calculate relative path'),
3723 _(b'cannot calculate relative path'),
3723 hint=stringutil.forcebytestr(e),
3724 hint=stringutil.forcebytestr(e),
3724 )
3725 )
3725
3726
3726 if not wdirvfs.exists():
3727 if not wdirvfs.exists():
3727 wdirvfs.makedirs()
3728 wdirvfs.makedirs()
3728
3729
3729 hgvfs.makedir(notindexed=True)
3730 hgvfs.makedir(notindexed=True)
3730 if b'sharedrepo' not in createopts:
3731 if b'sharedrepo' not in createopts:
3731 hgvfs.mkdir(b'cache')
3732 hgvfs.mkdir(b'cache')
3732 hgvfs.mkdir(b'wcache')
3733 hgvfs.mkdir(b'wcache')
3733
3734
3734 if b'store' in requirements and b'sharedrepo' not in createopts:
3735 if b'store' in requirements and b'sharedrepo' not in createopts:
3735 hgvfs.mkdir(b'store')
3736 hgvfs.mkdir(b'store')
3736
3737
3737 # We create an invalid changelog outside the store so very old
3738 # We create an invalid changelog outside the store so very old
3738 # Mercurial versions (which didn't know about the requirements
3739 # Mercurial versions (which didn't know about the requirements
3739 # file) encounter an error on reading the changelog. This
3740 # file) encounter an error on reading the changelog. This
3740 # effectively locks out old clients and prevents them from
3741 # effectively locks out old clients and prevents them from
3741 # mucking with a repo in an unknown format.
3742 # mucking with a repo in an unknown format.
3742 #
3743 #
3743 # The revlog header has version 2, which won't be recognized by
3744 # The revlog header has version 2, which won't be recognized by
3744 # such old clients.
3745 # such old clients.
3745 hgvfs.append(
3746 hgvfs.append(
3746 b'00changelog.i',
3747 b'00changelog.i',
3747 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3748 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3748 b'layout',
3749 b'layout',
3749 )
3750 )
3750
3751
3751 scmutil.writerequires(hgvfs, requirements)
3752 scmutil.writerequires(hgvfs, requirements)
3752
3753
3753 # Write out file telling readers where to find the shared store.
3754 # Write out file telling readers where to find the shared store.
3754 if b'sharedrepo' in createopts:
3755 if b'sharedrepo' in createopts:
3755 hgvfs.write(b'sharedpath', sharedpath)
3756 hgvfs.write(b'sharedpath', sharedpath)
3756
3757
3757 if createopts.get(b'shareditems'):
3758 if createopts.get(b'shareditems'):
3758 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3759 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3759 hgvfs.write(b'shared', shared)
3760 hgvfs.write(b'shared', shared)
3760
3761
3761
3762
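# -- editorial sketch, not part of the diff: creating a repository with
# the lfs requirement; `ui` is assumed to be a mercurial.ui.ui instance
# and the bytes path is illustrative.
def _example_create(ui):
    createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})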
3762 def poisonrepository(repo):
3763 def poisonrepository(repo):
3763 """Poison a repository instance so it can no longer be used."""
3764 """Poison a repository instance so it can no longer be used."""
3764 # Perform any cleanup on the instance.
3765 # Perform any cleanup on the instance.
3765 repo.close()
3766 repo.close()
3766
3767
3767 # Our strategy is to replace the type of the object with one that
3768 # Our strategy is to replace the type of the object with one that
3768 # has all attribute lookups result in error.
3769 # has all attribute lookups result in error.
3769 #
3770 #
3770 # But we have to allow the close() method because some constructors
3771 # But we have to allow the close() method because some constructors
3771 # of repos call close() on repo references.
3772 # of repos call close() on repo references.
3772 class poisonedrepository(object):
3773 class poisonedrepository(object):
3773 def __getattribute__(self, item):
3774 def __getattribute__(self, item):
3774 if item == 'close':
3775 if item == 'close':
3775 return object.__getattribute__(self, item)
3776 return object.__getattribute__(self, item)
3776
3777
3777 raise error.ProgrammingError(
3778 raise error.ProgrammingError(
3778 b'repo instances should not be used after unshare'
3779 b'repo instances should not be used after unshare'
3779 )
3780 )
3780
3781
3781 def close(self):
3782 def close(self):
3782 pass
3783 pass
3783
3784
3784 # We may have a repoview, which intercepts __setattr__. So be sure
3785 # We may have a repoview, which intercepts __setattr__. So be sure
3785 # we operate at the lowest level possible.
3786 # we operate at the lowest level possible.
3786 object.__setattr__(repo, '__class__', poisonedrepository)
3787 object.__setattr__(repo, '__class__', poisonedrepository)
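# -- editorial note, not part of the diff: swapping __class__ through
# object.__setattr__ (bypassing any repoview __setattr__) makes every
# subsequent attribute lookup raise ProgrammingError while close() keeps
# working, which is cheaper than guarding each attribute individually.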
@@ -1,2262 +1,2264 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import itertools
11 import itertools
12 import struct
12 import struct
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from .pycompat import getattr
22 from .pycompat import getattr
23 from . import (
23 from . import (
24 encoding,
24 error,
25 error,
25 mdiff,
26 mdiff,
26 pathutil,
27 pathutil,
27 policy,
28 policy,
28 pycompat,
29 pycompat,
29 revlog,
30 revlog,
30 util,
31 util,
31 )
32 )
32 from .interfaces import (
33 from .interfaces import (
33 repository,
34 repository,
34 util as interfaceutil,
35 util as interfaceutil,
35 )
36 )
36
37
37 parsers = policy.importmod('parsers')
38 parsers = policy.importmod('parsers')
38 propertycache = util.propertycache
39 propertycache = util.propertycache
39
40
40 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
41 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
41 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
42 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
42
43
43
44
44 def _parse(data):
45 def _parse(data):
45 # This method does a little bit of excessive-looking
46 # This method does a little bit of excessive-looking
46 # precondition checking. This is so that the behavior of this
47 # precondition checking. This is so that the behavior of this
47 # class exactly matches its C counterpart to try and help
48 # class exactly matches its C counterpart to try and help
48 # prevent surprise breakage for anyone that develops against
49 # prevent surprise breakage for anyone that develops against
49 # the pure version.
50 # the pure version.
50 if data and data[-1:] != b'\n':
51 if data and data[-1:] != b'\n':
51 raise ValueError(b'Manifest did not end in a newline.')
52 raise ValueError(b'Manifest did not end in a newline.')
52 prev = None
53 prev = None
53 for l in data.splitlines():
54 for l in data.splitlines():
54 if prev is not None and prev > l:
55 if prev is not None and prev > l:
55 raise ValueError(b'Manifest lines not in sorted order.')
56 raise ValueError(b'Manifest lines not in sorted order.')
56 prev = l
57 prev = l
57 f, n = l.split(b'\0')
58 f, n = l.split(b'\0')
58 if len(n) > 40:
59 if len(n) > 40:
59 yield f, bin(n[:40]), n[40:]
60 yield f, bin(n[:40]), n[40:]
60 else:
61 else:
61 yield f, bin(n), b''
62 yield f, bin(n), b''
62
63
63
64
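# -- editorial sketch, not part of the diff: each manifest record is
# 'path\0<40 hex chars><flags>\n', sorted by path; flags follow the hash
# with no separator. For example:
#   data = b'a.txt\x00' + b'1' * 40 + b'\n' + b'bin/tool\x00' + b'2' * 40 + b'x\n'
#   list(_parse(data)) == [(b'a.txt', bin(b'1' * 40), b''),
#                          (b'bin/tool', bin(b'2' * 40), b'x')]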
64 def _text(it):
65 def _text(it):
65 files = []
66 files = []
66 lines = []
67 lines = []
67 for f, n, fl in it:
68 for f, n, fl in it:
68 files.append(f)
69 files.append(f)
69 # if this is changed to support newlines in filenames,
70 # if this is changed to support newlines in filenames,
70 # be sure to check the templates/ dir again (especially *-raw.tmpl)
71 # be sure to check the templates/ dir again (especially *-raw.tmpl)
71 lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))
72 lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))
72
73
73 _checkforbidden(files)
74 _checkforbidden(files)
74 return b''.join(lines)
75 return b''.join(lines)
75
76
76
77
77 class lazymanifestiter(object):
78 class lazymanifestiter(object):
78 def __init__(self, lm):
79 def __init__(self, lm):
79 self.pos = 0
80 self.pos = 0
80 self.lm = lm
81 self.lm = lm
81
82
82 def __iter__(self):
83 def __iter__(self):
83 return self
84 return self
84
85
85 def next(self):
86 def next(self):
86 try:
87 try:
87 data, pos = self.lm._get(self.pos)
88 data, pos = self.lm._get(self.pos)
88 except IndexError:
89 except IndexError:
89 raise StopIteration
90 raise StopIteration
90 if pos == -1:
91 if pos == -1:
91 self.pos += 1
92 self.pos += 1
92 return data[0]
93 return data[0]
93 self.pos += 1
94 self.pos += 1
94 zeropos = data.find(b'\x00', pos)
95 zeropos = data.find(b'\x00', pos)
95 return data[pos:zeropos]
96 return data[pos:zeropos]
96
97
97 __next__ = next
98 __next__ = next
98
99
99
100
100 class lazymanifestiterentries(object):
101 class lazymanifestiterentries(object):
101 def __init__(self, lm):
102 def __init__(self, lm):
102 self.lm = lm
103 self.lm = lm
103 self.pos = 0
104 self.pos = 0
104
105
105 def __iter__(self):
106 def __iter__(self):
106 return self
107 return self
107
108
108 def next(self):
109 def next(self):
109 try:
110 try:
110 data, pos = self.lm._get(self.pos)
111 data, pos = self.lm._get(self.pos)
111 except IndexError:
112 except IndexError:
112 raise StopIteration
113 raise StopIteration
113 if pos == -1:
114 if pos == -1:
114 self.pos += 1
115 self.pos += 1
115 return data
116 return data
116 zeropos = data.find(b'\x00', pos)
117 zeropos = data.find(b'\x00', pos)
117 hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40)
118 hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40)
118 flags = self.lm._getflags(data, self.pos, zeropos)
119 flags = self.lm._getflags(data, self.pos, zeropos)
119 self.pos += 1
120 self.pos += 1
120 return (data[pos:zeropos], hashval, flags)
121 return (data[pos:zeropos], hashval, flags)
121
122
122 __next__ = next
123 __next__ = next
123
124
124
125
125 def unhexlify(data, extra, pos, length):
126 def unhexlify(data, extra, pos, length):
126 s = bin(data[pos : pos + length])
127 s = bin(data[pos : pos + length])
127 if extra:
128 if extra:
128 s += chr(extra & 0xFF)
129 s += chr(extra & 0xFF)
129 return s
130 return s
130
131
131
132
132 def _cmp(a, b):
133 def _cmp(a, b):
133 return (a > b) - (a < b)
134 return (a > b) - (a < b)
134
135
135
136
136 class _lazymanifest(object):
137 class _lazymanifest(object):
137 """A pure python manifest backed by a byte string. It is supplimented with
138 """A pure python manifest backed by a byte string. It is supplimented with
138 internal lists as it is modified, until it is compacted back to a pure byte
139 internal lists as it is modified, until it is compacted back to a pure byte
139 string.
140 string.
140
141
141 ``data`` is the initial manifest data.
142 ``data`` is the initial manifest data.
142
143
143 ``positions`` is a list of offsets, one per manifest entry. Positive
144 ``positions`` is a list of offsets, one per manifest entry. Positive
144 values are offsets into ``data``, negative values are offsets into the
145 values are offsets into ``data``, negative values are offsets into the
145 ``extradata`` list. When an entry is removed, its entry is dropped from
146 ``extradata`` list. When an entry is removed, its entry is dropped from
146 ``positions``. The values are encoded such that when walking the list and
147 ``positions``. The values are encoded such that when walking the list and
147 indexing into ``data`` or ``extradata`` as appropriate, the entries are
148 indexing into ``data`` or ``extradata`` as appropriate, the entries are
148 sorted by filename.
149 sorted by filename.
149
150
150 ``extradata`` is a list of (key, hash, flags) for entries that were added or
151 ``extradata`` is a list of (key, hash, flags) for entries that were added or
151 modified since the manifest was created or compacted.
152 modified since the manifest was created or compacted.
152 """
153 """
153
154
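# -- editorial note, not part of the diff: positions encoding example.
# positions == [0, -1, 57] means entry 0 starts at data[0], entry 1 lives
# in extradata[0] (negative values encode -(extradata index + 1)), and
# entry 2 starts at data[57]; iterating positions in order still yields
# filenames in sorted order.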
154 def __init__(
155 def __init__(
155 self,
156 self,
156 data,
157 data,
157 positions=None,
158 positions=None,
158 extrainfo=None,
159 extrainfo=None,
159 extradata=None,
160 extradata=None,
160 hasremovals=False,
161 hasremovals=False,
161 ):
162 ):
162 if positions is None:
163 if positions is None:
163 self.positions = self.findlines(data)
164 self.positions = self.findlines(data)
164 self.extrainfo = [0] * len(self.positions)
165 self.extrainfo = [0] * len(self.positions)
165 self.data = data
166 self.data = data
166 self.extradata = []
167 self.extradata = []
167 self.hasremovals = False
168 self.hasremovals = False
168 else:
169 else:
169 self.positions = positions[:]
170 self.positions = positions[:]
170 self.extrainfo = extrainfo[:]
171 self.extrainfo = extrainfo[:]
171 self.extradata = extradata[:]
172 self.extradata = extradata[:]
172 self.data = data
173 self.data = data
173 self.hasremovals = hasremovals
174 self.hasremovals = hasremovals
174
175
175 def findlines(self, data):
176 def findlines(self, data):
176 if not data:
177 if not data:
177 return []
178 return []
178 pos = data.find(b"\n")
179 pos = data.find(b"\n")
179 if pos == -1 or data[-1:] != b'\n':
180 if pos == -1 or data[-1:] != b'\n':
180 raise ValueError(b"Manifest did not end in a newline.")
181 raise ValueError(b"Manifest did not end in a newline.")
181 positions = [0]
182 positions = [0]
182 prev = data[: data.find(b'\x00')]
183 prev = data[: data.find(b'\x00')]
183 while pos < len(data) - 1 and pos != -1:
184 while pos < len(data) - 1 and pos != -1:
184 positions.append(pos + 1)
185 positions.append(pos + 1)
185 nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
186 nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
186 if nexts < prev:
187 if nexts < prev:
187 raise ValueError(b"Manifest lines not in sorted order.")
188 raise ValueError(b"Manifest lines not in sorted order.")
188 prev = nexts
189 prev = nexts
189 pos = data.find(b"\n", pos + 1)
190 pos = data.find(b"\n", pos + 1)
190 return positions
191 return positions
191
192
192 def _get(self, index):
193 def _get(self, index):
193 # get the position encoded in pos:
194 # get the position encoded in pos:
194 # positive number is an index in 'data'
195 # positive number is an index in 'data'
195 # negative number is in extrapieces
196 # negative number is in extrapieces
196 pos = self.positions[index]
197 pos = self.positions[index]
197 if pos >= 0:
198 if pos >= 0:
198 return self.data, pos
199 return self.data, pos
199 return self.extradata[-pos - 1], -1
200 return self.extradata[-pos - 1], -1
200
201
201 def _getkey(self, pos):
202 def _getkey(self, pos):
202 if pos >= 0:
203 if pos >= 0:
203 return self.data[pos : self.data.find(b'\x00', pos + 1)]
204 return self.data[pos : self.data.find(b'\x00', pos + 1)]
204 return self.extradata[-pos - 1][0]
205 return self.extradata[-pos - 1][0]
205
206
206 def bsearch(self, key):
207 def bsearch(self, key):
207 first = 0
208 first = 0
208 last = len(self.positions) - 1
209 last = len(self.positions) - 1
209
210
210 while first <= last:
211 while first <= last:
211 midpoint = (first + last) // 2
212 midpoint = (first + last) // 2
212 nextpos = self.positions[midpoint]
213 nextpos = self.positions[midpoint]
213 candidate = self._getkey(nextpos)
214 candidate = self._getkey(nextpos)
214 r = _cmp(key, candidate)
215 r = _cmp(key, candidate)
215 if r == 0:
216 if r == 0:
216 return midpoint
217 return midpoint
217 else:
218 else:
218 if r < 0:
219 if r < 0:
219 last = midpoint - 1
220 last = midpoint - 1
220 else:
221 else:
221 first = midpoint + 1
222 first = midpoint + 1
222 return -1
223 return -1
223
224
224 def bsearch2(self, key):
225 def bsearch2(self, key):
225 # same as the above, but always returns a (position, found) pair,
226 # same as the above, but always returns a (position, found) pair,
226 # even when the key is absent; kept separate for performance reasons
227 # even when the key is absent; kept separate for performance reasons
227 first = 0
228 first = 0
228 last = len(self.positions) - 1
229 last = len(self.positions) - 1
229
230
230 while first <= last:
231 while first <= last:
231 midpoint = (first + last) // 2
232 midpoint = (first + last) // 2
232 nextpos = self.positions[midpoint]
233 nextpos = self.positions[midpoint]
233 candidate = self._getkey(nextpos)
234 candidate = self._getkey(nextpos)
234 r = _cmp(key, candidate)
235 r = _cmp(key, candidate)
235 if r == 0:
236 if r == 0:
236 return (midpoint, True)
237 return (midpoint, True)
237 else:
238 else:
238 if r < 0:
239 if r < 0:
239 last = midpoint - 1
240 last = midpoint - 1
240 else:
241 else:
241 first = midpoint + 1
242 first = midpoint + 1
242 return (first, False)
243 return (first, False)
243
244
244 def __contains__(self, key):
245 def __contains__(self, key):
245 return self.bsearch(key) != -1
246 return self.bsearch(key) != -1
246
247
247 def _getflags(self, data, needle, pos):
248 def _getflags(self, data, needle, pos):
248 start = pos + 41
249 start = pos + 41
249 end = data.find(b"\n", start)
250 end = data.find(b"\n", start)
250 if end == -1:
251 if end == -1:
251 end = len(data) - 1
252 end = len(data) - 1
252 if start == end:
253 if start == end:
253 return b''
254 return b''
254 return data[start:end]
255 return data[start:end]
255
256
256 def __getitem__(self, key):
257 def __getitem__(self, key):
257 if not isinstance(key, bytes):
258 if not isinstance(key, bytes):
258 raise TypeError(b"getitem: manifest keys must be a bytes.")
259 raise TypeError(b"getitem: manifest keys must be a bytes.")
259 needle = self.bsearch(key)
260 needle = self.bsearch(key)
260 if needle == -1:
261 if needle == -1:
261 raise KeyError
262 raise KeyError
262 data, pos = self._get(needle)
263 data, pos = self._get(needle)
263 if pos == -1:
264 if pos == -1:
264 return (data[1], data[2])
265 return (data[1], data[2])
265 zeropos = data.find(b'\x00', pos)
266 zeropos = data.find(b'\x00', pos)
266 assert 0 <= needle <= len(self.positions)
267 assert 0 <= needle <= len(self.positions)
267 assert len(self.extrainfo) == len(self.positions)
268 assert len(self.extrainfo) == len(self.positions)
268 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
269 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
269 flags = self._getflags(data, needle, zeropos)
270 flags = self._getflags(data, needle, zeropos)
270 return (hashval, flags)
271 return (hashval, flags)
271
272
272 def __delitem__(self, key):
273 def __delitem__(self, key):
273 needle, found = self.bsearch2(key)
274 needle, found = self.bsearch2(key)
274 if not found:
275 if not found:
275 raise KeyError
276 raise KeyError
276 cur = self.positions[needle]
277 cur = self.positions[needle]
277 self.positions = self.positions[:needle] + self.positions[needle + 1 :]
278 self.positions = self.positions[:needle] + self.positions[needle + 1 :]
278 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
279 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
279 if cur >= 0:
280 if cur >= 0:
280 # This does NOT unsort the list as far as the search functions are
281 # This does NOT unsort the list as far as the search functions are
281 # concerned, as they only examine lines mapped by self.positions.
282 # concerned, as they only examine lines mapped by self.positions.
282 self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
283 self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
283 self.hasremovals = True
284 self.hasremovals = True
284
285
285 def __setitem__(self, key, value):
286 def __setitem__(self, key, value):
286 if not isinstance(key, bytes):
287 if not isinstance(key, bytes):
287 raise TypeError(b"setitem: manifest keys must be a byte string.")
288 raise TypeError(b"setitem: manifest keys must be a byte string.")
288 if not isinstance(value, tuple) or len(value) != 2:
289 if not isinstance(value, tuple) or len(value) != 2:
289 raise TypeError(
290 raise TypeError(
290 b"Manifest values must be a tuple of (node, flags)."
291 b"Manifest values must be a tuple of (node, flags)."
291 )
292 )
292 hashval = value[0]
293 hashval = value[0]
293 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
294 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
294 raise TypeError(b"node must be a 20-byte byte string")
295 raise TypeError(b"node must be a 20-byte byte string")
295 flags = value[1]
296 flags = value[1]
296 if len(hashval) == 22:
297 if len(hashval) == 22:
297 hashval = hashval[:-1]
298 hashval = hashval[:-1]
298 if not isinstance(flags, bytes) or len(flags) > 1:
299 if not isinstance(flags, bytes) or len(flags) > 1:
299 raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
300 raise TypeError(b"flags must a 0 or 1 byte string, got %r", flags)
300 needle, found = self.bsearch2(key)
301 needle, found = self.bsearch2(key)
301 if found:
302 if found:
302 # put the item
303 # put the item
303 pos = self.positions[needle]
304 pos = self.positions[needle]
304 if pos < 0:
305 if pos < 0:
305 self.extradata[-pos - 1] = (key, hashval, value[1])
306 self.extradata[-pos - 1] = (key, hashval, value[1])
306 else:
307 else:
307 # don't bother patching the entry in place; shadow it via extradata
308 # don't bother patching the entry in place; shadow it via extradata
308 self.extradata.append((key, hashval, value[1]))
309 self.extradata.append((key, hashval, value[1]))
309 self.positions[needle] = -len(self.extradata)
310 self.positions[needle] = -len(self.extradata)
310 else:
311 else:
311 # not found, put it in with extra positions
312 # not found, put it in with extra positions
312 self.extradata.append((key, hashval, value[1]))
313 self.extradata.append((key, hashval, value[1]))
313 self.positions = (
314 self.positions = (
314 self.positions[:needle]
315 self.positions[:needle]
315 + [-len(self.extradata)]
316 + [-len(self.extradata)]
316 + self.positions[needle:]
317 + self.positions[needle:]
317 )
318 )
318 self.extrainfo = (
319 self.extrainfo = (
319 self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
320 self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
320 )
321 )
321
322
322 def copy(self):
323 def copy(self):
323 # XXX call _compact like in C?
324 # XXX call _compact like in C?
324 return _lazymanifest(
325 return _lazymanifest(
325 self.data,
326 self.data,
326 self.positions,
327 self.positions,
327 self.extrainfo,
328 self.extrainfo,
328 self.extradata,
329 self.extradata,
329 self.hasremovals,
330 self.hasremovals,
330 )
331 )
331
332
332 def _compact(self):
333 def _compact(self):
333 # hopefully not called TOO often
334 # hopefully not called TOO often
334 if len(self.extradata) == 0 and not self.hasremovals:
335 if len(self.extradata) == 0 and not self.hasremovals:
335 return
336 return
336 l = []
337 l = []
337 i = 0
338 i = 0
338 offset = 0
339 offset = 0
339 self.extrainfo = [0] * len(self.positions)
340 self.extrainfo = [0] * len(self.positions)
340 while i < len(self.positions):
341 while i < len(self.positions):
341 if self.positions[i] >= 0:
342 if self.positions[i] >= 0:
342 cur = self.positions[i]
343 cur = self.positions[i]
343 last_cut = cur
344 last_cut = cur
344
345
345 # Collect all contiguous entries in the buffer at the current
346 # Collect all contiguous entries in the buffer at the current
346 # offset, breaking out only for added/modified items held in
347 # offset, breaking out only for added/modified items held in
347 # extradata, or a deleted line prior to the next position.
348 # extradata, or a deleted line prior to the next position.
348 while True:
349 while True:
349 self.positions[i] = offset
350 self.positions[i] = offset
350 i += 1
351 i += 1
351 if i == len(self.positions) or self.positions[i] < 0:
352 if i == len(self.positions) or self.positions[i] < 0:
352 break
353 break
353
354
354 # A removed file has no positions[] entry, but does have an
355 # A removed file has no positions[] entry, but does have an
355 # overwritten first byte. Break out and find the end of the
356 # overwritten first byte. Break out and find the end of the
356 # current good entry/entries if there is a removed file
357 # current good entry/entries if there is a removed file
357 # before the next position.
358 # before the next position.
358 if (
359 if (
359 self.hasremovals
360 self.hasremovals
360 and self.data.find(b'\n\x00', cur, self.positions[i])
361 and self.data.find(b'\n\x00', cur, self.positions[i])
361 != -1
362 != -1
362 ):
363 ):
363 break
364 break
364
365
365 offset += self.positions[i] - cur
366 offset += self.positions[i] - cur
366 cur = self.positions[i]
367 cur = self.positions[i]
367 end_cut = self.data.find(b'\n', cur)
368 end_cut = self.data.find(b'\n', cur)
368 if end_cut != -1:
369 if end_cut != -1:
369 end_cut += 1
370 end_cut += 1
370 offset += end_cut - cur
371 offset += end_cut - cur
371 l.append(self.data[last_cut:end_cut])
372 l.append(self.data[last_cut:end_cut])
372 else:
373 else:
373 while i < len(self.positions) and self.positions[i] < 0:
374 while i < len(self.positions) and self.positions[i] < 0:
374 cur = self.positions[i]
375 cur = self.positions[i]
375 t = self.extradata[-cur - 1]
376 t = self.extradata[-cur - 1]
376 l.append(self._pack(t))
377 l.append(self._pack(t))
377 self.positions[i] = offset
378 self.positions[i] = offset
378 if len(t[1]) > 20:
379 if len(t[1]) > 20:
379 self.extrainfo[i] = ord(t[1][21])
380 self.extrainfo[i] = ord(t[1][21])
380 offset += len(l[-1])
381 offset += len(l[-1])
381 i += 1
382 i += 1
382 self.data = b''.join(l)
383 self.data = b''.join(l)
383 self.hasremovals = False
384 self.hasremovals = False
384 self.extradata = []
385 self.extradata = []
385
386
386 def _pack(self, d):
387 def _pack(self, d):
387 return d[0] + b'\x00' + hex(d[1][:20]) + d[2] + b'\n'
388 return d[0] + b'\x00' + hex(d[1][:20]) + d[2] + b'\n'
388
389
389 def text(self):
390 def text(self):
390 self._compact()
391 self._compact()
391 return self.data
392 return self.data
392
393
393 def diff(self, m2, clean=False):
394 def diff(self, m2, clean=False):
394 '''Finds changes between the current manifest and m2.'''
395 '''Finds changes between the current manifest and m2.'''
395 # XXX think whether efficiency matters here
396 # XXX think whether efficiency matters here
396 diff = {}
397 diff = {}
397
398
398 for fn, e1, flags in self.iterentries():
399 for fn, e1, flags in self.iterentries():
399 if fn not in m2:
400 if fn not in m2:
400 diff[fn] = (e1, flags), (None, b'')
401 diff[fn] = (e1, flags), (None, b'')
401 else:
402 else:
402 e2 = m2[fn]
403 e2 = m2[fn]
403 if (e1, flags) != e2:
404 if (e1, flags) != e2:
404 diff[fn] = (e1, flags), e2
405 diff[fn] = (e1, flags), e2
405 elif clean:
406 elif clean:
406 diff[fn] = None
407 diff[fn] = None
407
408
408 for fn, e2, flags in m2.iterentries():
409 for fn, e2, flags in m2.iterentries():
409 if fn not in self:
410 if fn not in self:
410 diff[fn] = (None, b''), (e2, flags)
411 diff[fn] = (None, b''), (e2, flags)
411
412
412 return diff
413 return diff
413
414
414 def iterentries(self):
415 def iterentries(self):
415 return lazymanifestiterentries(self)
416 return lazymanifestiterentries(self)
416
417
417 def iterkeys(self):
418 def iterkeys(self):
418 return lazymanifestiter(self)
419 return lazymanifestiter(self)
419
420
420 def __iter__(self):
421 def __iter__(self):
421 return lazymanifestiter(self)
422 return lazymanifestiter(self)
422
423
423 def __len__(self):
424 def __len__(self):
424 return len(self.positions)
425 return len(self.positions)
425
426
426 def filtercopy(self, filterfn):
427 def filtercopy(self, filterfn):
427 # XXX should be optimized
428 # XXX should be optimized
428 c = _lazymanifest(b'')
429 c = _lazymanifest(b'')
429 for f, n, fl in self.iterentries():
430 for f, n, fl in self.iterentries():
430 if filterfn(f):
431 if filterfn(f):
431 c[f] = n, fl
432 c[f] = n, fl
432 return c
433 return c
433
434
434
435
435 try:
436 try:
436 _lazymanifest = parsers.lazymanifest
437 _lazymanifest = parsers.lazymanifest
437 except AttributeError:
438 except AttributeError:
438 pass
439 pass
439
440
440
441
441 @interfaceutil.implementer(repository.imanifestdict)
442 @interfaceutil.implementer(repository.imanifestdict)
442 class manifestdict(object):
443 class manifestdict(object):
443 def __init__(self, data=b''):
444 def __init__(self, data=b''):
444 self._lm = _lazymanifest(data)
445 self._lm = _lazymanifest(data)
445
446
446 def __getitem__(self, key):
447 def __getitem__(self, key):
447 return self._lm[key][0]
448 return self._lm[key][0]
448
449
449 def find(self, key):
450 def find(self, key):
450 return self._lm[key]
451 return self._lm[key]
451
452
452 def __len__(self):
453 def __len__(self):
453 return len(self._lm)
454 return len(self._lm)
454
455
455 def __nonzero__(self):
456 def __nonzero__(self):
456 # nonzero is covered by the __len__ function, but implementing it here
457 # nonzero is covered by the __len__ function, but implementing it here
457 # makes it easier for extensions to override.
458 # makes it easier for extensions to override.
458 return len(self._lm) != 0
459 return len(self._lm) != 0
459
460
460 __bool__ = __nonzero__
461 __bool__ = __nonzero__
461
462
462 def __setitem__(self, key, node):
463 def __setitem__(self, key, node):
463 self._lm[key] = node, self.flags(key)
464 self._lm[key] = node, self.flags(key)
464
465
465 def __contains__(self, key):
466 def __contains__(self, key):
466 if key is None:
467 if key is None:
467 return False
468 return False
468 return key in self._lm
469 return key in self._lm
469
470
470 def __delitem__(self, key):
471 def __delitem__(self, key):
471 del self._lm[key]
472 del self._lm[key]
472
473
473 def __iter__(self):
474 def __iter__(self):
474 return self._lm.__iter__()
475 return self._lm.__iter__()
475
476
476 def iterkeys(self):
477 def iterkeys(self):
477 return self._lm.iterkeys()
478 return self._lm.iterkeys()
478
479
479 def keys(self):
480 def keys(self):
480 return list(self.iterkeys())
481 return list(self.iterkeys())
481
482
482 def filesnotin(self, m2, match=None):
483 def filesnotin(self, m2, match=None):
483 '''Set of files in this manifest that are not in the other'''
484 '''Set of files in this manifest that are not in the other'''
484 if match:
485 if match:
485 m1 = self.matches(match)
486 m1 = self.matches(match)
486 m2 = m2.matches(match)
487 m2 = m2.matches(match)
487 return m1.filesnotin(m2)
488 return m1.filesnotin(m2)
488 diff = self.diff(m2)
489 diff = self.diff(m2)
489 files = set(
490 files = set(
490 filepath
491 filepath
491 for filepath, hashflags in pycompat.iteritems(diff)
492 for filepath, hashflags in pycompat.iteritems(diff)
492 if hashflags[1][0] is None
493 if hashflags[1][0] is None
493 )
494 )
494 return files
495 return files
495
496
496 @propertycache
497 @propertycache
497 def _dirs(self):
498 def _dirs(self):
498 return pathutil.dirs(self)
499 return pathutil.dirs(self)
499
500
500 def dirs(self):
501 def dirs(self):
501 return self._dirs
502 return self._dirs
502
503
503 def hasdir(self, dir):
504 def hasdir(self, dir):
504 return dir in self._dirs
505 return dir in self._dirs
505
506
506 def _filesfastpath(self, match):
507 def _filesfastpath(self, match):
507 '''Checks whether we can correctly and quickly iterate over matcher
508 '''Checks whether we can correctly and quickly iterate over matcher
508 files instead of over manifest files.'''
509 files instead of over manifest files.'''
509 files = match.files()
510 files = match.files()
510 return len(files) < 100 and (
511 return len(files) < 100 and (
511 match.isexact()
512 match.isexact()
512 or (match.prefix() and all(fn in self for fn in files))
513 or (match.prefix() and all(fn in self for fn in files))
513 )
514 )
514
515
515 def walk(self, match):
516 def walk(self, match):
516 '''Generates matching file names.
517 '''Generates matching file names.
517
518
518 Equivalent to manifest.matches(match).iterkeys(), but without creating
519 Equivalent to manifest.matches(match).iterkeys(), but without creating
519 an entirely new manifest.
520 an entirely new manifest.
520
521
521 It also reports nonexistent files by marking them bad with match.bad().
522 It also reports nonexistent files by marking them bad with match.bad().
522 '''
523 '''
523 if match.always():
524 if match.always():
524 for f in iter(self):
525 for f in iter(self):
525 yield f
526 yield f
526 return
527 return
527
528
528 fset = set(match.files())
529 fset = set(match.files())
529
530
530 # avoid the entire walk if we're only looking for specific files
531 # avoid the entire walk if we're only looking for specific files
531 if self._filesfastpath(match):
532 if self._filesfastpath(match):
532 for fn in sorted(fset):
533 for fn in sorted(fset):
533 if fn in self:
534 if fn in self:
534 yield fn
535 yield fn
535 return
536 return
536
537
537 for fn in self:
538 for fn in self:
538 if fn in fset:
539 if fn in fset:
539 # specified pattern is the exact name
540 # specified pattern is the exact name
540 fset.remove(fn)
541 fset.remove(fn)
541 if match(fn):
542 if match(fn):
542 yield fn
543 yield fn
543
544
544 # for dirstate.walk, files=[''] means "walk the whole tree".
545 # for dirstate.walk, files=[''] means "walk the whole tree".
545 # follow that here, too
546 # follow that here, too
546 fset.discard(b'')
547 fset.discard(b'')
547
548
548 for fn in sorted(fset):
549 for fn in sorted(fset):
549 if not self.hasdir(fn):
550 if not self.hasdir(fn):
550 match.bad(fn, None)
551 match.bad(fn, None)
551
552
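# -- editorial sketch, not part of the diff: iterating a manifest through
# a matcher. `repo` is assumed, and matchmod stands for mercurial.match.
def _example_walk(repo):
    m = repo[b'.'].manifest()
    match = matchmod.match(repo.root, repo.root, [b'glob:*.py'])
    return list(m.walk(match))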
    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

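    # Illustrative sketch (not part of the original module): consuming the
    # diff() result shape documented above. `m1` and `m2` stand in for two
    # manifestdict instances and are hypothetical names:
    #
    #   for path, value in m1.diff(m2, clean=True).items():
    #       if value is None:
    #           continue                 # unchanged, reported via clean=True
    #       (n1, fl1), (n2, fl2) = value
    #       if n1 is None:
    #           pass                     # path only exists in m2 (added)
    #       elif n2 is None:
    #           pass                     # path only exists in m1 (removed)
    #       else:
    #           pass                     # node and/or flags changed
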
    def setflag(self, key, flag):
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key):
        try:
            return self._lm[key][1]
        except KeyError:
            return b''

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def items(self):
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [b""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = b"%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _(b"failed to remove %s from manifest") % f
                        )
                    l = b""
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, b"".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, b"".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext)
            )

        return arraytext, deltatext


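# Illustrative sketch (not part of the original module): fastdelta() above
# serializes each entry as "<filename>\0<40-hex-node><flags>\n". With a
# made-up 20-byte node, the encoding round-trips like this
# (binascii.hexlify plays the role of the hex() helper used above):
def _example_manifest_line():
    import binascii

    node = b'\x12' * 20  # hypothetical 20-byte nodeid
    line = b"%s\0%s%s\n" % (b'dir/file.txt', binascii.hexlify(node), b'x')
    name, rest = line.rstrip(b'\n').split(b'\0', 1)
    assert (name, rest[:40], rest[40:]) == (
        b'dir/file.txt',
        b'12' * 20,
        b'x',
    )
    return line

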
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string. If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''

    def advance(i, c):
        while i < lenm and m[i : i + 1] != c:
            i += 1
        return i

    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1 : start] != b'\n':
            start -= 1
        end = advance(start, b'\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, b'\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, b'\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, b'\n')
        return (lo, end + 1)
    else:
        return (lo, lo)


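# Illustrative sketch (not part of the original module): _msearch() over a
# tiny, already-sorted two-entry manifest text with fake 40-hex nodeids.
def _example_msearch():
    text = b"bar\0" + b"a" * 40 + b"\n" + b"foo\0" + b"b" * 40 + b"\n"
    start, end = _msearch(text, b"foo")
    assert text[start:end] == b"foo\0" + b"b" * 40 + b"\n"
    start, end = _msearch(text, b"baz")
    assert start == end  # absent: both point at the sorted insertion spot

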
def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        if b'\n' in f or b'\r' in f:
            raise error.StorageError(
                _(b"'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f)
            )


# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = bytearray()

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += bytearray(content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = b"".join(
        struct.pack(b">lll", start, end, len(content)) + content
        for start, end, content in x
    )
    return deltatext, newaddlist


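# Illustrative sketch (not part of the original module): each delta chunk
# built above is a ">lll" big-endian header (replace-start, replace-end,
# payload length) followed by the payload bytes -- the chunk layout the
# revlog layer consumes.
def _example_delta_chunk():
    import struct

    start, end, content = 0, 45, b"baz\0" + b"c" * 40 + b"\n"
    chunk = struct.pack(b">lll", start, end, len(content)) + content
    assert struct.unpack(b">lll", chunk[:12]) == (start, end, len(content))
    assert chunk[12:] == content
    return chunk

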
def _splittopdir(f):
    if b'/' in f:
        dir, subpath = f.split(b'/', 1)
        return dir + b'/', subpath
    else:
        return b'', f


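# Illustrative sketch (not part of the original module): _splittopdir()
# peels off the first path component, keeping the trailing slash on the
# directory name, which is how treemanifest keys its _dirs mapping.
def _example_splittopdir():
    assert _splittopdir(b'a/b/c') == (b'a/', b'b/c')
    assert _splittopdir(b'top-level-file') == (b'', b'top-level-file')

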
_noop = lambda s: None


class treemanifest(object):
    def __init__(self, dir=b'', text=b''):
        self._dir = dir
        self._node = nullid
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:

            def readsubtree(subdir, subm):
                raise AssertionError(
                    b'treemanifest constructor only accepts flat manifests'
                )

            self.parse(text, readsubtree)
            self._dirty = True  # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        return self._dir + path

    def _loadalllazy(self):
        selfdirs = self._dirs
        for d, (path, node, readsubtree, docopy) in pycompat.iteritems(
            self._lazydirs
        ):
            if docopy:
                selfdirs[d] = readsubtree(path, node).copy()
            else:
                selfdirs[d] = readsubtree(path, node)
        self._lazydirs = {}

    def _loadlazy(self, d):
        v = self._lazydirs.get(d)
        if v:
            path, node, readsubtree, docopy = v
            if docopy:
                self._dirs[d] = readsubtree(path, node).copy()
            else:
                self._dirs[d] = readsubtree(path, node)
            del self._lazydirs[d]

    def _loadchildrensetlazy(self, visit):
        if not visit:
            return None
        if visit == b'all' or visit == b'this':
            self._loadalllazy()
            return None

        loadlazy = self._loadlazy
        for k in visit:
            loadlazy(k + b'/')
        return visit

    def _loaddifflazy(self, t1, t2):
        """load items in t1 and t2 if they're needed for diffing.

        The current criteria are:
        - if it's not present in _lazydirs in either t1 or t2, load it in the
          other (it may already be loaded or it may not exist, doesn't matter)
        - if it's present in _lazydirs in both, compare the nodeid; if it
          differs, load it in both
        """
        toloadlazy = []
        for d, v1 in pycompat.iteritems(t1._lazydirs):
            v2 = t2._lazydirs.get(d)
            if not v2 or v2[1] != v1[1]:
                toloadlazy.append(d)
        for d, v1 in pycompat.iteritems(t2._lazydirs):
            if d not in t1._lazydirs:
                toloadlazy.append(d)

        for d in toloadlazy:
            t1._loadlazy(d)
            t2._loadlazy(d)

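    # Illustrative sketch (not part of the original module): the shape of a
    # _lazydirs entry as used by the three loaders above. `readsubtree` is
    # a hypothetical callable(path, node) -> treemanifest:
    #
    #   self._lazydirs[b'dir/'] = (
    #       b'dir/',        # full path of the subtree
    #       node,           # 20-byte nodeid of the subtree revision
    #       readsubtree,    # loader invoked on first access
    #       False,          # docopy: copy the cached subtree before use?
    #   )
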
    def __len__(self):
        self._load()
        size = len(self._files)
        self._loadalllazy()
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def __nonzero__(self):
        # Faster than "__len__() != 0" since it avoids loading sub-manifests
        return not self._isempty()

    __bool__ = __nonzero__

    def _isempty(self):
        self._load()  # for consistency; already loaded by all callers
        # See if we can skip loading everything.
        if self._files or (
            self._dirs and any(not m._isempty() for m in self._dirs.values())
        ):
            return False
        self._loadalllazy()
        return not self._dirs or all(m._isempty() for m in self._dirs.values())

    @encoding.strmethod
    def __repr__(self):
        return (
            b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
            % (
                self._dir,
                hex(self._node),
                bool(self._loadfunc is _noop),
                self._dirty,
                id(self),
            )
        )

    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir

    def node(self):
        '''The node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        '''
        assert not self._dirty
        return self._node

    def setnode(self, node):
        self._node = node
        self._dirty = False

    def iterentries(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(
            itertools.chain(self._dirs.items(), self._files.items())
        ):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, b'')
            else:
                for x in n.iterentries():
                    yield x

    def items(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(
            itertools.chain(self._dirs.items(), self._files.items())
        ):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in pycompat.iteritems(n):
                    yield f, sn

    iteritems = items

    def iterkeys(self):
        self._load()
        self._loadalllazy()
        for p in sorted(itertools.chain(self._dirs, self._files)):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p]:
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return False

            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return b''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._lazydirs or f in self._dirs:
                return b''
            return self._flags.get(f, b'')

    def find(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, b'')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            self._files[f] = n[:21]  # to match manifestdict's behavior
        self._dirty = True

    def _load(self):
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:

            def _copyfunc(s):
                self._load()
                s._lazydirs = {
                    d: (p, n, r, True)
                    for d, (p, n, r, c) in pycompat.iteritems(self._lazydirs)
                }
                sdirs = s._dirs
                for d, v in pycompat.iteritems(self._dirs):
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)

            if self._loadfunc is _noop:
                _copyfunc(copy)
            else:
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)

        files = set()

        def _filesnotin(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)
            for d, m1 in pycompat.iteritems(t1._dirs):
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        self._load()
        topdir, subdir = _splittopdir(dir)
        if topdir:
            self._loadlazy(topdir)
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        dirslash = dir + b'/'
        return dirslash in self._dirs or dirslash in self._lazydirs

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        visit = match.visitchildrenset(self._dir[:-1])
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        return self._matches(match)

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitchildrenset(self._dir[:-1])
        if visit == b'all':
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != b'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in pycompat.iteritems(self._dirs):
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match and not match.always():
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()

        def _iterativediff(t1, t2, stack):
            """compares two tree manifests and appends new tree-manifests
            which need to be compared to stack"""
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)

            for d, m1 in pycompat.iteritems(t1._dirs):
                m2 = t2._dirs.get(d, emptytree)
                stack.append((m1, m2))

            for d, m2 in pycompat.iteritems(t2._dirs):
                if d not in t1._dirs:
                    stack.append((emptytree, m2))

            for fn, n1 in pycompat.iteritems(t1._files):
                fl1 = t1._flags.get(fn, b'')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, b'')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in pycompat.iteritems(t2._files):
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, b'')
                    result[t2._subpath(fn)] = ((None, b''), (n2, fl2))

        stackls = []
        _iterativediff(self, m2, stackls)
        while stackls:
            t1, t2 = stackls.pop()
            # stackls is populated in the function call
            _iterativediff(t1, t2, stackls)
        return result

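    # Illustrative sketch (not part of the original module): the same
    # explicit-stack worklist pattern as _iterativediff above, which keeps
    # very deep directory trees from hitting Python's recursion limit:
    #
    #   stack = [(left_root, right_root)]
    #   while stack:
    #       t1, t2 = stack.pop()
    #       compare_one_level(t1, t2, stack)  # may push more (t1, t2) pairs
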
    def unmodifiedsince(self, m2):
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        selflazy = self._lazydirs
        subpath = self._subpath
        for f, n, fl in _parse(text):
            if fl == b't':
                f = f + b'/'
                # False below means "doesn't need to be copied" and can use the
                # cached value from readsubtree directly.
                selflazy[f] = (subpath(f), n, readsubtree, False)
            elif b'/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl

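    # Illustrative sketch (not part of the original module): how parse()
    # dispatches the three entry shapes, with made-up 40-hex nodeids:
    #
    #   b"lib\0<40-hex>t\n"      -> _lazydirs[b'lib/']  (subtree, loaded lazily)
    #   b"lib/a.py\0<40-hex>\n"  -> self[b'lib/a.py']   (flat-manifest path)
    #   b"setup.py\0<40-hex>x\n" -> _files[b'setup.py'], _flags = b'x'
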
    def text(self):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        return _text(self.iterentries())

    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        lazydirs = [
            (d[:-1], v[1], b't') for d, v in pycompat.iteritems(self._lazydirs)
        ]
        dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files + lazydirs))

    def read(self, gettext, readsubtree):
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            s._dirty = False

        self._loadfunc = _load_for_read

    def writesubtrees(self, m1, m2, writesubtree, match):
        self._load()  # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()

        def getnode(m, d):
            ld = m._lazydirs.get(d)
            if ld:
                return ld[1]
            return m._dirs.get(d, emptytree)._node

        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1])
        visit = self._loadchildrensetlazy(visit)
        if visit == b'this' or visit == b'all':
            visit = None
        for d, subm in pycompat.iteritems(self._dirs):
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)

    def walksubtrees(self, matcher=None):
        """Returns an iterator of the subtrees of this manifest, including this
        manifest itself.

        If `matcher` is provided, it only returns subtrees that match.
        """
        if matcher and not matcher.visitdir(self._dir[:-1]):
            return
        if not matcher or matcher(self._dir[:-1]):
            yield self

        self._load()
        # OPT: use visitchildrenset to avoid loading everything.
        self._loadalllazy()
        for d, subm in pycompat.iteritems(self._dirs):
            for subtree in subm.walksubtrees(matcher=matcher):
                yield subtree


class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    _file = b'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        self._dirty = False
        self._read = False
        self._opener = None

    def read(self):
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack(b'>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(
            self._file, b'w', atomictemp=True, checkambig=True
        ) as fp:
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack(b'>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False


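# Illustrative sketch (not part of the original module): decoding the
# on-disk format documented in manifestfulltextcache above -- a 20-byte
# node, a ">L" big-endian length, then that many bytes of manifest data,
# repeated to EOF -- tolerating a truncated tail exactly like read() does.
def _example_read_cache_entries(fp):
    import struct

    entries = []
    while True:
        node = fp.read(20)
        if len(node) < 20:
            break
        sizebytes = fp.read(4)
        if len(sizebytes) < 4:
            break
        (size,) = struct.unpack(b'>L', sizebytes)
        data = fp.read(size)
        if len(data) != size:
            break
        entries.append((node, data))
    return entries

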
1493
1492 # and upper bound of what we expect from compression
1494 # and upper bound of what we expect from compression
1493 # (real live value seems to be "3")
1495 # (real live value seems to be "3")
1494 MAXCOMPRESSION = 3
1496 MAXCOMPRESSION = 3
1495
1497
1496
1498
1497 @interfaceutil.implementer(repository.imanifeststorage)
1499 @interfaceutil.implementer(repository.imanifeststorage)
1498 class manifestrevlog(object):
1500 class manifestrevlog(object):
1499 '''A revlog that stores manifest texts. This is responsible for caching the
1501 '''A revlog that stores manifest texts. This is responsible for caching the
1500 full-text manifest contents.
1502 full-text manifest contents.
1501 '''
1503 '''
1502
1504
1503 def __init__(
1505 def __init__(
1504 self,
1506 self,
1505 opener,
1507 opener,
1506 tree=b'',
1508 tree=b'',
1507 dirlogcache=None,
1509 dirlogcache=None,
1508 indexfile=None,
1510 indexfile=None,
1509 treemanifest=False,
1511 treemanifest=False,
1510 ):
1512 ):
1511 """Constructs a new manifest revlog
1513 """Constructs a new manifest revlog
1512
1514
1513 `indexfile` - used by extensions to have two manifests at once, like
1515 `indexfile` - used by extensions to have two manifests at once, like
1514 when transitioning between flatmanifeset and treemanifests.
1516 when transitioning between flatmanifeset and treemanifests.
1515
1517
1516 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1518 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1517 options can also be used to make this a tree manifest revlog. The opener
1519 options can also be used to make this a tree manifest revlog. The opener
1518 option takes precedence, so if it is set to True, we ignore whatever
1520 option takes precedence, so if it is set to True, we ignore whatever
1519 value is passed in to the constructor.
1521 value is passed in to the constructor.
1520 """
1522 """
1521 # During normal operations, we expect to deal with not more than four
1523 # During normal operations, we expect to deal with not more than four
1522 # revs at a time (such as during commit --amend). When rebasing large
1524 # revs at a time (such as during commit --amend). When rebasing large
1523 # stacks of commits, the number can go up, hence the config knob below.
1525 # stacks of commits, the number can go up, hence the config knob below.
1524 cachesize = 4
1526 cachesize = 4
1525 optiontreemanifest = False
1527 optiontreemanifest = False
1526 opts = getattr(opener, 'options', None)
1528 opts = getattr(opener, 'options', None)
1527 if opts is not None:
1529 if opts is not None:
1528 cachesize = opts.get(b'manifestcachesize', cachesize)
1530 cachesize = opts.get(b'manifestcachesize', cachesize)
1529 optiontreemanifest = opts.get(b'treemanifest', False)
1531 optiontreemanifest = opts.get(b'treemanifest', False)
1530
1532
1531 self._treeondisk = optiontreemanifest or treemanifest
1533 self._treeondisk = optiontreemanifest or treemanifest
1532
1534
1533 self._fulltextcache = manifestfulltextcache(cachesize)
1535 self._fulltextcache = manifestfulltextcache(cachesize)
1534
1536
1535 if tree:
1537 if tree:
1536 assert self._treeondisk, b'opts is %r' % opts
1538 assert self._treeondisk, b'opts is %r' % opts
1537
1539
1538 if indexfile is None:
1540 if indexfile is None:
1539 indexfile = b'00manifest.i'
1541 indexfile = b'00manifest.i'
1540 if tree:
1542 if tree:
1541 indexfile = b"meta/" + tree + indexfile
1543 indexfile = b"meta/" + tree + indexfile
1542
1544
1543 self.tree = tree
1545 self.tree = tree
1544
1546
1545 # The dirlogcache is kept on the root manifest log
1547 # The dirlogcache is kept on the root manifest log
1546 if tree:
1548 if tree:
1547 self._dirlogcache = dirlogcache
1549 self._dirlogcache = dirlogcache
1548 else:
1550 else:
1549 self._dirlogcache = {b'': self}
1551 self._dirlogcache = {b'': self}
1550
1552
1551 self._revlog = revlog.revlog(
1553 self._revlog = revlog.revlog(
1552 opener,
1554 opener,
1553 indexfile,
1555 indexfile,
1554 # only root indexfile is cached
1556 # only root indexfile is cached
1555 checkambig=not bool(tree),
1557 checkambig=not bool(tree),
1556 mmaplargeindex=True,
1558 mmaplargeindex=True,
1557 upperboundcomp=MAXCOMPRESSION,
1559 upperboundcomp=MAXCOMPRESSION,
1558 )
1560 )
1559
1561
1560 self.index = self._revlog.index
1562 self.index = self._revlog.index
1561 self.version = self._revlog.version
1563 self.version = self._revlog.version
1562 self._generaldelta = self._revlog._generaldelta
1564 self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, b'_wlockref'):
            return

        self._fulltextcache._opener = repo.wcachevfs
        if repo._currentlock(repo._wlockref) is None:
            return

        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache(success):
            # Repo is in an unknown state, do not persist.
            if not success:
                return

            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        repo._afterlock(persistmanifestcache)
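persistmanifestcache() deliberately closes over weak references: a hook stored on the repo must not keep the repo (or this revlog) alive, and it must degrade to a no-op once either object has been garbage collected. A self-contained sketch of that pattern:

import weakref

class cache(object):
    def write(self):
        print('cache persisted')

def registerhook(hooks, obj):
    objref = weakref.ref(obj)  # the hook list does not pin `obj`

    def persist(success):
        target = objref()      # None once `obj` has been collected
        if success and target is not None:
            target.write()

    hooks.append(persist)

hooks = []
c = cache()
registerhook(hooks, c)
for hook in hooks:
    hook(True)                 # prints 'cache persisted'
del c                          # after collection, the hook does nothing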

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(
                self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
            )
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(
        self,
        m,
        transaction,
        link,
        p1,
        p2,
        added,
        removed,
        readtree=None,
        match=None,
    ):
        if p1 in self.fulltextcache and util.safehasattr(m, b'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge(
                [(x, False) for x in sorted(added)],
                [(x, True) for x in sorted(removed)],
            )

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self._revlog.addrevision(
                text, transaction, link, p1, p2, cachedelta
            )
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, b"readtree must be set for treemanifest writes"
                assert match, b"match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(
                    m, transaction, link, m1, m2, readtree, match=match
                )
                arraytext = None
            else:
                text = m.text()
                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n
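The `work` iterator built above relies on heapq.merge() to combine the two already-sorted change lists into a single sorted stream of (path, isremoved) pairs, which is the shape fastdelta() consumes. In isolation:

import heapq

added = [b'b.txt', b'a.txt']
removed = [b'c.txt']

work = heapq.merge(
    [(x, False) for x in sorted(added)],
    [(x, True) for x in sorted(removed)],
)
print(list(work))
# [(b'a.txt', False), (b'b.txt', False), (b'c.txt', True)]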

    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != b'' and (
            m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
        ):
            return m.node()

        def writesubtree(subm, subp1, subp2, match):
            sublog = self.dirlog(subm.dir())
            sublog.add(
                subm,
                transaction,
                link,
                subp1,
                subp2,
                None,
                None,
                readtree=readtree,
                match=match,
            )

        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != b'':
            # Double-check whether contents are unchanged compared to one
            # parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(
                text, transaction, link, m1.node(), m2.node()
            )

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n
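_addtree() writes children before the parent because the parent's dirtext() embeds the child node ids, and it reuses a parent revision when the resulting text is identical. A toy content-addressed version of that bottom-up order (hypothetical helper, not Mercurial API):

import hashlib

def writetree(tree, store):
    # tree: {name: nested dict for a subdirectory, or bytes id for a file}
    entries = []
    for name, value in sorted(tree.items()):
        if isinstance(value, dict):
            value = writetree(value, store)  # children are persisted first
            name = name + b'/'
        entries.append(name + b'\x00' + value + b'\n')
    text = b''.join(entries)
    node = hashlib.sha1(text).hexdigest().encode('ascii')
    store.setdefault(node, text)  # identical text -> revision is reused
    return node

store = {}
root = {b'src': {b'main.py': b'blob1'}, b'README': b'blob2'}
print(writetree(root, store))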

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def rawdata(self, node, _df=None):
        return self._revlog.rawdata(node, _df=_df)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
    ):
        return self._revlog.emitrevisions(
            nodes,
            nodesorder=nodesorder,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
        )

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(
            deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
        )

    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError(b'expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles,
            sharedfiles=sharedfiles,
            revisionscount=revisionscount,
            trackedsize=trackedsize,
            storedsize=storedsize,
        )

    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value

@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""

    def __init__(self, opener, repo, rootstore, narrowmatch):
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
            cachesize = opts.get(b'manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = narrowmatch

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[b''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get(b'', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
                   the revlog
        """
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        if not self._narrowmatch.always():
            if not self._narrowmatch.visitdir(tree[:-1]):
                return excludeddirmanifestctx(tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                raise error.Abort(
                    _(
                        b"cannot ask for manifest directory '%s' in a flat "
                        b"manifest"
                    )
                    % tree
                )
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, b'', node)
            else:
                m = manifestctx(self, node)

        if node != nullid:
            mancache = self._dirmancache.get(tree)
            if not mancache:
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m
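The narrowspec check above short-circuits before any storage access: directories outside the clone get a stand-in context whose contents are unknown. Reduced to its control flow (all names here are illustrative, not Mercurial API):

def getdir(tree, node, visitdir, storage):
    if not visitdir(tree[:-1]):          # strip the trailing '/'
        return ('excluded', tree, node)  # placeholder; never hits storage
    return storage[(tree, node)]

storage = {(b'src/', b'n1'): 'real manifest'}
print(getdir(b'src/', b'n1', lambda d: True, storage))
print(getdir(b'vendor/', b'n2', lambda d: d != b'vendor', storage))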

    def getstorage(self, tree):
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        return self._rootstore.rev(node)
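A hedged usage sketch from the consumer side (e.g. an extension command, where `ui` and `repo` are supplied by Mercurial's command machinery): manifestlog hands out context objects, and read() yields the actual manifest.

def showmanifest(ui, repo):
    mfl = repo.manifestlog
    node = repo[b'.'].manifestnode()  # manifest node of the working parent
    mctx = mfl[node]                  # manifestctx or treemanifestctx
    for path in mctx.read():          # iteration yields tracked paths
        ui.write(path + b'\n')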


@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        return self._storage().add(
            self._manifestdict,
            transaction,
            link,
            p1,
            p2,
            added,
            removed,
            match=match,
        )


@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """

    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        # self.p1, self.p2 = store.parents(node)
        # rev = store.rev(node)
        # self.linkrev = store.linkrev(rev)

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def node(self):
        return self._node

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def read(self):
        if self._data is None:
            if self._node == nullid:
                self._data = manifestdict()
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta()
        return self.read()
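The fast-path condition above, extracted into plain Python: a stored delta is only cheap to use when its base is one of the revision's parents; otherwise the full read is the safer bet.

nullrev = -1

def deltaisfast(rev, deltaparent, parentrevs):
    return deltaparent != nullrev and deltaparent in parentrevs(rev)

parentrevs = {5: (4, nullrev)}.__getitem__
print(deltaisfast(5, 4, parentrevs))  # True  -> readdelta() path
print(deltaisfast(5, 2, parentrevs))  # False -> read() path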

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        return self.read().find(key)
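For reference, the flat manifest fulltext that manifestdict() parses is line-oriented: <path>\0<40-hex node>[flag]\n, where the optional flag byte is b'l' (symlink), b'x' (executable) or b't' (subtree). A minimal parser sketch (illustrative, not the real parser, which lives in C/Python helpers):

def parsemanifesttext(text):
    entries = {}
    for line in text.splitlines():
        path, rest = line.split(b'\x00', 1)
        entries[path] = (rest[:40], rest[40:])  # (hex node, flag or b'')
    return entries

sample = (
    b'README\x00' + b'a' * 40 + b'\n'
    b'bin/tool\x00' + b'b' * 40 + b'x\n'
)
print(parsemanifesttext(sample))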


@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    def __init__(self, manifestlog, dir=b''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        def readtree(dir, node):
            return self._manifestlog.get(dir, node).read()

        return self._storage().add(
            self._treemanifest,
            transaction,
            link,
            p1,
            p2,
            added,
            removed,
            readtree=readtree,
            match=match,
        )


@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx(object):
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so
        # that we can instantiate treemanifestctx objects for directories we
        # don't have on disk.
        # self.p1, self.p2 = store.parents(node)
        # rev = store.rev(node)
        # self.linkrev = store.linkrev(rev)

    def _storage(self):
        narrowmatch = self._manifestlog._narrowmatch
        if not narrowmatch.always():
            if not narrowmatch.visitdir(self._dir[:-1]):
                return excludedmanifestrevlog(self._dir)
        return self._manifestlog.getstorage(self._dir)

    def read(self):
        if self._data is None:
            store = self._storage()
            if self._node == nullid:
                self._data = treemanifest()
            # TODO accessing non-public API
            elif store._treeondisk:
                m = treemanifest(dir=self._dir)

                def gettext():
                    return store.revision(self._node)

                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()

                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        store = self._storage()
        if shallow:
            r = store.rev(self._node)
            d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = store.deltaparent(store.rev(self._node))
            m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md
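The slow-delta loop above, restated over plain dicts mapping path to node: keep everything that exists on the newer side and differs from the older side (entries removed in m1 have a falsy new node and are dropped, matching the `if n1:` guard).

def manifestdelta(m0, m1):
    delta = {}
    for path in set(m0) | set(m1):
        n0, n1 = m0.get(path), m1.get(path)
        if n1 and n1 != n0:
            delta[path] = n1
    return delta

m0 = {b'a': b'node1', b'b': b'node2'}
m1 = {b'a': b'node1', b'b': b'node3', b'c': b'node4'}
print(sorted(manifestdelta(m0, m1).items()))
# [(b'b', b'node3'), (b'c', b'node4')]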

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return self.readdelta(shallow=shallow)

        if shallow:
            return manifestdict(store.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        return self.read().find(key)


class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """

    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[b''] = node
        self._flags[b''] = b't'

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy to
    # be of the same type as the original, which would not happen with the
    # super type's copy().
    def copy(self):
        return self


class excludeddirmanifestctx(treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""

    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        raise error.ProgrammingError(
            b'attempt to write manifest from excluded dir %s' % self._dir
        )


class excludedmanifestrevlog(manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see due to the revlog being
    skipped. This class exists to conform to the manifestrevlog
    interface for those directories and proactively prevent writes to
    outside the narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def __len__(self):
        raise error.ProgrammingError(
            b'attempt to get length of excluded dir %s' % self._dir
        )

    def rev(self, node):
        raise error.ProgrammingError(
            b'attempt to get rev from excluded dir %s' % self._dir
        )

    def linkrev(self, node):
        raise error.ProgrammingError(
            b'attempt to get linkrev from excluded dir %s' % self._dir
        )

    def node(self, rev):
        raise error.ProgrammingError(
            b'attempt to get node from excluded dir %s' % self._dir
        )

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone.
        # However, the method still gets called from writesubtree() in
        # _addtree(), so we need to handle it. We should possibly make that
        # avoid calling add() with a clean manifest (_dirty is always False
        # in excludeddir instances).
        pass
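The excluded* classes above all follow the same fail-fast idea: satisfy the expected interface, but convert any operation that should be impossible outside the narrowspec into an immediate ProgrammingError rather than silently corrupting state. Stripped of Mercurial specifics, the pattern is roughly:

class ProgrammingError(Exception):
    pass

class excludedstore(object):
    """Illustrative stand-in: known to exist, forbidden to touch."""

    def __init__(self, dir):
        self._dir = dir

    def _refuse(self, op):
        raise ProgrammingError(
            'attempt to %s excluded dir %s' % (op, self._dir)
        )

    def __len__(self):
        self._refuse('get length of')

    def rev(self, node):
        self._refuse('get rev from')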