requirements: add constant for revlog v1 requirement...
Raphaël Gomès
r47371:ee91966a default
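The commit replaces a bare b'revlogv1' literal in exchangev2.py with a named constant from the requirements module. The requirements module itself is not part of this capture, so the following is only a minimal sketch of what the new constant presumably looks like; the name comes from the diff below and the value is inferred from the literal it replaces:

# mercurial/requirements.py (sketch; not shown in this diff)

# Entry in a repository's .hg/requires file indicating that the store
# uses the original revlog format. Value inferred from the literal the
# diff below replaces.
REVLOGV1_REQUIREMENT = b'revlogv1'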
diff --git a/mercurial/exchangev2.py b/mercurial/exchangev2.py
--- a/mercurial/exchangev2.py
+++ b/mercurial/exchangev2.py
@@ -1,798 +1,799 @@
 # exchangev2.py - repository exchange for wire protocol version 2
 #
 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import collections
 import weakref

 from .i18n import _
 from .node import (
     nullid,
     short,
 )
 from . import (
     bookmarks,
     error,
     mdiff,
     narrowspec,
     phases,
     pycompat,
+    requirements as requirementsmod,
     setdiscovery,
 )
 from .interfaces import repository


 def pull(pullop):
     """Pull using wire protocol version 2."""
     repo = pullop.repo
     remote = pullop.remote

     usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)

     # If this is a clone and it was requested to perform a "stream clone",
     # we obtain the raw files data from the remote then fall back to an
     # incremental pull. This is somewhat hacky and is not nearly robust enough
     # for long-term usage.
     if usingrawchangelogandmanifest:
         with repo.transaction(b'clone'):
             _fetchrawstorefiles(repo, remote)
             repo.invalidate(clearfilecache=True)

     tr = pullop.trmanager.transaction()

     # We don't use the repo's narrow matcher here because the patterns passed
     # to exchange.pull() could be different.
     narrowmatcher = narrowspec.match(
         repo.root,
         # Empty maps to nevermatcher. So always
         # set includes if missing.
         pullop.includepats or {b'path:.'},
         pullop.excludepats,
     )

     if pullop.includepats or pullop.excludepats:
         pathfilter = {}
         if pullop.includepats:
             pathfilter[b'include'] = sorted(pullop.includepats)
         if pullop.excludepats:
             pathfilter[b'exclude'] = sorted(pullop.excludepats)
     else:
         pathfilter = None

     # Figure out what needs to be fetched.
     common, fetch, remoteheads = _pullchangesetdiscovery(
         repo, remote, pullop.heads, abortwhenunrelated=pullop.force
     )

     # And fetch the data.
     pullheads = pullop.heads or remoteheads
     csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)

     # New revisions are written to the changelog. But all other updates
     # are deferred. Do those now.

     # Ensure all new changesets are draft by default. If the repo is
     # publishing, the phase will be adjusted by the loop below.
     if csetres[b'added']:
         phases.registernew(
             repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']]
         )

     # And adjust the phase of all changesets accordingly.
     for phasenumber, phase in phases.phasenames.items():
         if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
             continue

         phases.advanceboundary(
             repo,
             tr,
             phasenumber,
             csetres[b'nodesbyphase'][phase],
         )

     # Write bookmark updates.
     bookmarks.updatefromremote(
         repo.ui,
         repo,
         csetres[b'bookmarks'],
         remote.url(),
         pullop.gettransaction,
         explicit=pullop.explicitbookmarks,
     )

     manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])

     # We don't properly support shallow changeset and manifest yet. So we apply
     # depth limiting locally.
     if pullop.depth:
         relevantcsetnodes = set()
         clnode = repo.changelog.node

         for rev in repo.revs(
             b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
         ):
             relevantcsetnodes.add(clnode(rev))

         csetrelevantfilter = lambda n: n in relevantcsetnodes

     else:
         csetrelevantfilter = lambda n: True

     # If obtaining the raw store files, we need to scan the full repo to
     # derive all the changesets, manifests, and linkrevs.
     if usingrawchangelogandmanifest:
         csetsforfiles = []
         mnodesforfiles = []
         manifestlinkrevs = {}

         for rev in repo:
             ctx = repo[rev]
             node = ctx.node()

             if not csetrelevantfilter(node):
                 continue

             mnode = ctx.manifestnode()

             csetsforfiles.append(node)
             mnodesforfiles.append(mnode)
             manifestlinkrevs[mnode] = rev

     else:
         csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
         mnodesforfiles = manres[b'added']
         manifestlinkrevs = manres[b'linkrevs']

     # Find all file nodes referenced by added manifests and fetch those
     # revisions.
     fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
     _fetchfilesfromcsets(
         repo,
         tr,
         remote,
         pathfilter,
         fnodes,
         csetsforfiles,
         manifestlinkrevs,
         shallow=bool(pullop.depth),
     )


 def _checkuserawstorefiledata(pullop):
     """Check whether we should use rawstorefiledata command to retrieve data."""

     repo = pullop.repo
     remote = pullop.remote

     # Command to obtain raw store data isn't available.
     if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
         return False

     # Only honor if user requested stream clone operation.
     if not pullop.streamclonerequested:
         return False

     # Only works on empty repos.
     if len(repo):
         return False

     # TODO This is super hacky. There needs to be a storage API for this. We
     # also need to check for compatibility with the remote.
-    if b'revlogv1' not in repo.requirements:
+    if requirementsmod.REVLOGV1_REQUIREMENT not in repo.requirements:
         return False

     return True


 def _fetchrawstorefiles(repo, remote):
     with remote.commandexecutor() as e:
         objs = e.callcommand(
             b'rawstorefiledata',
             {
                 b'files': [b'changelog', b'manifestlog'],
             },
         ).result()

         # First object is a summary of files data that follows.
         overall = next(objs)

         progress = repo.ui.makeprogress(
             _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
         )
         with progress:
             progress.update(0)

             # Next are pairs of file metadata, data.
             while True:
                 try:
                     filemeta = next(objs)
                 except StopIteration:
                     break

                 for k in (b'location', b'path', b'size'):
                     if k not in filemeta:
                         raise error.Abort(
                             _(b'remote file data missing key: %s') % k
                         )

                 if filemeta[b'location'] == b'store':
                     vfs = repo.svfs
                 else:
                     raise error.Abort(
                         _(b'invalid location for raw file data: %s')
                         % filemeta[b'location']
                     )

                 bytesremaining = filemeta[b'size']

                 with vfs.open(filemeta[b'path'], b'wb') as fh:
                     while True:
                         try:
                             chunk = next(objs)
                         except StopIteration:
                             break

                         bytesremaining -= len(chunk)

                         if bytesremaining < 0:
                             raise error.Abort(
                                 _(
                                     b'received invalid number of bytes for file '
                                     b'data; expected %d, got extra'
                                 )
                                 % filemeta[b'size']
                             )

                         progress.increment(step=len(chunk))
                         fh.write(chunk)

                         try:
                             if chunk.islast:
                                 break
                         except AttributeError:
                             raise error.Abort(
                                 _(
                                     b'did not receive indefinite length bytestring '
                                     b'for file data'
                                 )
                             )

                 if bytesremaining:
                     raise error.Abort(
                         _(
                             b'received invalid number of bytes for'
                             b'file data; expected %d got %d'
                         )
                         % (
                             filemeta[b'size'],
                             filemeta[b'size'] - bytesremaining,
                         )
                     )


 def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
     """Determine which changesets need to be pulled."""

     if heads:
         knownnode = repo.changelog.hasnode
         if all(knownnode(head) for head in heads):
             return heads, False, heads

     # TODO wire protocol version 2 is capable of more efficient discovery
     # than setdiscovery. Consider implementing something better.
     common, fetch, remoteheads = setdiscovery.findcommonheads(
         repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
     )

     common = set(common)
     remoteheads = set(remoteheads)

     # If a remote head is filtered locally, put it back in the common set.
     # See the comment in exchange._pulldiscoverychangegroup() for more.

     if fetch and remoteheads:
         has_node = repo.unfiltered().changelog.index.has_node

         common |= {head for head in remoteheads if has_node(head)}

         if set(remoteheads).issubset(common):
             fetch = []

     common.discard(nullid)

     return common, fetch, remoteheads


 def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
     # TODO consider adding a step here where we obtain the DAG shape first
     # (or ask the server to slice changesets into chunks for us) so that
     # we can perform multiple fetches in batches. This will facilitate
     # resuming interrupted clones, higher server-side cache hit rates due
     # to smaller segments, etc.
     with remote.commandexecutor() as e:
         objs = e.callcommand(
             b'changesetdata',
             {
                 b'revisions': [
                     {
                         b'type': b'changesetdagrange',
                         b'roots': sorted(common),
                         b'heads': sorted(remoteheads),
                     }
                 ],
                 b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
             },
         ).result()

         # The context manager waits on all response data when exiting. So
         # we need to remain in the context manager in order to stream data.
         return _processchangesetdata(repo, tr, objs)


 def _processchangesetdata(repo, tr, objs):
     repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs))

     urepo = repo.unfiltered()
     cl = urepo.changelog

     cl.delayupdate(tr)

     # The first emitted object is a header describing the data that
     # follows.
     meta = next(objs)

     progress = repo.ui.makeprogress(
         _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems')
     )

     manifestnodes = {}
     added = []

     def linkrev(node):
         repo.ui.debug(b'add changeset %s\n' % short(node))
         # Linkrev for changelog is always self.
         return len(cl)

     def ondupchangeset(cl, rev):
         added.append(cl.node(rev))

     def onchangeset(cl, rev):
         progress.increment()

         revision = cl.changelogrevision(rev)
         added.append(cl.node(rev))

         # We need to preserve the mapping of changelog revision to node
         # so we can set the linkrev accordingly when manifests are added.
         manifestnodes[rev] = revision.manifest

         repo.register_changeset(rev, revision)

     nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
     remotebookmarks = {}

     # addgroup() expects a 7-tuple describing revisions. This normalizes
     # the wire data to that format.
     #
     # This loop also aggregates non-revision metadata, such as phase
     # data.
     def iterrevisions():
         for cset in objs:
             node = cset[b'node']

             if b'phase' in cset:
                 nodesbyphase[cset[b'phase']].add(node)

             for mark in cset.get(b'bookmarks', []):
                 remotebookmarks[mark] = node

             # TODO add mechanism for extensions to examine records so they
             # can siphon off custom data fields.

             extrafields = {}

             for field, size in cset.get(b'fieldsfollowing', []):
                 extrafields[field] = next(objs)

             # Some entries might only be metadata only updates.
             if b'revision' not in extrafields:
                 continue

             data = extrafields[b'revision']

             yield (
                 node,
                 cset[b'parents'][0],
                 cset[b'parents'][1],
                 # Linknode is always itself for changesets.
                 cset[b'node'],
                 # We always send full revisions. So delta base is not set.
                 nullid,
                 mdiff.trivialdiffheader(len(data)) + data,
                 # Flags not yet supported.
                 0,
             )

     cl.addgroup(
         iterrevisions(),
         linkrev,
         weakref.proxy(tr),
         alwayscache=True,
         addrevisioncb=onchangeset,
         duplicaterevisioncb=ondupchangeset,
     )

     progress.complete()

     return {
         b'added': added,
         b'nodesbyphase': nodesbyphase,
         b'bookmarks': remotebookmarks,
         b'manifestnodes': manifestnodes,
     }


 def _fetchmanifests(repo, tr, remote, manifestnodes):
     rootmanifest = repo.manifestlog.getstorage(b'')

     # Some manifests can be shared between changesets. Filter out revisions
     # we already know about.
     fetchnodes = []
     linkrevs = {}
     seen = set()

     for clrev, node in sorted(pycompat.iteritems(manifestnodes)):
         if node in seen:
             continue

         try:
             rootmanifest.rev(node)
         except error.LookupError:
             fetchnodes.append(node)
             linkrevs[node] = clrev

         seen.add(node)

     # TODO handle tree manifests

     # addgroup() expects 7-tuple describing revisions. This normalizes
     # the wire data to that format.
     def iterrevisions(objs, progress):
         for manifest in objs:
             node = manifest[b'node']

             extrafields = {}

             for field, size in manifest.get(b'fieldsfollowing', []):
                 extrafields[field] = next(objs)

             if b'delta' in extrafields:
                 basenode = manifest[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
                 basenode = nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
                 continue

             yield (
                 node,
                 manifest[b'parents'][0],
                 manifest[b'parents'][1],
                 # The value passed in is passed to the lookup function passed
                 # to addgroup(). We already have a map of manifest node to
                 # changelog revision number. So we just pass in the
                 # manifest node here and use linkrevs.__getitem__ as the
                 # resolution function.
                 node,
                 basenode,
                 delta,
                 # Flags not yet supported.
                 0,
             )

             progress.increment()

     progress = repo.ui.makeprogress(
         _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes)
     )

     commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
     batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
     # TODO make size configurable on client?

     # We send commands 1 at a time to the remote. This is not the most
     # efficient because we incur a round trip at the end of each batch.
     # However, the existing frame-based reactor keeps consuming server
     # data in the background. And this results in response data buffering
     # in memory. This can consume gigabytes of memory.
     # TODO send multiple commands in a request once background buffering
     # issues are resolved.

     added = []

     for i in pycompat.xrange(0, len(fetchnodes), batchsize):
         batch = [node for node in fetchnodes[i : i + batchsize]]
         if not batch:
             continue

         with remote.commandexecutor() as e:
             objs = e.callcommand(
                 b'manifestdata',
                 {
                     b'tree': b'',
                     b'nodes': batch,
                     b'fields': {b'parents', b'revision'},
                     b'haveparents': True,
                 },
             ).result()

             # Chomp off header object.
             next(objs)

             def onchangeset(cl, rev):
                 added.append(cl.node(rev))

             rootmanifest.addgroup(
                 iterrevisions(objs, progress),
                 linkrevs.__getitem__,
                 weakref.proxy(tr),
                 addrevisioncb=onchangeset,
                 duplicaterevisioncb=onchangeset,
             )

     progress.complete()

     return {
         b'added': added,
         b'linkrevs': linkrevs,
     }


 def _derivefilesfrommanifests(repo, matcher, manifestnodes):
     """Determine what file nodes are relevant given a set of manifest nodes.

     Returns a dict mapping file paths to dicts of file node to first manifest
     node.
     """
     ml = repo.manifestlog
     fnodes = collections.defaultdict(dict)

     progress = repo.ui.makeprogress(
         _(b'scanning manifests'), total=len(manifestnodes)
     )

     with progress:
         for manifestnode in manifestnodes:
             m = ml.get(b'', manifestnode)

             # TODO this will pull in unwanted nodes because it takes the storage
             # delta into consideration. What we really want is something that
             # takes the delta between the manifest's parents. And ideally we
             # would ignore file nodes that are known locally. For now, ignore
             # both these limitations. This will result in incremental fetches
             # requesting data we already have. So this is far from ideal.
             md = m.readfast()

             for path, fnode in md.items():
                 if matcher(path):
                     fnodes[path].setdefault(fnode, manifestnode)

             progress.increment()

     return fnodes


 def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
     """Fetch file data from explicit file revisions."""

     def iterrevisions(objs, progress):
         for filerevision in objs:
             node = filerevision[b'node']

             extrafields = {}

             for field, size in filerevision.get(b'fieldsfollowing', []):
                 extrafields[field] = next(objs)

             if b'delta' in extrafields:
                 basenode = filerevision[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
                 basenode = nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
                 continue

             yield (
                 node,
                 filerevision[b'parents'][0],
                 filerevision[b'parents'][1],
                 node,
                 basenode,
                 delta,
                 # Flags not yet supported.
                 0,
             )

             progress.increment()

     progress = repo.ui.makeprogress(
         _(b'files'),
         unit=_(b'chunks'),
         total=sum(len(v) for v in pycompat.itervalues(fnodes)),
     )

     # TODO make batch size configurable
     batchsize = 10000
     fnodeslist = [x for x in sorted(fnodes.items())]

     for i in pycompat.xrange(0, len(fnodeslist), batchsize):
         batch = [x for x in fnodeslist[i : i + batchsize]]
         if not batch:
             continue

         with remote.commandexecutor() as e:
             fs = []
             locallinkrevs = {}

             for path, nodes in batch:
                 fs.append(
                     (
                         path,
                         e.callcommand(
                             b'filedata',
                             {
                                 b'path': path,
                                 b'nodes': sorted(nodes),
                                 b'fields': {b'parents', b'revision'},
                                 b'haveparents': True,
                             },
                         ),
                     )
                 )

                 locallinkrevs[path] = {
                     node: linkrevs[manifestnode]
                     for node, manifestnode in pycompat.iteritems(nodes)
                 }

             for path, f in fs:
                 objs = f.result()

                 # Chomp off header objects.
                 next(objs)

                 store = repo.file(path)
                 store.addgroup(
                     iterrevisions(objs, progress),
                     locallinkrevs[path].__getitem__,
                     weakref.proxy(tr),
                 )


 def _fetchfilesfromcsets(
     repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
 ):
     """Fetch file data from explicit changeset revisions."""

     def iterrevisions(objs, remaining, progress):
         while remaining:
             filerevision = next(objs)

             node = filerevision[b'node']

             extrafields = {}

             for field, size in filerevision.get(b'fieldsfollowing', []):
                 extrafields[field] = next(objs)

             if b'delta' in extrafields:
                 basenode = filerevision[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
                 basenode = nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
                 continue

             if b'linknode' in filerevision:
                 linknode = filerevision[b'linknode']
             else:
                 linknode = node

             yield (
                 node,
                 filerevision[b'parents'][0],
                 filerevision[b'parents'][1],
                 linknode,
                 basenode,
                 delta,
                 # Flags not yet supported.
                 0,
             )

             progress.increment()
             remaining -= 1

     progress = repo.ui.makeprogress(
         _(b'files'),
         unit=_(b'chunks'),
         total=sum(len(v) for v in pycompat.itervalues(fnodes)),
     )

     commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
     batchsize = commandmeta.get(b'recommendedbatchsize', 50000)

     shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
     fields = {b'parents', b'revision'}
     clrev = repo.changelog.rev

     # There are no guarantees that we'll have ancestor revisions if
     # a) this repo has shallow file storage b) shallow data fetching is enabled.
     # Force remote to not delta against possibly unknown revisions when these
     # conditions hold.
     haveparents = not (shallowfiles or shallow)

     # Similarly, we may not have calculated linkrevs for all incoming file
     # revisions. Ask the remote to do work for us in this case.
     if not haveparents:
         fields.add(b'linknode')

     for i in pycompat.xrange(0, len(csets), batchsize):
         batch = [x for x in csets[i : i + batchsize]]
         if not batch:
             continue

         with remote.commandexecutor() as e:
             args = {
                 b'revisions': [
                     {
                         b'type': b'changesetexplicit',
                         b'nodes': batch,
                     }
                 ],
                 b'fields': fields,
                 b'haveparents': haveparents,
             }

             if pathfilter:
                 args[b'pathfilter'] = pathfilter

             objs = e.callcommand(b'filesdata', args).result()

             # First object is an overall header.
             overall = next(objs)

             # We have overall['totalpaths'] segments.
             for i in pycompat.xrange(overall[b'totalpaths']):
                 header = next(objs)

                 path = header[b'path']
                 store = repo.file(path)

                 linkrevs = {
                     fnode: manlinkrevs[mnode]
                     for fnode, mnode in pycompat.iteritems(fnodes[path])
                 }

                 def getlinkrev(node):
                     if node in linkrevs:
                         return linkrevs[node]
                     else:
                         return clrev(node)

                 store.addgroup(
                     iterrevisions(objs, header[b'totalitems'], progress),
                     getlinkrev,
                     weakref.proxy(tr),
                     maybemissingparents=shallow,
                 )
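The caller-facing effect of the change above is small but worth spelling out: requirement checks now go through the shared constant rather than a magic byte string. A hypothetical illustration (the helper name below is invented and is not part of this commit):

# Hypothetical usage sketch, not from this changeset:
from mercurial import requirements as requirementsmod

def _supports_raw_clone(repo):
    # Before this commit the check was spelled with a bare literal:
    #     b'revlogv1' in repo.requirements
    return requirementsmod.REVLOGV1_REQUIREMENT in repo.requirements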
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,3698 +1,3698 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import functools
 import os
 import random
 import sys
 import time
 import weakref

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
     short,
 )
 from .pycompat import (
     delattr,
     getattr,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     bundlecaches,
     changegroup,
     color,
     commit,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     rcutil,
     repoview,
     requirements as requirementsmod,
     revlog,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )

 from .interfaces import (
     repository,
     util as interfaceutil,
 )

 from .utils import (
     hashutil,
     procutil,
     stringutil,
 )

 from .revlogutils import (
     concurrency_checker as revlogchecker,
     constants as revlogconst,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()


 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered"""

     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)

     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)


 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""

     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)


 class storecache(_basefilecache):
     """filecache for files in the store"""

     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b''))

     def join(self, obj, fname):
         return obj.sjoin(fname)


 class mixedrepostorecache(_basefilecache):
     """filecache for a mix files in .hg/store and outside"""

     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)

     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == b'plain':
             return obj.vfs.join(fname)
         else:
             if location != b'':
                 raise error.ProgrammingError(
                     b'unexpected location: %s' % location
                 )
             return obj.sjoin(fname)


 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True


 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)


 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())


 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""

     @functools.wraps(orig)
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)

     return wrapper


 moderncaps = {
     b'lookup',
     b'branchmap',
     b'pushkey',
     b'known',
     b'getbundle',
     b'unbundle',
 }
 legacycaps = moderncaps.union({b'changegroupsubset'})


 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False

     def __enter__(self):
         return self

     def __exit__(self, exctype, excvalue, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after sendcommands()'
             )

         if self._closed:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after close()'
             )

         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))

         f = pycompat.futures.Future()

         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)

         return f

     def sendcommands(self):
         self._sent = True

     def close(self):
         self._closed = True


265 @interfaceutil.implementer(repository.ipeercommands)
265 @interfaceutil.implementer(repository.ipeercommands)
266 class localpeer(repository.peer):
266 class localpeer(repository.peer):
267 '''peer for a local repo; reflects only the most recent API'''
267 '''peer for a local repo; reflects only the most recent API'''
268
268
269 def __init__(self, repo, caps=None):
269 def __init__(self, repo, caps=None):
270 super(localpeer, self).__init__()
270 super(localpeer, self).__init__()
271
271
272 if caps is None:
272 if caps is None:
273 caps = moderncaps.copy()
273 caps = moderncaps.copy()
274 self._repo = repo.filtered(b'served')
274 self._repo = repo.filtered(b'served')
275 self.ui = repo.ui
275 self.ui = repo.ui
276 self._caps = repo._restrictcapabilities(caps)
276 self._caps = repo._restrictcapabilities(caps)
277
277
278 # Begin of _basepeer interface.
278 # Begin of _basepeer interface.
279
279
280 def url(self):
280 def url(self):
281 return self._repo.url()
281 return self._repo.url()
282
282
283 def local(self):
283 def local(self):
284 return self._repo
284 return self._repo
285
285
286 def peer(self):
286 def peer(self):
287 return self
287 return self
288
288
289 def canpush(self):
289 def canpush(self):
290 return True
290 return True
291
291
292 def close(self):
292 def close(self):
293 self._repo.close()
293 self._repo.close()
294
294
295 # End of _basepeer interface.
295 # End of _basepeer interface.
296
296
297 # Begin of _basewirecommands interface.
297 # Begin of _basewirecommands interface.
298
298
299 def branchmap(self):
299 def branchmap(self):
300 return self._repo.branchmap()
300 return self._repo.branchmap()
301
301
302 def capabilities(self):
302 def capabilities(self):
303 return self._caps
303 return self._caps
304
304
305 def clonebundles(self):
305 def clonebundles(self):
306 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
306 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
307
307
308 def debugwireargs(self, one, two, three=None, four=None, five=None):
308 def debugwireargs(self, one, two, three=None, four=None, five=None):
309 """Used to test argument passing over the wire"""
309 """Used to test argument passing over the wire"""
310 return b"%s %s %s %s %s" % (
310 return b"%s %s %s %s %s" % (
311 one,
311 one,
312 two,
312 two,
313 pycompat.bytestr(three),
313 pycompat.bytestr(three),
314 pycompat.bytestr(four),
314 pycompat.bytestr(four),
315 pycompat.bytestr(five),
315 pycompat.bytestr(five),
316 )
316 )
317
317
318 def getbundle(
318 def getbundle(
319 self, source, heads=None, common=None, bundlecaps=None, **kwargs
319 self, source, heads=None, common=None, bundlecaps=None, **kwargs
320 ):
320 ):
321 chunks = exchange.getbundlechunks(
321 chunks = exchange.getbundlechunks(
322 self._repo,
322 self._repo,
323 source,
323 source,
324 heads=heads,
324 heads=heads,
325 common=common,
325 common=common,
326 bundlecaps=bundlecaps,
326 bundlecaps=bundlecaps,
327 **kwargs
327 **kwargs
328 )[1]
328 )[1]
329 cb = util.chunkbuffer(chunks)
329 cb = util.chunkbuffer(chunks)
330
330
331 if exchange.bundle2requested(bundlecaps):
331 if exchange.bundle2requested(bundlecaps):
332 # When requesting a bundle2, getbundle returns a stream to make the
332 # When requesting a bundle2, getbundle returns a stream to make the
333 # wire-level function happier. We need to build a proper object
333 # wire-level function happier. We need to build a proper object
334 # from it in the local peer.
334 # from it in the local peer.
335 return bundle2.getunbundler(self.ui, cb)
335 return bundle2.getunbundler(self.ui, cb)
336 else:
336 else:
337 return changegroup.getunbundler(b'01', cb, None)
337 return changegroup.getunbundler(b'01', cb, None)
338
338
339 def heads(self):
339 def heads(self):
340 return self._repo.heads()
340 return self._repo.heads()
341
341
342 def known(self, nodes):
342 def known(self, nodes):
343 return self._repo.known(nodes)
343 return self._repo.known(nodes)
344
344
345 def listkeys(self, namespace):
345 def listkeys(self, namespace):
346 return self._repo.listkeys(namespace)
346 return self._repo.listkeys(namespace)
347
347
348 def lookup(self, key):
348 def lookup(self, key):
349 return self._repo.lookup(key)
349 return self._repo.lookup(key)
350
350
351 def pushkey(self, namespace, key, old, new):
351 def pushkey(self, namespace, key, old, new):
352 return self._repo.pushkey(namespace, key, old, new)
352 return self._repo.pushkey(namespace, key, old, new)
353
353
354 def stream_out(self):
354 def stream_out(self):
355 raise error.Abort(_(b'cannot perform stream clone against local peer'))
355 raise error.Abort(_(b'cannot perform stream clone against local peer'))
356
356
357 def unbundle(self, bundle, heads, url):
357 def unbundle(self, bundle, heads, url):
358 """apply a bundle on a repo
358 """apply a bundle on a repo
359
359
360 This function handles the repo locking itself."""
360 This function handles the repo locking itself."""
361 try:
361 try:
362 try:
362 try:
363 bundle = exchange.readbundle(self.ui, bundle, None)
363 bundle = exchange.readbundle(self.ui, bundle, None)
364 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
364 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
365 if util.safehasattr(ret, b'getchunks'):
365 if util.safehasattr(ret, b'getchunks'):
366 # This is a bundle20 object, turn it into an unbundler.
366 # This is a bundle20 object, turn it into an unbundler.
367 # This little dance should be dropped eventually when the
367 # This little dance should be dropped eventually when the
368 # API is finally improved.
368 # API is finally improved.
369 stream = util.chunkbuffer(ret.getchunks())
369 stream = util.chunkbuffer(ret.getchunks())
370 ret = bundle2.getunbundler(self.ui, stream)
370 ret = bundle2.getunbundler(self.ui, stream)
371 return ret
371 return ret
372 except Exception as exc:
372 except Exception as exc:
373 # If the exception contains output salvaged from a bundle2
373 # If the exception contains output salvaged from a bundle2
374 # reply, we need to make sure it is printed before continuing
374 # reply, we need to make sure it is printed before continuing
375 # to fail. So we build a bundle2 with such output and consume
375 # to fail. So we build a bundle2 with such output and consume
376 # it directly.
376 # it directly.
377 #
377 #
378 # This is not very elegant but allows a "simple" solution for
378 # This is not very elegant but allows a "simple" solution for
379 # issue4594
379 # issue4594
380 output = getattr(exc, '_bundle2salvagedoutput', ())
380 output = getattr(exc, '_bundle2salvagedoutput', ())
381 if output:
381 if output:
382 bundler = bundle2.bundle20(self._repo.ui)
382 bundler = bundle2.bundle20(self._repo.ui)
383 for out in output:
383 for out in output:
384 bundler.addpart(out)
384 bundler.addpart(out)
385 stream = util.chunkbuffer(bundler.getchunks())
385 stream = util.chunkbuffer(bundler.getchunks())
386 b = bundle2.getunbundler(self.ui, stream)
386 b = bundle2.getunbundler(self.ui, stream)
387 bundle2.processbundle(self._repo, b)
387 bundle2.processbundle(self._repo, b)
388 raise
388 raise
389 except error.PushRaced as exc:
389 except error.PushRaced as exc:
390 raise error.ResponseError(
390 raise error.ResponseError(
391 _(b'push failed:'), stringutil.forcebytestr(exc)
391 _(b'push failed:'), stringutil.forcebytestr(exc)
392 )
392 )
393
393
394 # End of _basewirecommands interface.
394 # End of _basewirecommands interface.
395
395
396 # Start of peer interface.
396 # Start of peer interface.
397
397
398 def commandexecutor(self):
398 def commandexecutor(self):
399 return localcommandexecutor(self)
399 return localcommandexecutor(self)
400
400
401 # End of peer interface.
401 # End of peer interface.
402
402
403
403
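# Illustrative sketch: the wire-command surface of a local peer simply
# forwards to the underlying repo, filtered to the 'served' view.
# Assumes a repository in the current working directory.
def _example_localpeer():
    from mercurial import hg, ui as uimod

    peer = hg.repository(uimod.ui.load(), b'.').peer()
    tip = peer.lookup(b'tip')          # same answer as repo.lookup(b'tip')
    assert peer.known([tip]) == [True]
    return peer.capabilities()         # set after _restrictcapabilities()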
404 @interfaceutil.implementer(repository.ipeerlegacycommands)
404 @interfaceutil.implementer(repository.ipeerlegacycommands)
405 class locallegacypeer(localpeer):
405 class locallegacypeer(localpeer):
406 """peer extension which implements legacy methods too; used for tests with
406 """peer extension which implements legacy methods too; used for tests with
407 restricted capabilities"""
407 restricted capabilities"""
408
408
409 def __init__(self, repo):
409 def __init__(self, repo):
410 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
410 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
411
411
412 # Start of baselegacywirecommands interface.
412 # Start of baselegacywirecommands interface.
413
413
414 def between(self, pairs):
414 def between(self, pairs):
415 return self._repo.between(pairs)
415 return self._repo.between(pairs)
416
416
417 def branches(self, nodes):
417 def branches(self, nodes):
418 return self._repo.branches(nodes)
418 return self._repo.branches(nodes)
419
419
420 def changegroup(self, nodes, source):
420 def changegroup(self, nodes, source):
421 outgoing = discovery.outgoing(
421 outgoing = discovery.outgoing(
422 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
422 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
423 )
423 )
424 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
424 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
425
425
426 def changegroupsubset(self, bases, heads, source):
426 def changegroupsubset(self, bases, heads, source):
427 outgoing = discovery.outgoing(
427 outgoing = discovery.outgoing(
428 self._repo, missingroots=bases, ancestorsof=heads
428 self._repo, missingroots=bases, ancestorsof=heads
429 )
429 )
430 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
430 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
431
431
432 # End of baselegacywirecommands interface.
432 # End of baselegacywirecommands interface.
433
433
434
434
435 # Functions receiving (ui, features) that extensions can register to impact
435 # Functions receiving (ui, features) that extensions can register to impact
436 # the ability to load repositories with custom requirements. Only
436 # the ability to load repositories with custom requirements. Only
437 # functions defined in loaded extensions are called.
437 # functions defined in loaded extensions are called.
438 #
438 #
439 # The function receives a set of requirement strings that the repository
439 # The function receives a set of requirement strings that the repository
440 # is capable of opening. Functions will typically add elements to the
440 # is capable of opening. Functions will typically add elements to the
441 # set to reflect that the extension knows how to handle those requirements.
441 # set to reflect that the extension knows how to handle those requirements.
442 featuresetupfuncs = set()
442 featuresetupfuncs = set()
443
443
444
444
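# Illustrative registration from an extension's uisetup() hook
# (b'exp-fancystore' is a made-up requirement name):
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, features):
#         # advertise that this extension can open repos with the requirement
#         features.add(b'exp-fancystore')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)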
445 def _getsharedvfs(hgvfs, requirements):
445 def _getsharedvfs(hgvfs, requirements):
446 """returns the vfs object pointing to root of shared source
446 """returns the vfs object pointing to root of shared source
447 repo for a shared repository
447 repo for a shared repository
448
448
449 hgvfs is vfs pointing at .hg/ of current repo (shared one)
449 hgvfs is vfs pointing at .hg/ of current repo (shared one)
450 requirements is a set of requirements of current repo (shared one)
450 requirements is a set of requirements of current repo (shared one)
451 """
451 """
452 # The ``shared`` or ``relshared`` requirements indicate the
452 # The ``shared`` or ``relshared`` requirements indicate the
453 # store lives in the path contained in the ``.hg/sharedpath`` file.
453 # store lives in the path contained in the ``.hg/sharedpath`` file.
454 # This is an absolute path for ``shared`` and relative to
454 # This is an absolute path for ``shared`` and relative to
455 # ``.hg/`` for ``relshared``.
455 # ``.hg/`` for ``relshared``.
456 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
456 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
457 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
457 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
458 sharedpath = hgvfs.join(sharedpath)
458 sharedpath = hgvfs.join(sharedpath)
459
459
460 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
460 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
461
461
462 if not sharedvfs.exists():
462 if not sharedvfs.exists():
463 raise error.RepoError(
463 raise error.RepoError(
464 _(b'.hg/sharedpath points to nonexistent directory %s')
464 _(b'.hg/sharedpath points to nonexistent directory %s')
465 % sharedvfs.base
465 % sharedvfs.base
466 )
466 )
467 return sharedvfs
467 return sharedvfs
468
468
469
469
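# A plain-Python mirror of the resolution above (illustrative sketch;
# `hg_dir` is assumed to be the .hg directory of the share):
def _resolve_sharedpath_sketch(hg_dir):
    import os

    with open(os.path.join(hg_dir, b'sharedpath'), 'rb') as fh:
        shared = fh.read().rstrip(b'\n')
    # `relshared` stores a path relative to .hg/; `shared` stores an
    # absolute path, which os.path.join() returns unchanged.
    return os.path.join(hg_dir, shared)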
470 def _readrequires(vfs, allowmissing):
470 def _readrequires(vfs, allowmissing):
471 """reads the require file present at root of this vfs
471 """reads the require file present at root of this vfs
472 and return a set of requirements
472 and return a set of requirements
473
473
474 If allowmissing is True, we suppress ENOENT if raised"""
474 If allowmissing is True, we suppress ENOENT if raised"""
475 # requires file contains a newline-delimited list of
475 # requires file contains a newline-delimited list of
476 # features/capabilities the opener (us) must have in order to use
476 # features/capabilities the opener (us) must have in order to use
477 # the repository. This file was introduced in Mercurial 0.9.2,
477 # the repository. This file was introduced in Mercurial 0.9.2,
478 # which means very old repositories may not have one. We assume
478 # which means very old repositories may not have one. We assume
479 # a missing file translates to no requirements.
479 # a missing file translates to no requirements.
480 try:
480 try:
481 requirements = set(vfs.read(b'requires').splitlines())
481 requirements = set(vfs.read(b'requires').splitlines())
482 except IOError as e:
482 except IOError as e:
483 if not (allowmissing and e.errno == errno.ENOENT):
483 if not (allowmissing and e.errno == errno.ENOENT):
484 raise
484 raise
485 requirements = set()
485 requirements = set()
486 return requirements
486 return requirements
487
487
488
488
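# The on-disk format handled above is trivial: one requirement per line,
# empty set when the file is absent. A standalone sketch (assumes `path`
# is a repository root):
def _read_requires_sketch(path):
    import os

    try:
        with open(os.path.join(path, '.hg', 'requires'), 'rb') as fh:
            return set(fh.read().splitlines())
    except FileNotFoundError:  # pre-0.9.2 repository
        return set()
# A modern repository might yield, e.g.:
# {b'dotencode', b'fncache', b'generaldelta', b'revlogv1',
#  b'sparserevlog', b'store'}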
489 def makelocalrepository(baseui, path, intents=None):
489 def makelocalrepository(baseui, path, intents=None):
490 """Create a local repository object.
490 """Create a local repository object.
491
491
492 Given arguments needed to construct a local repository, this function
492 Given arguments needed to construct a local repository, this function
493 performs various early repository loading functionality (such as
493 performs various early repository loading functionality (such as
494 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
494 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
495 the repository can be opened, derives a type suitable for representing
495 the repository can be opened, derives a type suitable for representing
496 that repository, and returns an instance of it.
496 that repository, and returns an instance of it.
497
497
498 The returned object conforms to the ``repository.completelocalrepository``
498 The returned object conforms to the ``repository.completelocalrepository``
499 interface.
499 interface.
500
500
501 The repository type is derived by calling a series of factory functions
501 The repository type is derived by calling a series of factory functions
502 for each aspect/interface of the final repository. These are defined by
502 for each aspect/interface of the final repository. These are defined by
503 ``REPO_INTERFACES``.
503 ``REPO_INTERFACES``.
504
504
505 Each factory function is called to produce a type implementing a specific
505 Each factory function is called to produce a type implementing a specific
506 interface. The cumulative list of returned types will be combined into a
506 interface. The cumulative list of returned types will be combined into a
507 new type and that type will be instantiated to represent the local
507 new type and that type will be instantiated to represent the local
508 repository.
508 repository.
509
509
510 The factory functions each receive various state that may be consulted
510 The factory functions each receive various state that may be consulted
511 as part of deriving a type.
511 as part of deriving a type.
512
512
513 Extensions should wrap these factory functions to customize repository type
513 Extensions should wrap these factory functions to customize repository type
514 creation. Note that an extension's wrapped function may be called even if
514 creation. Note that an extension's wrapped function may be called even if
515 that extension is not loaded for the repo being constructed. Extensions
515 that extension is not loaded for the repo being constructed. Extensions
516 should check if their ``__name__`` appears in the
516 should check if their ``__name__`` appears in the
517 ``extensionmodulenames`` set passed to the factory function and no-op if
517 ``extensionmodulenames`` set passed to the factory function and no-op if
518 not.
518 not.
519 """
519 """
520 ui = baseui.copy()
520 ui = baseui.copy()
521 # Prevent copying repo configuration.
521 # Prevent copying repo configuration.
522 ui.copy = baseui.copy
522 ui.copy = baseui.copy
523
523
524 # Working directory VFS rooted at repository root.
524 # Working directory VFS rooted at repository root.
525 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
525 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
526
526
527 # Main VFS for .hg/ directory.
527 # Main VFS for .hg/ directory.
528 hgpath = wdirvfs.join(b'.hg')
528 hgpath = wdirvfs.join(b'.hg')
529 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
529 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
530 # Whether this repository is a shared one or not
530 # Whether this repository is a shared one or not
531 shared = False
531 shared = False
532 # If this repository is shared, the vfs pointing to the shared repo
532 # If this repository is shared, the vfs pointing to the shared repo
533 sharedvfs = None
533 sharedvfs = None
534
534
535 # The .hg/ path should exist and should be a directory. All other
535 # The .hg/ path should exist and should be a directory. All other
536 # cases are errors.
536 # cases are errors.
537 if not hgvfs.isdir():
537 if not hgvfs.isdir():
538 try:
538 try:
539 hgvfs.stat()
539 hgvfs.stat()
540 except OSError as e:
540 except OSError as e:
541 if e.errno != errno.ENOENT:
541 if e.errno != errno.ENOENT:
542 raise
542 raise
543 except ValueError as e:
543 except ValueError as e:
544 # Can be raised on Python 3.8 when path is invalid.
544 # Can be raised on Python 3.8 when path is invalid.
545 raise error.Abort(
545 raise error.Abort(
546 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
546 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
547 )
547 )
548
548
549 raise error.RepoError(_(b'repository %s not found') % path)
549 raise error.RepoError(_(b'repository %s not found') % path)
550
550
551 requirements = _readrequires(hgvfs, True)
551 requirements = _readrequires(hgvfs, True)
552 shared = (
552 shared = (
553 requirementsmod.SHARED_REQUIREMENT in requirements
553 requirementsmod.SHARED_REQUIREMENT in requirements
554 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
554 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
555 )
555 )
556 storevfs = None
556 storevfs = None
557 if shared:
557 if shared:
558 # This is a shared repo
558 # This is a shared repo
559 sharedvfs = _getsharedvfs(hgvfs, requirements)
559 sharedvfs = _getsharedvfs(hgvfs, requirements)
560 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
560 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
561 else:
561 else:
562 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
562 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
563
563
564 # if .hg/requires contains the sharesafe requirement, it means
564 # if .hg/requires contains the sharesafe requirement, it means
565 # there exists a `.hg/store/requires` too and we should read it
565 # there exists a `.hg/store/requires` too and we should read it
566 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
566 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
567 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
567 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
568 # is not present; refer to checkrequirementscompat() for that
568 # is not present; refer to checkrequirementscompat() for that
569 #
569 #
570 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
570 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
571 # repository was shared the old way. We check the share source .hg/requires
571 # repository was shared the old way. We check the share source .hg/requires
572 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
572 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
573 # to be reshared
573 # to be reshared
574 hint = _("see `hg help config.format.use-share-safe` for more information")
574 hint = _("see `hg help config.format.use-share-safe` for more information")
575 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
575 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
576
576
577 if (
577 if (
578 shared
578 shared
579 and requirementsmod.SHARESAFE_REQUIREMENT
579 and requirementsmod.SHARESAFE_REQUIREMENT
580 not in _readrequires(sharedvfs, True)
580 not in _readrequires(sharedvfs, True)
581 ):
581 ):
582 mismatch_warn = ui.configbool(
582 mismatch_warn = ui.configbool(
583 b'share', b'safe-mismatch.source-not-safe.warn'
583 b'share', b'safe-mismatch.source-not-safe.warn'
584 )
584 )
585 mismatch_config = ui.config(
585 mismatch_config = ui.config(
586 b'share', b'safe-mismatch.source-not-safe'
586 b'share', b'safe-mismatch.source-not-safe'
587 )
587 )
588 if mismatch_config in (
588 if mismatch_config in (
589 b'downgrade-allow',
589 b'downgrade-allow',
590 b'allow',
590 b'allow',
591 b'downgrade-abort',
591 b'downgrade-abort',
592 ):
592 ):
593 # prevent cyclic import localrepo -> upgrade -> localrepo
593 # prevent cyclic import localrepo -> upgrade -> localrepo
594 from . import upgrade
594 from . import upgrade
595
595
596 upgrade.downgrade_share_to_non_safe(
596 upgrade.downgrade_share_to_non_safe(
597 ui,
597 ui,
598 hgvfs,
598 hgvfs,
599 sharedvfs,
599 sharedvfs,
600 requirements,
600 requirements,
601 mismatch_config,
601 mismatch_config,
602 mismatch_warn,
602 mismatch_warn,
603 )
603 )
604 elif mismatch_config == b'abort':
604 elif mismatch_config == b'abort':
605 raise error.Abort(
605 raise error.Abort(
606 _(b"share source does not support share-safe requirement"),
606 _(b"share source does not support share-safe requirement"),
607 hint=hint,
607 hint=hint,
608 )
608 )
609 else:
609 else:
610 raise error.Abort(
610 raise error.Abort(
611 _(
611 _(
612 b"share-safe mismatch with source.\nUnrecognized"
612 b"share-safe mismatch with source.\nUnrecognized"
613 b" value '%s' of `share.safe-mismatch.source-not-safe`"
613 b" value '%s' of `share.safe-mismatch.source-not-safe`"
614 b" set."
614 b" set."
615 )
615 )
616 % mismatch_config,
616 % mismatch_config,
617 hint=hint,
617 hint=hint,
618 )
618 )
619 else:
619 else:
620 requirements |= _readrequires(storevfs, False)
620 requirements |= _readrequires(storevfs, False)
621 elif shared:
621 elif shared:
622 sourcerequires = _readrequires(sharedvfs, False)
622 sourcerequires = _readrequires(sharedvfs, False)
623 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
623 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
624 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
624 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
625 mismatch_warn = ui.configbool(
625 mismatch_warn = ui.configbool(
626 b'share', b'safe-mismatch.source-safe.warn'
626 b'share', b'safe-mismatch.source-safe.warn'
627 )
627 )
628 if mismatch_config in (
628 if mismatch_config in (
629 b'upgrade-allow',
629 b'upgrade-allow',
630 b'allow',
630 b'allow',
631 b'upgrade-abort',
631 b'upgrade-abort',
632 ):
632 ):
633 # prevent cyclic import localrepo -> upgrade -> localrepo
633 # prevent cyclic import localrepo -> upgrade -> localrepo
634 from . import upgrade
634 from . import upgrade
635
635
636 upgrade.upgrade_share_to_safe(
636 upgrade.upgrade_share_to_safe(
637 ui,
637 ui,
638 hgvfs,
638 hgvfs,
639 storevfs,
639 storevfs,
640 requirements,
640 requirements,
641 mismatch_config,
641 mismatch_config,
642 mismatch_warn,
642 mismatch_warn,
643 )
643 )
644 elif mismatch_config == b'abort':
644 elif mismatch_config == b'abort':
645 raise error.Abort(
645 raise error.Abort(
646 _(
646 _(
647 b'version mismatch: source uses share-safe'
647 b'version mismatch: source uses share-safe'
648 b' functionality while the current share does not'
648 b' functionality while the current share does not'
649 ),
649 ),
650 hint=hint,
650 hint=hint,
651 )
651 )
652 else:
652 else:
653 raise error.Abort(
653 raise error.Abort(
654 _(
654 _(
655 b"share-safe mismatch with source.\nUnrecognized"
655 b"share-safe mismatch with source.\nUnrecognized"
656 b" value '%s' of `share.safe-mismatch.source-safe` set."
656 b" value '%s' of `share.safe-mismatch.source-safe` set."
657 )
657 )
658 % mismatch_config,
658 % mismatch_config,
659 hint=hint,
659 hint=hint,
660 )
660 )
661
661
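# The knobs consulted above live in the user's configuration, e.g.
# (illustrative values drawn from the accepted sets):
#
#     [share]
#     safe-mismatch.source-not-safe = downgrade-abort
#     safe-mismatch.source-not-safe.warn = True
#     safe-mismatch.source-safe = upgrade-allow
#     safe-mismatch.source-safe.warn = True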
662 # The .hg/hgrc file may load extensions or contain config options
662 # The .hg/hgrc file may load extensions or contain config options
663 # that influence repository construction. Attempt to load it and
663 # that influence repository construction. Attempt to load it and
664 # process any new extensions that it may have pulled in.
664 # process any new extensions that it may have pulled in.
665 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
665 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
666 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
666 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
667 extensions.loadall(ui)
667 extensions.loadall(ui)
668 extensions.populateui(ui)
668 extensions.populateui(ui)
669
669
670 # Set of module names of extensions loaded for this repository.
670 # Set of module names of extensions loaded for this repository.
671 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
671 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
672
672
673 supportedrequirements = gathersupportedrequirements(ui)
673 supportedrequirements = gathersupportedrequirements(ui)
674
674
675 # We first validate the requirements are known.
675 # We first validate the requirements are known.
676 ensurerequirementsrecognized(requirements, supportedrequirements)
676 ensurerequirementsrecognized(requirements, supportedrequirements)
677
677
678 # Then we validate that the known set is reasonable to use together.
678 # Then we validate that the known set is reasonable to use together.
679 ensurerequirementscompatible(ui, requirements)
679 ensurerequirementscompatible(ui, requirements)
680
680
681 # TODO there are unhandled edge cases related to opening repositories with
681 # TODO there are unhandled edge cases related to opening repositories with
682 # shared storage. If storage is shared, we should also test for requirements
682 # shared storage. If storage is shared, we should also test for requirements
683 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
683 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
684 # that repo, as that repo may load extensions needed to open it. This is a
684 # that repo, as that repo may load extensions needed to open it. This is a
685 # bit complicated because we don't want the other hgrc to overwrite settings
685 # bit complicated because we don't want the other hgrc to overwrite settings
686 # in this hgrc.
686 # in this hgrc.
687 #
687 #
688 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
688 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
689 # file when sharing repos. But if a requirement is added after the share is
689 # file when sharing repos. But if a requirement is added after the share is
690 # performed, thereby introducing a new requirement for the opener, we may
690 # performed, thereby introducing a new requirement for the opener, we may
691 # not see that and could encounter a run-time error interacting with
691 # not see that and could encounter a run-time error interacting with
692 # that shared store since it has an unknown-to-us requirement.
692 # that shared store since it has an unknown-to-us requirement.
693
693
694 # At this point, we know we should be capable of opening the repository.
694 # At this point, we know we should be capable of opening the repository.
695 # Now get on with doing that.
695 # Now get on with doing that.
696
696
697 features = set()
697 features = set()
698
698
699 # The "store" part of the repository holds versioned data. How it is
699 # The "store" part of the repository holds versioned data. How it is
700 # accessed is determined by various requirements. If `shared` or
700 # accessed is determined by various requirements. If `shared` or
701 # `relshared` requirements are present, this indicates the current repository
701 # `relshared` requirements are present, this indicates the current repository
702 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
702 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
703 if shared:
703 if shared:
704 storebasepath = sharedvfs.base
704 storebasepath = sharedvfs.base
705 cachepath = sharedvfs.join(b'cache')
705 cachepath = sharedvfs.join(b'cache')
706 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
706 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
707 else:
707 else:
708 storebasepath = hgvfs.base
708 storebasepath = hgvfs.base
709 cachepath = hgvfs.join(b'cache')
709 cachepath = hgvfs.join(b'cache')
710 wcachepath = hgvfs.join(b'wcache')
710 wcachepath = hgvfs.join(b'wcache')
711
711
712 # The store has changed over time and the exact layout is dictated by
712 # The store has changed over time and the exact layout is dictated by
713 # requirements. The store interface abstracts differences across all
713 # requirements. The store interface abstracts differences across all
714 # of them.
714 # of them.
715 store = makestore(
715 store = makestore(
716 requirements,
716 requirements,
717 storebasepath,
717 storebasepath,
718 lambda base: vfsmod.vfs(base, cacheaudited=True),
718 lambda base: vfsmod.vfs(base, cacheaudited=True),
719 )
719 )
720 hgvfs.createmode = store.createmode
720 hgvfs.createmode = store.createmode
721
721
722 storevfs = store.vfs
722 storevfs = store.vfs
723 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
723 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
724
724
725 # The cache vfs is used to manage cache files.
725 # The cache vfs is used to manage cache files.
726 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
726 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
727 cachevfs.createmode = store.createmode
727 cachevfs.createmode = store.createmode
728 # The wcache vfs is used to manage cache files related to the working copy
728 # The wcache vfs is used to manage cache files related to the working copy
729 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
729 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
730 wcachevfs.createmode = store.createmode
730 wcachevfs.createmode = store.createmode
731
731
732 # Now resolve the type for the repository object. We do this by repeatedly
732 # Now resolve the type for the repository object. We do this by repeatedly
733 # calling a factory function to produce types for specific aspects of the
733 # calling a factory function to produce types for specific aspects of the
734 # repo's operation. The aggregate returned types are used as base classes
734 # repo's operation. The aggregate returned types are used as base classes
735 # for a dynamically-derived type, which will represent our new repository.
735 # for a dynamically-derived type, which will represent our new repository.
736
736
737 bases = []
737 bases = []
738 extrastate = {}
738 extrastate = {}
739
739
740 for iface, fn in REPO_INTERFACES:
740 for iface, fn in REPO_INTERFACES:
741 # We pass all potentially useful state to give extensions tons of
741 # We pass all potentially useful state to give extensions tons of
742 # flexibility.
742 # flexibility.
743 typ = fn()(
743 typ = fn()(
744 ui=ui,
744 ui=ui,
745 intents=intents,
745 intents=intents,
746 requirements=requirements,
746 requirements=requirements,
747 features=features,
747 features=features,
748 wdirvfs=wdirvfs,
748 wdirvfs=wdirvfs,
749 hgvfs=hgvfs,
749 hgvfs=hgvfs,
750 store=store,
750 store=store,
751 storevfs=storevfs,
751 storevfs=storevfs,
752 storeoptions=storevfs.options,
752 storeoptions=storevfs.options,
753 cachevfs=cachevfs,
753 cachevfs=cachevfs,
754 wcachevfs=wcachevfs,
754 wcachevfs=wcachevfs,
755 extensionmodulenames=extensionmodulenames,
755 extensionmodulenames=extensionmodulenames,
756 extrastate=extrastate,
756 extrastate=extrastate,
757 baseclasses=bases,
757 baseclasses=bases,
758 )
758 )
759
759
760 if not isinstance(typ, type):
760 if not isinstance(typ, type):
761 raise error.ProgrammingError(
761 raise error.ProgrammingError(
762 b'unable to construct type for %s' % iface
762 b'unable to construct type for %s' % iface
763 )
763 )
764
764
765 bases.append(typ)
765 bases.append(typ)
766
766
767 # type() allows you to use characters in type names that wouldn't be
767 # type() allows you to use characters in type names that wouldn't be
768 # recognized as Python symbols in source code. We abuse that to add
768 # recognized as Python symbols in source code. We abuse that to add
769 # rich information about our constructed repo.
769 # rich information about our constructed repo.
770 name = pycompat.sysstr(
770 name = pycompat.sysstr(
771 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
771 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
772 )
772 )
773
773
774 cls = type(name, tuple(bases), {})
774 cls = type(name, tuple(bases), {})
775
775
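# For instance (illustrative), a repository at /srv/repo with
# requirements {b'revlogv1', b'store'} produces a type named
# 'derivedrepo:/srv/repo<revlogv1,store>'.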
776 return cls(
776 return cls(
777 baseui=baseui,
777 baseui=baseui,
778 ui=ui,
778 ui=ui,
779 origroot=path,
779 origroot=path,
780 wdirvfs=wdirvfs,
780 wdirvfs=wdirvfs,
781 hgvfs=hgvfs,
781 hgvfs=hgvfs,
782 requirements=requirements,
782 requirements=requirements,
783 supportedrequirements=supportedrequirements,
783 supportedrequirements=supportedrequirements,
784 sharedpath=storebasepath,
784 sharedpath=storebasepath,
785 store=store,
785 store=store,
786 cachevfs=cachevfs,
786 cachevfs=cachevfs,
787 wcachevfs=wcachevfs,
787 wcachevfs=wcachevfs,
788 features=features,
788 features=features,
789 intents=intents,
789 intents=intents,
790 )
790 )
791
791
792
792
793 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
793 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
794 """Load hgrc files/content into a ui instance.
794 """Load hgrc files/content into a ui instance.
795
795
796 This is called during repository opening to load any additional
796 This is called during repository opening to load any additional
797 config files or settings relevant to the current repository.
797 config files or settings relevant to the current repository.
798
798
799 Returns a bool indicating whether any additional configs were loaded.
799 Returns a bool indicating whether any additional configs were loaded.
800
800
801 Extensions should monkeypatch this function to modify how per-repo
801 Extensions should monkeypatch this function to modify how per-repo
802 configs are loaded. For example, an extension may wish to pull in
802 configs are loaded. For example, an extension may wish to pull in
803 configs from alternate files or sources.
803 configs from alternate files or sources.
804
804
805 sharedvfs is a vfs object pointing to the source repo if the current one is a
805 sharedvfs is a vfs object pointing to the source repo if the current one is a
806 shared one
806 shared one
807 """
807 """
808 if not rcutil.use_repo_hgrc():
808 if not rcutil.use_repo_hgrc():
809 return False
809 return False
810
810
811 ret = False
811 ret = False
812 # first load config from shared source if we have to
812 # first load config from shared source if we have to
813 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
813 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
814 try:
814 try:
815 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
815 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
816 ret = True
816 ret = True
817 except IOError:
817 except IOError:
818 pass
818 pass
819
819
820 try:
820 try:
821 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
821 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
822 ret = True
822 ret = True
823 except IOError:
823 except IOError:
824 pass
824 pass
825
825
826 try:
826 try:
827 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
827 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
828 ret = True
828 ret = True
829 except IOError:
829 except IOError:
830 pass
830 pass
831
831
832 return ret
832 return ret
833
833
834
834
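# Illustrative interposition from an extension (the b'hgrc-extra' file
# name is hypothetical):
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             loaded = True
#         except IOError:
#             pass
#         return loaded
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)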
835 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
835 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
836 """Perform additional actions after .hg/hgrc is loaded.
836 """Perform additional actions after .hg/hgrc is loaded.
837
837
838 This function is called during repository loading immediately after
838 This function is called during repository loading immediately after
839 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
839 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
840
840
841 The function can be used to validate configs, automatically add
841 The function can be used to validate configs, automatically add
842 options (including extensions) based on requirements, etc.
842 options (including extensions) based on requirements, etc.
843 """
843 """
844
844
845 # Map of requirements to list of extensions to load automatically when
845 # Map of requirements to list of extensions to load automatically when
846 # requirement is present.
846 # requirement is present.
847 autoextensions = {
847 autoextensions = {
848 b'git': [b'git'],
848 b'git': [b'git'],
849 b'largefiles': [b'largefiles'],
849 b'largefiles': [b'largefiles'],
850 b'lfs': [b'lfs'],
850 b'lfs': [b'lfs'],
851 }
851 }
852
852
853 for requirement, names in sorted(autoextensions.items()):
853 for requirement, names in sorted(autoextensions.items()):
854 if requirement not in requirements:
854 if requirement not in requirements:
855 continue
855 continue
856
856
857 for name in names:
857 for name in names:
858 if not ui.hasconfig(b'extensions', name):
858 if not ui.hasconfig(b'extensions', name):
859 ui.setconfig(b'extensions', name, b'', source=b'autoload')
859 ui.setconfig(b'extensions', name, b'', source=b'autoload')
860
860
861
861
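# Net effect (illustrative): a repository whose requires file lists
# b'lfs' gets the equivalent of the following injected with source
# 'autoload':
#
#     [extensions]
#     lfs =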
862 def gathersupportedrequirements(ui):
862 def gathersupportedrequirements(ui):
863 """Determine the complete set of recognized requirements."""
863 """Determine the complete set of recognized requirements."""
864 # Start with all requirements supported by this file.
864 # Start with all requirements supported by this file.
865 supported = set(localrepository._basesupported)
865 supported = set(localrepository._basesupported)
866
866
867 # Execute ``featuresetupfuncs`` entries if they belong to an extension
867 # Execute ``featuresetupfuncs`` entries if they belong to an extension
868 # relevant to this ui instance.
868 # relevant to this ui instance.
869 modules = {m.__name__ for n, m in extensions.extensions(ui)}
869 modules = {m.__name__ for n, m in extensions.extensions(ui)}
870
870
871 for fn in featuresetupfuncs:
871 for fn in featuresetupfuncs:
872 if fn.__module__ in modules:
872 if fn.__module__ in modules:
873 fn(ui, supported)
873 fn(ui, supported)
874
874
875 # Add derived requirements from registered compression engines.
875 # Add derived requirements from registered compression engines.
876 for name in util.compengines:
876 for name in util.compengines:
877 engine = util.compengines[name]
877 engine = util.compengines[name]
878 if engine.available() and engine.revlogheader():
878 if engine.available() and engine.revlogheader():
879 supported.add(b'exp-compression-%s' % name)
879 supported.add(b'exp-compression-%s' % name)
880 if engine.name() == b'zstd':
880 if engine.name() == b'zstd':
881 supported.add(b'revlog-compression-zstd')
881 supported.add(b'revlog-compression-zstd')
882
882
883 return supported
883 return supported
884
884
885
885
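# For example, when a revlog-capable zstd engine is compiled in, the set
# gains b'exp-compression-zstd' plus the stable b'revlog-compression-zstd'
# spelling checked above.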
886 def ensurerequirementsrecognized(requirements, supported):
886 def ensurerequirementsrecognized(requirements, supported):
887 """Validate that a set of local requirements is recognized.
887 """Validate that a set of local requirements is recognized.
888
888
889 Receives a set of requirements. Raises an ``error.RepoError`` if there
889 Receives a set of requirements. Raises an ``error.RepoError`` if there
890 exists any requirement in that set that currently loaded code doesn't
890 exists any requirement in that set that currently loaded code doesn't
891 recognize.
891 recognize.
892
892
893 Returns nothing on success.
893 Returns nothing on success.
894 """
894 """
895 missing = set()
895 missing = set()
896
896
897 for requirement in requirements:
897 for requirement in requirements:
898 if requirement in supported:
898 if requirement in supported:
899 continue
899 continue
900
900
901 if not requirement or not requirement[0:1].isalnum():
901 if not requirement or not requirement[0:1].isalnum():
902 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
902 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
903
903
904 missing.add(requirement)
904 missing.add(requirement)
905
905
906 if missing:
906 if missing:
907 raise error.RequirementError(
907 raise error.RequirementError(
908 _(b'repository requires features unknown to this Mercurial: %s')
908 _(b'repository requires features unknown to this Mercurial: %s')
909 % b' '.join(sorted(missing)),
909 % b' '.join(sorted(missing)),
910 hint=_(
910 hint=_(
911 b'see https://mercurial-scm.org/wiki/MissingRequirement '
911 b'see https://mercurial-scm.org/wiki/MissingRequirement '
912 b'for more information'
912 b'for more information'
913 ),
913 ),
914 )
914 )
915
915
916
916
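# Illustrative failure mode (b'exp-unknown-feature' is a made-up name):
def _example_unknown_requirement():
    from mercurial import error

    supported = {b'store', b'fncache', b'revlogv1'}
    try:
        ensurerequirementsrecognized(
            {b'store', b'exp-unknown-feature'}, supported
        )
    except error.RequirementError:
        pass  # reported as unknown to this Mercurial, with a wiki hint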
917 def ensurerequirementscompatible(ui, requirements):
917 def ensurerequirementscompatible(ui, requirements):
918 """Validates that a set of recognized requirements is mutually compatible.
918 """Validates that a set of recognized requirements is mutually compatible.
919
919
920 Some requirements may not be compatible with others or require
920 Some requirements may not be compatible with others or require
921 config options that aren't enabled. This function is called during
921 config options that aren't enabled. This function is called during
922 repository opening to ensure that the set of requirements needed
922 repository opening to ensure that the set of requirements needed
923 to open a repository is sane and compatible with config options.
923 to open a repository is sane and compatible with config options.
924
924
925 Extensions can monkeypatch this function to perform additional
925 Extensions can monkeypatch this function to perform additional
926 checking.
926 checking.
927
927
928 ``error.RepoError`` should be raised on failure.
928 ``error.RepoError`` should be raised on failure.
929 """
929 """
930 if (
930 if (
931 requirementsmod.SPARSE_REQUIREMENT in requirements
931 requirementsmod.SPARSE_REQUIREMENT in requirements
932 and not sparse.enabled
932 and not sparse.enabled
933 ):
933 ):
934 raise error.RepoError(
934 raise error.RepoError(
935 _(
935 _(
936 b'repository is using sparse feature but '
936 b'repository is using sparse feature but '
937 b'sparse is not enabled; enable the '
937 b'sparse is not enabled; enable the '
938 b'"sparse" extensions to access'
938 b'"sparse" extensions to access'
939 )
939 )
940 )
940 )
941
941
942
942
943 def makestore(requirements, path, vfstype):
943 def makestore(requirements, path, vfstype):
944 """Construct a storage object for a repository."""
944 """Construct a storage object for a repository."""
945 if b'store' in requirements:
945 if b'store' in requirements:
946 if b'fncache' in requirements:
946 if b'fncache' in requirements:
947 return storemod.fncachestore(
947 return storemod.fncachestore(
948 path, vfstype, b'dotencode' in requirements
948 path, vfstype, b'dotencode' in requirements
949 )
949 )
950
950
951 return storemod.encodedstore(path, vfstype)
951 return storemod.encodedstore(path, vfstype)
952
952
953 return storemod.basicstore(path, vfstype)
953 return storemod.basicstore(path, vfstype)
954
954
955
955
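# Summary of the dispatch above:
#
#   b'store' and b'fncache' present -> storemod.fncachestore
#       (b'dotencode' additionally toggles dot/hash path encoding)
#   b'store' only                   -> storemod.encodedstore
#   neither                         -> storemod.basicstore (very old repos)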
956 def resolvestorevfsoptions(ui, requirements, features):
956 def resolvestorevfsoptions(ui, requirements, features):
957 """Resolve the options to pass to the store vfs opener.
957 """Resolve the options to pass to the store vfs opener.
958
958
959 The returned dict is used to influence behavior of the storage layer.
959 The returned dict is used to influence behavior of the storage layer.
960 """
960 """
961 options = {}
961 options = {}
962
962
963 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
963 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
964 options[b'treemanifest'] = True
964 options[b'treemanifest'] = True
965
965
966 # experimental config: format.manifestcachesize
966 # experimental config: format.manifestcachesize
967 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
967 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
968 if manifestcachesize is not None:
968 if manifestcachesize is not None:
969 options[b'manifestcachesize'] = manifestcachesize
969 options[b'manifestcachesize'] = manifestcachesize
970
970
971 # In the absence of another requirement superseding a revlog-related
971 # In the absence of another requirement superseding a revlog-related
972 # requirement, we have to assume the repo is using revlog version 0.
972 # requirement, we have to assume the repo is using revlog version 0.
973 # This revlog format is super old and we don't bother trying to parse
973 # This revlog format is super old and we don't bother trying to parse
974 # opener options for it because those options wouldn't do anything
974 # opener options for it because those options wouldn't do anything
975 # meaningful on such old repos.
975 # meaningful on such old repos.
976 if (
976 if (
977 b'revlogv1' in requirements
977 requirementsmod.REVLOGV1_REQUIREMENT in requirements
978 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
978 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
979 ):
979 ):
980 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
980 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
981 else: # explicitly mark repo as using revlogv0
981 else: # explicitly mark repo as using revlogv0
982 options[b'revlogv0'] = True
982 options[b'revlogv0'] = True
983
983
984 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
984 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
985 options[b'copies-storage'] = b'changeset-sidedata'
985 options[b'copies-storage'] = b'changeset-sidedata'
986 else:
986 else:
987 writecopiesto = ui.config(b'experimental', b'copies.write-to')
987 writecopiesto = ui.config(b'experimental', b'copies.write-to')
988 copiesextramode = (b'changeset-only', b'compatibility')
988 copiesextramode = (b'changeset-only', b'compatibility')
989 if writecopiesto in copiesextramode:
989 if writecopiesto in copiesextramode:
990 options[b'copies-storage'] = b'extra'
990 options[b'copies-storage'] = b'extra'
991
991
992 return options
992 return options
993
993
994
994
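# Illustrative behaviour (assumes `ui` is a configured ui instance):
def _example_storevfsoptions(ui):
    # no revlog requirement at all -> treated as ancient revlog version 0
    opts = resolvestorevfsoptions(ui, {b'store'}, set())
    assert opts.get(b'revlogv0') is True
    # a revlogv1 repository gets full revlog option resolution instead
    opts = resolvestorevfsoptions(
        ui, {requirementsmod.REVLOGV1_REQUIREMENT}, set()
    )
    assert opts.get(b'revlogv1') is True and b'revlogv0' not in opts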
995 def resolverevlogstorevfsoptions(ui, requirements, features):
995 def resolverevlogstorevfsoptions(ui, requirements, features):
996 """Resolve opener options specific to revlogs."""
996 """Resolve opener options specific to revlogs."""
997
997
998 options = {}
998 options = {}
999 options[b'flagprocessors'] = {}
999 options[b'flagprocessors'] = {}
1000
1000
1001 if b'revlogv1' in requirements:
1001 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1002 options[b'revlogv1'] = True
1002 options[b'revlogv1'] = True
1003 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1003 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1004 options[b'revlogv2'] = True
1004 options[b'revlogv2'] = True
1005
1005
1006 if b'generaldelta' in requirements:
1006 if b'generaldelta' in requirements:
1007 options[b'generaldelta'] = True
1007 options[b'generaldelta'] = True
1008
1008
1009 # experimental config: format.chunkcachesize
1009 # experimental config: format.chunkcachesize
1010 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1010 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1011 if chunkcachesize is not None:
1011 if chunkcachesize is not None:
1012 options[b'chunkcachesize'] = chunkcachesize
1012 options[b'chunkcachesize'] = chunkcachesize
1013
1013
1014 deltabothparents = ui.configbool(
1014 deltabothparents = ui.configbool(
1015 b'storage', b'revlog.optimize-delta-parent-choice'
1015 b'storage', b'revlog.optimize-delta-parent-choice'
1016 )
1016 )
1017 options[b'deltabothparents'] = deltabothparents
1017 options[b'deltabothparents'] = deltabothparents
1018
1018
1019 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1019 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1020 lazydeltabase = False
1020 lazydeltabase = False
1021 if lazydelta:
1021 if lazydelta:
1022 lazydeltabase = ui.configbool(
1022 lazydeltabase = ui.configbool(
1023 b'storage', b'revlog.reuse-external-delta-parent'
1023 b'storage', b'revlog.reuse-external-delta-parent'
1024 )
1024 )
1025 if lazydeltabase is None:
1025 if lazydeltabase is None:
1026 lazydeltabase = not scmutil.gddeltaconfig(ui)
1026 lazydeltabase = not scmutil.gddeltaconfig(ui)
1027 options[b'lazydelta'] = lazydelta
1027 options[b'lazydelta'] = lazydelta
1028 options[b'lazydeltabase'] = lazydeltabase
1028 options[b'lazydeltabase'] = lazydeltabase
1029
1029
1030 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1030 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1031 if 0 <= chainspan:
1031 if 0 <= chainspan:
1032 options[b'maxdeltachainspan'] = chainspan
1032 options[b'maxdeltachainspan'] = chainspan
1033
1033
1034 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1034 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1035 if mmapindexthreshold is not None:
1035 if mmapindexthreshold is not None:
1036 options[b'mmapindexthreshold'] = mmapindexthreshold
1036 options[b'mmapindexthreshold'] = mmapindexthreshold
1037
1037
1038 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1038 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1039 srdensitythres = float(
1039 srdensitythres = float(
1040 ui.config(b'experimental', b'sparse-read.density-threshold')
1040 ui.config(b'experimental', b'sparse-read.density-threshold')
1041 )
1041 )
1042 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1042 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1043 options[b'with-sparse-read'] = withsparseread
1043 options[b'with-sparse-read'] = withsparseread
1044 options[b'sparse-read-density-threshold'] = srdensitythres
1044 options[b'sparse-read-density-threshold'] = srdensitythres
1045 options[b'sparse-read-min-gap-size'] = srmingapsize
1045 options[b'sparse-read-min-gap-size'] = srmingapsize
1046
1046
1047 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1047 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1048 options[b'sparse-revlog'] = sparserevlog
1048 options[b'sparse-revlog'] = sparserevlog
1049 if sparserevlog:
1049 if sparserevlog:
1050 options[b'generaldelta'] = True
1050 options[b'generaldelta'] = True
1051
1051
1052 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1052 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
1053 options[b'side-data'] = sidedata
1053 options[b'side-data'] = sidedata
1054
1054
1055 maxchainlen = None
1055 maxchainlen = None
1056 if sparserevlog:
1056 if sparserevlog:
1057 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1057 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1058 # experimental config: format.maxchainlen
1058 # experimental config: format.maxchainlen
1059 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1059 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1060 if maxchainlen is not None:
1060 if maxchainlen is not None:
1061 options[b'maxchainlen'] = maxchainlen
1061 options[b'maxchainlen'] = maxchainlen
1062
1062
1063 for r in requirements:
1063 for r in requirements:
1064 # we allow multiple compression engine requirements to co-exist because,
1064 # we allow multiple compression engine requirements to co-exist because,
1065 # strictly speaking, revlog seems to support mixed compression styles.
1065 # strictly speaking, revlog seems to support mixed compression styles.
1066 #
1066 #
1067 # The compression used for new entries will be "the last one"
1067 # The compression used for new entries will be "the last one"
1068 prefix = r.startswith
1068 prefix = r.startswith
1069 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1069 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1070 options[b'compengine'] = r.split(b'-', 2)[2]
1070 options[b'compengine'] = r.split(b'-', 2)[2]
1071
1071
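# e.g. b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd', and
# b'exp-compression-none'.split(b'-', 2)[2] == b'none'; set iteration
# order decides which engine "wins" when several requirements co-exist.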
1072 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1072 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1073 if options[b'zlib.level'] is not None:
1073 if options[b'zlib.level'] is not None:
1074 if not (0 <= options[b'zlib.level'] <= 9):
1074 if not (0 <= options[b'zlib.level'] <= 9):
1075 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1075 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1076 raise error.Abort(msg % options[b'zlib.level'])
1076 raise error.Abort(msg % options[b'zlib.level'])
1077 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1077 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1078 if options[b'zstd.level'] is not None:
1078 if options[b'zstd.level'] is not None:
1079 if not (0 <= options[b'zstd.level'] <= 22):
1079 if not (0 <= options[b'zstd.level'] <= 22):
1080 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1080 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1081 raise error.Abort(msg % options[b'zstd.level'])
1081 raise error.Abort(msg % options[b'zstd.level'])
1082
1082
1083 if requirementsmod.NARROW_REQUIREMENT in requirements:
1083 if requirementsmod.NARROW_REQUIREMENT in requirements:
1084 options[b'enableellipsis'] = True
1084 options[b'enableellipsis'] = True
1085
1085
1086 if ui.configbool(b'experimental', b'rust.index'):
1086 if ui.configbool(b'experimental', b'rust.index'):
1087 options[b'rust.index'] = True
1087 options[b'rust.index'] = True
1088 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1088 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1089 slow_path = ui.config(
1089 slow_path = ui.config(
1090 b'storage', b'revlog.persistent-nodemap.slow-path'
1090 b'storage', b'revlog.persistent-nodemap.slow-path'
1091 )
1091 )
1092 if slow_path not in (b'allow', b'warn', b'abort'):
1092 if slow_path not in (b'allow', b'warn', b'abort'):
1093 default = ui.config_default(
1093 default = ui.config_default(
1094 b'storage', b'revlog.persistent-nodemap.slow-path'
1094 b'storage', b'revlog.persistent-nodemap.slow-path'
1095 )
1095 )
1096 msg = _(
1096 msg = _(
1097 b'unknown value for config '
1097 b'unknown value for config '
1098 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1098 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1099 )
1099 )
1100 ui.warn(msg % slow_path)
1100 ui.warn(msg % slow_path)
1101 if not ui.quiet:
1101 if not ui.quiet:
1102 ui.warn(_(b'falling back to default value: %s\n') % default)
1102 ui.warn(_(b'falling back to default value: %s\n') % default)
1103 slow_path = default
1103 slow_path = default
1104
1104
1105 msg = _(
1105 msg = _(
1106 b"accessing `persistent-nodemap` repository without associated "
1106 b"accessing `persistent-nodemap` repository without associated "
1107 b"fast implementation."
1107 b"fast implementation."
1108 )
1108 )
1109 hint = _(
1109 hint = _(
1110 b"check `hg help config.format.use-persistent-nodemap` "
1110 b"check `hg help config.format.use-persistent-nodemap` "
1111 b"for details"
1111 b"for details"
1112 )
1112 )
1113 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1113 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1114 if slow_path == b'warn':
1114 if slow_path == b'warn':
1115 msg = b"warning: " + msg + b'\n'
1115 msg = b"warning: " + msg + b'\n'
1116 ui.warn(msg)
1116 ui.warn(msg)
1117 if not ui.quiet:
1117 if not ui.quiet:
1118 hint = b'(' + hint + b')\n'
1118 hint = b'(' + hint + b')\n'
1119 ui.warn(hint)
1119 ui.warn(hint)
1120 if slow_path == b'abort':
1120 if slow_path == b'abort':
1121 raise error.Abort(msg, hint=hint)
1121 raise error.Abort(msg, hint=hint)
1122 options[b'persistent-nodemap'] = True
1122 options[b'persistent-nodemap'] = True
1123 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1123 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1124 options[b'persistent-nodemap.mmap'] = True
1124 options[b'persistent-nodemap.mmap'] = True
1125 if ui.configbool(b'devel', b'persistent-nodemap'):
1125 if ui.configbool(b'devel', b'persistent-nodemap'):
1126 options[b'devel-force-nodemap'] = True
1126 options[b'devel-force-nodemap'] = True
1127
1127
1128 return options
1128 return options
1129
1129

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

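# Illustrative sketch (assumed, simplified; the real logic lives in
# ``makelocalrepository()``) of how the factories above are combined into
# the final repository class:
#
#   bases = []
#   for iface, fn in REPO_INTERFACES:
#       bases.append(fn()(requirements=requirements, features=features))
#   repocls = type('derivedrepo', tuple(bases), {})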

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

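    # Illustrative usage of ``filtered()`` (assumed, not from this file):
    # ``repo.filtered(b'served')`` returns a repoview hiding changesets that
    # are not served to clients.
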
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light"; the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(
            txnutil.mayhavepending(self.root),
            concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
        )

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

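    # ``narrowmatch()`` example (illustrative): ``repo.narrowmatch(m,
    # includeexact=True)`` keeps paths listed explicitly in ``m`` even when
    # they fall outside the narrowspec, so they can be warned about later.
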
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

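    # Examples of ``__getitem__`` lookups handled above (illustrative):
    #   repo[None]   -> working directory context
    #   repo[b'tip'] -> changectx for the tip revision
    #   repo[0]      -> changectx for revision 0
    #   repo[b'.']   -> changectx for the first working directory parent
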
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

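    # ``revs()`` example (illustrative): ``repo.revs(b'ancestors(%ln)',
    # nodes)`` escapes ``nodes`` as a list of binary nodes; see
    # ``revsetlang.formatspec`` for the supported escapes.
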
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

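    # ``anyrevs()`` example (illustrative): ``repo.anyrevs([b'.',
    # b'draft()'], user=True)`` evaluates both revsets with user-defined
    # aliases expanded.
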
1933 def url(self):
1933 def url(self):
1934 return b'file:' + self.root
1934 return b'file:' + self.root
1935
1935
1936 def hook(self, name, throw=False, **args):
1936 def hook(self, name, throw=False, **args):
1937 """Call a hook, passing this repo instance.
1937 """Call a hook, passing this repo instance.
1938
1938
1939 This a convenience method to aid invoking hooks. Extensions likely
1939 This a convenience method to aid invoking hooks. Extensions likely
1940 won't call this unless they have registered a custom hook or are
1940 won't call this unless they have registered a custom hook or are
1941 replacing code that is expected to call a hook.
1941 replacing code that is expected to call a hook.
1942 """
1942 """
1943 return hook.hook(self.ui, self, name, throw, **args)
1943 return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
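    # Example (sketch): listing tags with their short hashes from an
    # extension or debug command; 'ui' stands for any ui instance:
    #
    #     for name, node in sorted(pycompat.iteritems(repo.tags())):
    #         ui.write(b'%s -> %s\n' % (name, short(node)))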

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass
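    # Example (sketch): resolving a branch tip without raising; the branch
    # name below is illustrative:
    #
    #     node = repo.branchtip(b'stable', ignoremissing=True)
    #     if node is None:
    #         ...  # branch does not exist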

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
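    # Example (sketch): `known` maps a list of nodes to booleans, so a caller
    # can keep only the nodes this repo considers known and visible:
    #
    #     flags = repo.known(candidate_nodes)
    #     present = [n for n, ok in zip(candidate_nodes, flags) if ok]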

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
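    # Example (sketch): an extension could register a named data filter and
    # reference it from [encode]/[decode] configuration; the filter name and
    # behavior below are illustrative. Per _filter() above, the callable
    # receives the data, the config command, and keyword arguments:
    #
    #     def upper(s, cmd, **kwargs):
    #         return s.upper()
    #
    #     repo.adddatafilter(b'upper:', upper)
    #     # hgrc:  [encode]
    #     #        **.txt = upper: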

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
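    # Example (sketch): ``flags`` is a bytestring where b'l' requests a
    # symlink and b'x' the executable bit, so a plain executable file would
    # be written as:
    #
    #     repo.wwrite(b'bin/tool', data, b'x')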

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (e.g. phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we only do it once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
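    # Example (sketch): callers must hold the store lock before opening a
    # transaction; the typical pattern inside Mercurial looks like:
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # mutate the store; tr closes on success, aborts on error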

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
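    # Example (sketch): an extension doing a bulk import could warm every
    # cache once at the end instead of per changeset:
    #
    #     with repo.lock():
    #         ...  # apply many changesets
    #         repo.updatecaches(full=True)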

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
2767
2767
2768 def invalidate(self, clearfilecache=False):
2768 def invalidate(self, clearfilecache=False):
2769 """Invalidates both store and non-store parts other than dirstate
2769 """Invalidates both store and non-store parts other than dirstate
2770
2770
2771 If a transaction is running, invalidation of store is omitted,
2771 If a transaction is running, invalidation of store is omitted,
2772 because discarding in-memory changes might cause inconsistency
2772 because discarding in-memory changes might cause inconsistency
2773 (e.g. incomplete fncache causes unintentional failure, but
2773 (e.g. incomplete fncache causes unintentional failure, but
2774 redundant one doesn't).
2774 redundant one doesn't).
2775 """
2775 """
2776 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2776 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2777 for k in list(self._filecache.keys()):
2777 for k in list(self._filecache.keys()):
2778 # dirstate is invalidated separately in invalidatedirstate()
2778 # dirstate is invalidated separately in invalidatedirstate()
2779 if k == b'dirstate':
2779 if k == b'dirstate':
2780 continue
2780 continue
2781 if (
2781 if (
2782 k == b'changelog'
2782 k == b'changelog'
2783 and self.currenttransaction()
2783 and self.currenttransaction()
2784 and self.changelog._delayed
2784 and self.changelog._delayed
2785 ):
2785 ):
2786 # The changelog object may store unwritten revisions. We don't
2786 # The changelog object may store unwritten revisions. We don't
2787 # want to lose them.
2787 # want to lose them.
2788 # TODO: Solve the problem instead of working around it.
2788 # TODO: Solve the problem instead of working around it.
2789 continue
2789 continue
2790
2790
2791 if clearfilecache:
2791 if clearfilecache:
2792 del self._filecache[k]
2792 del self._filecache[k]
2793 try:
2793 try:
2794 delattr(unfiltered, k)
2794 delattr(unfiltered, k)
2795 except AttributeError:
2795 except AttributeError:
2796 pass
2796 pass
2797 self.invalidatecaches()
2797 self.invalidatecaches()
2798 if not self.currenttransaction():
2798 if not self.currenttransaction():
2799 # TODO: Changing contents of store outside transaction
2799 # TODO: Changing contents of store outside transaction
2800 # causes inconsistency. We should make in-memory store
2800 # causes inconsistency. We should make in-memory store
2801 # changes detectable, and abort if changed.
2801 # changes detectable, and abort if changed.
2802 self.store.invalidatecaches()
2802 self.store.invalidatecaches()
2803
2803
2804 def invalidateall(self):
2804 def invalidateall(self):
2805 """Fully invalidates both store and non-store parts, causing the
2805 """Fully invalidates both store and non-store parts, causing the
2806 subsequent operation to reread any outside changes."""
2806 subsequent operation to reread any outside changes."""
2807 # extension should hook this to invalidate its caches
2807 # extension should hook this to invalidate its caches
2808 self.invalidate()
2808 self.invalidate()
2809 self.invalidatedirstate()
2809 self.invalidatedirstate()
2810
2810
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

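    # Illustrative sketch, not part of this changeset: the lock-ordering rule
    # described in the lock()/wlock() docstrings above. Callers needing both
    # locks take the working-copy lock first, typically via context managers:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to modify both .hg and .hg/store here
    #
    # (commit() below follows exactly this pattern.)
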
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

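    # Illustrative usage sketch (assumed call site, not from this changeset):
    # commit() returns the new changeset node, or None when there was nothing
    # to commit:
    #
    #     node = repo.commit(text=b'fix typo', user=b'alice <a@example.com>')
    #     if node is None:
    #         ui.status(b'nothing changed\n')
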
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which
        status fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

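    # Illustrative sketch (hypothetical callback, not from this changeset) of
    # the contract described above; the fsmonitor extension registers a
    # similar hook to persist its state after each status run:
    #
    #     def poststatus(wctx, status):
    #         # access the dirstate via wctx.repo().dirstate, never a cached
    #         # copy, since it may have been reloaded in the meanwhile
    #         record_state(wctx.repo().dirstate, status)  # hypothetical
    #
    #     repo.addpostdsstatus(poststatus)
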
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

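    # Worked example (illustration only, not from this changeset): for one
    # (top, bottom) pair, between() walks the first-parent chain from `top`
    # and appends a node whenever the step counter i equals f, doubling f
    # after each append. The result is the list of ancestors of `top` at
    # distances 1, 2, 4, 8, ... -- a logarithmic sample of the chain,
    # stopping at `bottom` or the null revision.
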
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote,
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

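    # Illustrative sketch (hypothetical hgrc, not from this changeset): the
    # prepushkey hook above can veto a pushkey operation, surfacing as the
    # "pushkey-abort:" message. For example, to reject bookmark pushes:
    #
    #     [hooks]
    #     prepushkey.nobookmarks = sh -c '[ "$HG_NAMESPACE" != bookmarks ]'
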
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))

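# Illustration (not from this changeset): undoname() maps a transaction
# journal file to its post-transaction undo counterpart, e.g.
#
#     undoname(b'.hg/store/journal')            -> b'.hg/store/undo'
#     undoname(b'.hg/store/journal.phaseroots') -> b'.hg/store/undo.phaseroots'
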
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

-   requirements = {b'revlogv1'}
+   requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
-       requirements.remove(b'revlogv1')
+       requirements.remove(requirementsmod.REVLOGV1_REQUIREMENT)
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements

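# Illustrative sketch (hypothetical extension code, not part of this
# changeset) of the wrapping hook the docstring above describes -- an
# extension adding its own requirement to newly created repositories:
#
#     from mercurial import extensions, localrepo
#
#     def _wrapped(orig, ui, createopts):
#         requirements = orig(ui, createopts)
#         requirements.add(b'exp-myextension')  # hypothetical requirement
#         return requirements
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _wrapped
#         )
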
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which needs to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

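# Illustration (hypothetical option name): given
# createopts = {b'backend': b'revlogv1', b'myext-shallow': True}, this
# returns {b'myext-shallow': True}, and createrepository() below then aborts
# unless an extension has wrapped filterknowncreateopts() to claim the
# option.
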
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write the store requirements.
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in the store's requires.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

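# Illustrative usage sketch (paths are hypothetical, not from this
# changeset): this is what `hg init` ultimately performs via instance()
# above --
#
#     from mercurial import ui as uimod, localrepo
#
#     ui = uimod.ui.load()
#     localrepo.createrepository(ui, b'/tmp/newrepo')
#     repo = localrepo.makelocalrepository(ui, b'/tmp/newrepo')
#     # the default backend yields the revlog v1 requirement
#     assert requirementsmod.REVLOGV1_REQUIREMENT in repo.requirements
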
3674 def poisonrepository(repo):
3674 def poisonrepository(repo):
3675 """Poison a repository instance so it can no longer be used."""
3675 """Poison a repository instance so it can no longer be used."""
3676 # Perform any cleanup on the instance.
3676 # Perform any cleanup on the instance.
3677 repo.close()
3677 repo.close()
3678
3678
3679 # Our strategy is to replace the type of the object with one that
3679 # Our strategy is to replace the type of the object with one that
3680 # makes all attribute lookups result in an error.
3680 # makes all attribute lookups result in an error.
3681 #
3681 #
3682 # But we have to allow the close() method because some constructors
3682 # But we have to allow the close() method because some constructors
3683 # of repos call close() on repo references.
3683 # of repos call close() on repo references.
3684 class poisonedrepository(object):
3684 class poisonedrepository(object):
3685 def __getattribute__(self, item):
3685 def __getattribute__(self, item):
3686 if item == 'close':
3686 if item == 'close':
3687 return object.__getattribute__(self, item)
3687 return object.__getattribute__(self, item)
3688
3688
3689 raise error.ProgrammingError(
3689 raise error.ProgrammingError(
3690 b'repo instances should not be used after unshare'
3690 b'repo instances should not be used after unshare'
3691 )
3691 )
3692
3692
3693 def close(self):
3693 def close(self):
3694 pass
3694 pass
3695
3695
3696 # We may have a repoview, which intercepts __setattr__. So be sure
3696 # We may have a repoview, which intercepts __setattr__. So be sure
3697 # we operate at the lowest level possible.
3697 # we operate at the lowest level possible.
3698 object.__setattr__(repo, '__class__', poisonedrepository)
3698 object.__setattr__(repo, '__class__', poisonedrepository)
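
The class-swap trick above can be demonstrated in isolation. A minimal
standalone sketch (plain Python, not part of this changeset; all names
below are invented):

    class _poisoned(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)
            raise RuntimeError('instance used after poisoning')

        def close(self):
            pass

    class _victim(object):
        def close(self):
            pass

    v = _victim()
    object.__setattr__(v, '__class__', _poisoned)
    v.close()    # still allowed; resolves to the poisoned no-op close()
    # v.root     # any other attribute access raises RuntimeError
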
@@ -1,75 +1,77 b''
1 # requirements.py - objects and functions related to repository requirements
1 # requirements.py - objects and functions related to repository requirements
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 # When narrowing is finalized and no longer subject to format changes,
10 # When narrowing is finalized and no longer subject to format changes,
11 # we should move this to just "narrow" or similar.
11 # we should move this to just "narrow" or similar.
12 NARROW_REQUIREMENT = b'narrowhg-experimental'
12 NARROW_REQUIREMENT = b'narrowhg-experimental'
13
13
14 # Enables sparse working directory usage
14 # Enables sparse working directory usage
15 SPARSE_REQUIREMENT = b'exp-sparse'
15 SPARSE_REQUIREMENT = b'exp-sparse'
16
16
17 # Enables the internal phase which is used to hide changesets instead
17 # Enables the internal phase which is used to hide changesets instead
18 # of stripping them
18 # of stripping them
19 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
19 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
20
20
21 # Stores the manifest in a tree structure
21 # Stores the manifest in a tree structure
22 TREEMANIFEST_REQUIREMENT = b'treemanifest'
22 TREEMANIFEST_REQUIREMENT = b'treemanifest'
23
23
24 REVLOGV1_REQUIREMENT = b'revlogv1'
25
24 # Increment the sub-version when the revlog v2 format changes to lock out old
26 # Increment the sub-version when the revlog v2 format changes to lock out old
25 # clients.
27 # clients.
26 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
28 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
27
29
28 # A repository with the sparserevlog feature will have delta chains that
30 # A repository with the sparserevlog feature will have delta chains that
29 # can spread over a larger span. Sparse reading cuts these large spans into
31 # can spread over a larger span. Sparse reading cuts these large spans into
30 # pieces, so that each piece isn't too big.
32 # pieces, so that each piece isn't too big.
31 # Without the sparserevlog capability, reading from the repository could use
33 # Without the sparserevlog capability, reading from the repository could use
32 # huge amounts of memory, because the whole span would be read at once,
34 # huge amounts of memory, because the whole span would be read at once,
33 # including all the intermediate revisions that aren't pertinent for the chain.
35 # including all the intermediate revisions that aren't pertinent for the chain.
34 # This is why once a repository has enabled sparse-read, it becomes required.
36 # This is why once a repository has enabled sparse-read, it becomes required.
35 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
37 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
36
38
37 # A repository with the sidedataflag requirement allows storing extra
39 # A repository with the sidedataflag requirement allows storing extra
38 # information for revisions without altering their original hashes.
40 # information for revisions without altering their original hashes.
39 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
41 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
40
42
41 # A repository with the copies-sidedata-changeset requirement will store
43 # A repository with the copies-sidedata-changeset requirement will store
42 # copy-related information in the changeset's sidedata.
44 # copy-related information in the changeset's sidedata.
43 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
45 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
44
46
45 # The repository uses a persistent nodemap for the changelog and the manifest.
47 # The repository uses a persistent nodemap for the changelog and the manifest.
46 NODEMAP_REQUIREMENT = b'persistent-nodemap'
48 NODEMAP_REQUIREMENT = b'persistent-nodemap'
47
49
48 # Denotes that the current repository is a share
50 # Denotes that the current repository is a share
49 SHARED_REQUIREMENT = b'shared'
51 SHARED_REQUIREMENT = b'shared'
50
52
51 # Denotes that the current repository is a share and the shared source path is
53 # Denotes that the current repository is a share and the shared source path is
52 # relative to the current repository root path
54 # relative to the current repository root path
53 RELATIVE_SHARED_REQUIREMENT = b'relshared'
55 RELATIVE_SHARED_REQUIREMENT = b'relshared'
54
56
55 # A repository with share implemented safely. The repository has different
57 # A repository with share implemented safely. The repository has different
56 # store and working copy requirements, i.e. both `.hg/requires` and
58 # store and working copy requirements, i.e. both `.hg/requires` and
57 # `.hg/store/requires` are present.
59 # `.hg/store/requires` are present.
58 SHARESAFE_REQUIREMENT = b'share-safe'
60 SHARESAFE_REQUIREMENT = b'share-safe'
59
61
60 # List of requirements which are working directory specific
62 # List of requirements which are working directory specific
61 # These requirements cannot be shared between repositories if they
63 # These requirements cannot be shared between repositories if they
62 # share the same store
64 # share the same store
63 # * sparse is a working-directory-specific functionality and hence a
65 # * sparse is a working-directory-specific functionality and hence a
64 # working-directory-specific requirement
66 # working-directory-specific requirement
65 # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
67 # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
66 # represent that the current working copy/repository shares the store of
68 # represent that the current working copy/repository shares the store of
67 # another repo. Hence both of them should be stored in the working copy
69 # another repo. Hence both of them should be stored in the working copy
68 # * SHARESAFE_REQUIREMENT needs to be stored in the working dir to mark that
70 # * SHARESAFE_REQUIREMENT needs to be stored in the working dir to mark that
69 # the rest of the requirements are stored in the store's requires
71 # the rest of the requirements are stored in the store's requires
70 WORKING_DIR_REQUIREMENTS = {
72 WORKING_DIR_REQUIREMENTS = {
71 SPARSE_REQUIREMENT,
73 SPARSE_REQUIREMENT,
72 SHARED_REQUIREMENT,
74 SHARED_REQUIREMENT,
73 RELATIVE_SHARED_REQUIREMENT,
75 RELATIVE_SHARED_REQUIREMENT,
74 SHARESAFE_REQUIREMENT,
76 SHARESAFE_REQUIREMENT,
75 }
77 }
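
As an illustrative sketch of how this split is consumed (mirroring what
scmutil.filterrequirements does for share-safe repositories; the helper
name below is invented):

    def _filterrequirements(requirements):
        """split a requirements set into (working copy, store) parts"""
        wcreq = requirements & WORKING_DIR_REQUIREMENTS
        storereq = requirements - WORKING_DIR_REQUIREMENTS
        return wcreq, storereq

    wc, store = _filterrequirements({b'revlogv1', b'store', b'share-safe'})
    # wc == {b'share-safe'}; store == {b'revlogv1', b'store'}
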
@@ -1,734 +1,735 b''
1 # streamclone.py - producing and consuming streaming repository data
1 # streamclone.py - producing and consuming streaming repository data
2 #
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import struct
12 import struct
13
13
14 from .i18n import _
14 from .i18n import _
15 from .pycompat import open
15 from .pycompat import open
16 from .interfaces import repository
16 from .interfaces import repository
17 from . import (
17 from . import (
18 cacheutil,
18 cacheutil,
19 error,
19 error,
20 narrowspec,
20 narrowspec,
21 phases,
21 phases,
22 pycompat,
22 pycompat,
23 requirements as requirementsmod,
23 scmutil,
24 scmutil,
24 store,
25 store,
25 util,
26 util,
26 )
27 )
27
28
28
29
29 def canperformstreamclone(pullop, bundle2=False):
30 def canperformstreamclone(pullop, bundle2=False):
30 """Whether it is possible to perform a streaming clone as part of pull.
31 """Whether it is possible to perform a streaming clone as part of pull.
31
32
32 ``bundle2`` will cause the function to consider stream clone through
33 ``bundle2`` will cause the function to consider stream clone through
33 bundle2 and only through bundle2.
34 bundle2 and only through bundle2.
34
35
35 Returns a tuple of (supported, requirements). ``supported`` is True if
36 Returns a tuple of (supported, requirements). ``supported`` is True if
36 streaming clone is supported and False otherwise. ``requirements`` is
37 streaming clone is supported and False otherwise. ``requirements`` is
37 a set of repo requirements from the remote, or ``None`` if stream clone
38 a set of repo requirements from the remote, or ``None`` if stream clone
38 isn't supported.
39 isn't supported.
39 """
40 """
40 repo = pullop.repo
41 repo = pullop.repo
41 remote = pullop.remote
42 remote = pullop.remote
42
43
43 bundle2supported = False
44 bundle2supported = False
44 if pullop.canusebundle2:
45 if pullop.canusebundle2:
45 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
46 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
46 bundle2supported = True
47 bundle2supported = True
47 # else
48 # else
48 # Server doesn't support bundle2 stream clone or doesn't support
49 # Server doesn't support bundle2 stream clone or doesn't support
49 # the versions we support. Fall back and possibly allow legacy.
50 # the versions we support. Fall back and possibly allow legacy.
50
51
51 # Ensures legacy code path uses available bundle2.
52 # Ensures legacy code path uses available bundle2.
52 if bundle2supported and not bundle2:
53 if bundle2supported and not bundle2:
53 return False, None
54 return False, None
54 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
55 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
55 elif bundle2 and not bundle2supported:
56 elif bundle2 and not bundle2supported:
56 return False, None
57 return False, None
57
58
58 # Streaming clone only works on empty repositories.
59 # Streaming clone only works on empty repositories.
59 if len(repo):
60 if len(repo):
60 return False, None
61 return False, None
61
62
62 # Streaming clone only works if all data is being requested.
63 # Streaming clone only works if all data is being requested.
63 if pullop.heads:
64 if pullop.heads:
64 return False, None
65 return False, None
65
66
66 streamrequested = pullop.streamclonerequested
67 streamrequested = pullop.streamclonerequested
67
68
68 # If we don't have a preference, let the server decide for us. This
69 # If we don't have a preference, let the server decide for us. This
69 # likely only comes into play in LANs.
70 # likely only comes into play in LANs.
70 if streamrequested is None:
71 if streamrequested is None:
71 # The server can advertise whether to prefer streaming clone.
72 # The server can advertise whether to prefer streaming clone.
72 streamrequested = remote.capable(b'stream-preferred')
73 streamrequested = remote.capable(b'stream-preferred')
73
74
74 if not streamrequested:
75 if not streamrequested:
75 return False, None
76 return False, None
76
77
77 # In order for stream clone to work, the client has to support all the
78 # In order for stream clone to work, the client has to support all the
78 # requirements advertised by the server.
79 # requirements advertised by the server.
79 #
80 #
80 # The server advertises its requirements via the "stream" and "streamreqs"
81 # The server advertises its requirements via the "stream" and "streamreqs"
81 # capability. "stream" (a value-less capability) is advertised if and only
82 # capability. "stream" (a value-less capability) is advertised if and only
82 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
83 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
83 # is advertised and contains a comma-delimited list of requirements.
84 # is advertised and contains a comma-delimited list of requirements.
84 requirements = set()
85 requirements = set()
85 if remote.capable(b'stream'):
86 if remote.capable(b'stream'):
86 requirements.add(b'revlogv1')
87 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
87 else:
88 else:
88 streamreqs = remote.capable(b'streamreqs')
89 streamreqs = remote.capable(b'streamreqs')
89 # This is weird and shouldn't happen with modern servers.
90 # This is weird and shouldn't happen with modern servers.
90 if not streamreqs:
91 if not streamreqs:
91 pullop.repo.ui.warn(
92 pullop.repo.ui.warn(
92 _(
93 _(
93 b'warning: stream clone requested but server has them '
94 b'warning: stream clone requested but server has them '
94 b'disabled\n'
95 b'disabled\n'
95 )
96 )
96 )
97 )
97 return False, None
98 return False, None
98
99
99 streamreqs = set(streamreqs.split(b','))
100 streamreqs = set(streamreqs.split(b','))
100 # Server requires something we don't support. Bail.
101 # Server requires something we don't support. Bail.
101 missingreqs = streamreqs - repo.supportedformats
102 missingreqs = streamreqs - repo.supportedformats
102 if missingreqs:
103 if missingreqs:
103 pullop.repo.ui.warn(
104 pullop.repo.ui.warn(
104 _(
105 _(
105 b'warning: stream clone requested but client is missing '
106 b'warning: stream clone requested but client is missing '
106 b'requirements: %s\n'
107 b'requirements: %s\n'
107 )
108 )
108 % b', '.join(sorted(missingreqs))
109 % b', '.join(sorted(missingreqs))
109 )
110 )
110 pullop.repo.ui.warn(
111 pullop.repo.ui.warn(
111 _(
112 _(
112 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
113 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
113 b'for more information)\n'
114 b'for more information)\n'
114 )
115 )
115 )
116 )
116 return False, None
117 return False, None
117 requirements = streamreqs
118 requirements = streamreqs
118
119
119 return True, requirements
120 return True, requirements
120
121
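
A condensed, standalone sketch of the negotiation implemented above
(hypothetical helper; ``caps`` stands in for the remote's capability map):

    def _negotiatestreamreqs(caps, supported):
        # a value-less 'stream' capability implies revlogv1 only
        if b'stream' in caps:
            return {b'revlogv1'}
        streamreqs = set(caps.get(b'streamreqs', b'').split(b',')) - {b''}
        missing = streamreqs - supported
        if missing:
            raise ValueError(b', '.join(sorted(missing)))
        return streamreqs

    assert _negotiatestreamreqs({b'stream': None}, set()) == {b'revlogv1'}
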
121
122
122 def maybeperformlegacystreamclone(pullop):
123 def maybeperformlegacystreamclone(pullop):
123 """Possibly perform a legacy stream clone operation.
124 """Possibly perform a legacy stream clone operation.
124
125
125 Legacy stream clones are performed as part of pull but before all other
126 Legacy stream clones are performed as part of pull but before all other
126 operations.
127 operations.
127
128
128 A legacy stream clone will not be performed if a bundle2 stream clone is
129 A legacy stream clone will not be performed if a bundle2 stream clone is
129 supported.
130 supported.
130 """
131 """
131 from . import localrepo
132 from . import localrepo
132
133
133 supported, requirements = canperformstreamclone(pullop)
134 supported, requirements = canperformstreamclone(pullop)
134
135
135 if not supported:
136 if not supported:
136 return
137 return
137
138
138 repo = pullop.repo
139 repo = pullop.repo
139 remote = pullop.remote
140 remote = pullop.remote
140
141
141 # Save remote branchmap. We will use it later to speed up branchcache
142 # Save remote branchmap. We will use it later to speed up branchcache
142 # creation.
143 # creation.
143 rbranchmap = None
144 rbranchmap = None
144 if remote.capable(b'branchmap'):
145 if remote.capable(b'branchmap'):
145 with remote.commandexecutor() as e:
146 with remote.commandexecutor() as e:
146 rbranchmap = e.callcommand(b'branchmap', {}).result()
147 rbranchmap = e.callcommand(b'branchmap', {}).result()
147
148
148 repo.ui.status(_(b'streaming all changes\n'))
149 repo.ui.status(_(b'streaming all changes\n'))
149
150
150 with remote.commandexecutor() as e:
151 with remote.commandexecutor() as e:
151 fp = e.callcommand(b'stream_out', {}).result()
152 fp = e.callcommand(b'stream_out', {}).result()
152
153
153 # TODO strictly speaking, this code should all be inside the context
154 # TODO strictly speaking, this code should all be inside the context
154 # manager because the context manager is supposed to ensure all wire state
155 # manager because the context manager is supposed to ensure all wire state
155 # is flushed when exiting. But the legacy peers don't do this, so it
156 # is flushed when exiting. But the legacy peers don't do this, so it
156 # doesn't matter.
157 # doesn't matter.
157 l = fp.readline()
158 l = fp.readline()
158 try:
159 try:
159 resp = int(l)
160 resp = int(l)
160 except ValueError:
161 except ValueError:
161 raise error.ResponseError(
162 raise error.ResponseError(
162 _(b'unexpected response from remote server:'), l
163 _(b'unexpected response from remote server:'), l
163 )
164 )
164 if resp == 1:
165 if resp == 1:
165 raise error.Abort(_(b'operation forbidden by server'))
166 raise error.Abort(_(b'operation forbidden by server'))
166 elif resp == 2:
167 elif resp == 2:
167 raise error.Abort(_(b'locking the remote repository failed'))
168 raise error.Abort(_(b'locking the remote repository failed'))
168 elif resp != 0:
169 elif resp != 0:
169 raise error.Abort(_(b'the server sent an unknown error code'))
170 raise error.Abort(_(b'the server sent an unknown error code'))
170
171
171 l = fp.readline()
172 l = fp.readline()
172 try:
173 try:
173 filecount, bytecount = map(int, l.split(b' ', 1))
174 filecount, bytecount = map(int, l.split(b' ', 1))
174 except (ValueError, TypeError):
175 except (ValueError, TypeError):
175 raise error.ResponseError(
176 raise error.ResponseError(
176 _(b'unexpected response from remote server:'), l
177 _(b'unexpected response from remote server:'), l
177 )
178 )
178
179
179 with repo.lock():
180 with repo.lock():
180 consumev1(repo, fp, filecount, bytecount)
181 consumev1(repo, fp, filecount, bytecount)
181
182
182 # new requirements = old non-format requirements +
183 # new requirements = old non-format requirements +
183 #                    new format-related remote requirements
184 #                    new format-related remote requirements
184 #                    (requirements from the streamed-in repository)
185 #                    (requirements from the streamed-in repository)
185 repo.requirements = requirements | (
186 repo.requirements = requirements | (
186 repo.requirements - repo.supportedformats
187 repo.requirements - repo.supportedformats
187 )
188 )
188 repo.svfs.options = localrepo.resolvestorevfsoptions(
189 repo.svfs.options = localrepo.resolvestorevfsoptions(
189 repo.ui, repo.requirements, repo.features
190 repo.ui, repo.requirements, repo.features
190 )
191 )
191 scmutil.writereporequirements(repo)
192 scmutil.writereporequirements(repo)
192
193
193 if rbranchmap:
194 if rbranchmap:
194 repo._branchcaches.replace(repo, rbranchmap)
195 repo._branchcaches.replace(repo, rbranchmap)
195
196
196 repo.invalidate()
197 repo.invalidate()
197
198
198
199
199 def allowservergeneration(repo):
200 def allowservergeneration(repo):
200 """Whether streaming clones are allowed from the server."""
201 """Whether streaming clones are allowed from the server."""
201 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
202 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
202 return False
203 return False
203
204
204 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
205 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
205 return False
206 return False
206
207
207 # The way stream clone works makes it impossible to hide secret changesets.
208 # The way stream clone works makes it impossible to hide secret changesets.
208 # So don't allow this by default.
209 # So don't allow this by default.
209 secret = phases.hassecret(repo)
210 secret = phases.hassecret(repo)
210 if secret:
211 if secret:
211 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
212 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
212
213
213 return True
214 return True
214
215
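
On the server these checks map directly onto configuration; a minimal hgrc
sketch matching the options read above:

    [server]
    # advertise and serve uncompressed (stream) clones
    uncompressed = True
    # keep secret changesets out of stream clones (the default)
    uncompressedallowsecret = False
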
215
216
216 # This is its own function so extensions can override it.
217 # This is its own function so extensions can override it.
217 def _walkstreamfiles(repo, matcher=None):
218 def _walkstreamfiles(repo, matcher=None):
218 return repo.store.walk(matcher)
219 return repo.store.walk(matcher)
219
220
220
221
221 def generatev1(repo):
222 def generatev1(repo):
222 """Emit content for version 1 of a streaming clone.
223 """Emit content for version 1 of a streaming clone.
223
224
224 This returns a 3-tuple of (file count, byte size, data iterator).
225 This returns a 3-tuple of (file count, byte size, data iterator).
225
226
226 The data iterator consists of N entries for each file being transferred.
227 The data iterator consists of N entries for each file being transferred.
227 Each file entry starts as a line with the file name and integer size
228 Each file entry starts as a line with the file name and integer size
228 delimited by a null byte.
229 delimited by a null byte.
229
230
230 The raw file data follows. Following the raw file data is the next file
231 The raw file data follows. Following the raw file data is the next file
231 entry, or EOF.
232 entry, or EOF.
232
233
233 When used on the wire protocol, an additional line indicating protocol
234 When used on the wire protocol, an additional line indicating protocol
234 success will be prepended to the stream. This function is not responsible
235 success will be prepended to the stream. This function is not responsible
235 for adding it.
236 for adding it.
236
237
237 This function will obtain a repository lock to ensure a consistent view of
238 This function will obtain a repository lock to ensure a consistent view of
238 the store is captured. It therefore may raise LockError.
239 the store is captured. It therefore may raise LockError.
239 """
240 """
240 entries = []
241 entries = []
241 total_bytes = 0
242 total_bytes = 0
242 # Get consistent snapshot of repo, lock during scan.
243 # Get consistent snapshot of repo, lock during scan.
243 with repo.lock():
244 with repo.lock():
244 repo.ui.debug(b'scanning\n')
245 repo.ui.debug(b'scanning\n')
245 for name, ename, size in _walkstreamfiles(repo):
246 for name, ename, size in _walkstreamfiles(repo):
246 if size:
247 if size:
247 entries.append((name, size))
248 entries.append((name, size))
248 total_bytes += size
249 total_bytes += size
249
250
250 repo.ui.debug(
251 repo.ui.debug(
251 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
252 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
252 )
253 )
253
254
254 svfs = repo.svfs
255 svfs = repo.svfs
255 debugflag = repo.ui.debugflag
256 debugflag = repo.ui.debugflag
256
257
257 def emitrevlogdata():
258 def emitrevlogdata():
258 for name, size in entries:
259 for name, size in entries:
259 if debugflag:
260 if debugflag:
260 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
261 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
261 # partially encode name over the wire for backwards compat
262 # partially encode name over the wire for backwards compat
262 yield b'%s\0%d\n' % (store.encodedir(name), size)
263 yield b'%s\0%d\n' % (store.encodedir(name), size)
263 # auditing at this stage is both pointless (paths are already
264 # auditing at this stage is both pointless (paths are already
264 # trusted by the local repo) and expensive
265 # trusted by the local repo) and expensive
265 with svfs(name, b'rb', auditpath=False) as fp:
266 with svfs(name, b'rb', auditpath=False) as fp:
266 if size <= 65536:
267 if size <= 65536:
267 yield fp.read(size)
268 yield fp.read(size)
268 else:
269 else:
269 for chunk in util.filechunkiter(fp, limit=size):
270 for chunk in util.filechunkiter(fp, limit=size):
270 yield chunk
271 yield chunk
271
272
272 return len(entries), total_bytes, emitrevlogdata()
273 return len(entries), total_bytes, emitrevlogdata()
273
274
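
The per-file framing described above can be exercised with a standalone
sketch (plain Python, not Mercurial code):

    import io

    def _framev1entry(name, data):
        # "<name>\0<size>\n" followed by exactly <size> raw bytes
        return b'%s\0%d\n' % (name, len(data)) + data

    fp = io.BytesIO(_framev1entry(b'data/foo.i', b'\x00' * 10))
    name, size = fp.readline().rstrip(b'\n').split(b'\0', 1)
    assert name == b'data/foo.i' and fp.read(int(size)) == b'\x00' * 10
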
274
275
275 def generatev1wireproto(repo):
276 def generatev1wireproto(repo):
276 """Emit content for version 1 of streaming clone suitable for the wire.
277 """Emit content for version 1 of streaming clone suitable for the wire.
277
278
278 This is the data output from ``generatev1()`` with 2 header lines. The
279 This is the data output from ``generatev1()`` with 2 header lines. The
279 first line indicates overall success. The second contains the file count and
280 first line indicates overall success. The second contains the file count and
280 byte size of the payload.
281 byte size of the payload.
281
282
282 The success line contains "0" for success, "1" for stream generation not
283 The success line contains "0" for success, "1" for stream generation not
283 allowed, and "2" for error locking the repository (possibly indicating
284 allowed, and "2" for error locking the repository (possibly indicating
284 a permissions error for the server process).
285 a permissions error for the server process).
285 """
286 """
286 if not allowservergeneration(repo):
287 if not allowservergeneration(repo):
287 yield b'1\n'
288 yield b'1\n'
288 return
289 return
289
290
290 try:
291 try:
291 filecount, bytecount, it = generatev1(repo)
292 filecount, bytecount, it = generatev1(repo)
292 except error.LockError:
293 except error.LockError:
293 yield b'2\n'
294 yield b'2\n'
294 return
295 return
295
296
296 # Indicates successful response.
297 # Indicates successful response.
297 yield b'0\n'
298 yield b'0\n'
298 yield b'%d %d\n' % (filecount, bytecount)
299 yield b'%d %d\n' % (filecount, bytecount)
299 for chunk in it:
300 for chunk in it:
300 yield chunk
301 yield chunk
301
302
302
303
303 def generatebundlev1(repo, compression=b'UN'):
304 def generatebundlev1(repo, compression=b'UN'):
304 """Emit content for version 1 of a stream clone bundle.
305 """Emit content for version 1 of a stream clone bundle.
305
306
306 The first 4 bytes of the output ("HGS1") denote this as stream clone
307 The first 4 bytes of the output ("HGS1") denote this as stream clone
307 bundle version 1.
308 bundle version 1.
308
309
309 The next 2 bytes indicate the compression type. Only "UN" is currently
310 The next 2 bytes indicate the compression type. Only "UN" is currently
310 supported.
311 supported.
311
312
312 The next 16 bytes are two 64-bit big endian unsigned integers indicating
313 The next 16 bytes are two 64-bit big endian unsigned integers indicating
313 file count and byte count, respectively.
314 file count and byte count, respectively.
314
315
315 The next 2 bytes are a 16-bit big endian unsigned short declaring the length
316 The next 2 bytes are a 16-bit big endian unsigned short declaring the length
316 of the requirements string, including a trailing \0. The following N bytes
317 of the requirements string, including a trailing \0. The following N bytes
317 are the requirements string, which is ASCII and contains a comma-delimited
318 are the requirements string, which is ASCII and contains a comma-delimited
318 list of repo requirements that are needed to support the data.
319 list of repo requirements that are needed to support the data.
319
320
320 The remaining content is the output of ``generatev1()`` (which may be
321 The remaining content is the output of ``generatev1()`` (which may be
321 compressed in the future).
322 compressed in the future).
322
323
323 Returns a tuple of (requirements, data generator).
324 Returns a tuple of (requirements, data generator).
324 """
325 """
325 if compression != b'UN':
326 if compression != b'UN':
326 raise ValueError(b'we do not support the compression argument yet')
327 raise ValueError(b'we do not support the compression argument yet')
327
328
328 requirements = repo.requirements & repo.supportedformats
329 requirements = repo.requirements & repo.supportedformats
329 requires = b','.join(sorted(requirements))
330 requires = b','.join(sorted(requirements))
330
331
331 def gen():
332 def gen():
332 yield b'HGS1'
333 yield b'HGS1'
333 yield compression
334 yield compression
334
335
335 filecount, bytecount, it = generatev1(repo)
336 filecount, bytecount, it = generatev1(repo)
336 repo.ui.status(
337 repo.ui.status(
337 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
338 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
338 )
339 )
339
340
340 yield struct.pack(b'>QQ', filecount, bytecount)
341 yield struct.pack(b'>QQ', filecount, bytecount)
341 yield struct.pack(b'>H', len(requires) + 1)
342 yield struct.pack(b'>H', len(requires) + 1)
342 yield requires + b'\0'
343 yield requires + b'\0'
343
344
344 # This is where we'll add compression in the future.
345 # This is where we'll add compression in the future.
345 assert compression == b'UN'
346 assert compression == b'UN'
346
347
347 progress = repo.ui.makeprogress(
348 progress = repo.ui.makeprogress(
348 _(b'bundle'), total=bytecount, unit=_(b'bytes')
349 _(b'bundle'), total=bytecount, unit=_(b'bytes')
349 )
350 )
350 progress.update(0)
351 progress.update(0)
351
352
352 for chunk in it:
353 for chunk in it:
353 progress.increment(step=len(chunk))
354 progress.increment(step=len(chunk))
354 yield chunk
355 yield chunk
355
356
356 progress.complete()
357 progress.complete()
357
358
358 return requirements, gen()
359 return requirements, gen()
359
360
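
The header layout documented above, as a standalone construction sketch
(struct only; the helper name is invented, and this is not the code path
Mercurial uses to write bundles):

    import struct

    def _buildhgs1header(filecount, bytecount, requirements):
        requires = b','.join(sorted(requirements)) + b'\0'
        return (
            b'HGS1'  # magic
            + b'UN'  # compression: uncompressed
            + struct.pack(b'>QQ', filecount, bytecount)
            + struct.pack(b'>H', len(requires))  # includes the trailing \0
            + requires
        )

    hdr = _buildhgs1header(2, 4096, {b'revlogv1', b'store'})
    assert hdr[:6] == b'HGS1UN'
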
360
361
361 def consumev1(repo, fp, filecount, bytecount):
362 def consumev1(repo, fp, filecount, bytecount):
362 """Apply the contents from version 1 of a streaming clone file handle.
363 """Apply the contents from version 1 of a streaming clone file handle.
363
364
364 This takes the output from "stream_out" and applies it to the specified
365 This takes the output from "stream_out" and applies it to the specified
365 repository.
366 repository.
366
367
367 Like "stream_out," the status line added by the wire protocol is not
368 Like "stream_out," the status line added by the wire protocol is not
368 handled by this function.
369 handled by this function.
369 """
370 """
370 with repo.lock():
371 with repo.lock():
371 repo.ui.status(
372 repo.ui.status(
372 _(b'%d files to transfer, %s of data\n')
373 _(b'%d files to transfer, %s of data\n')
373 % (filecount, util.bytecount(bytecount))
374 % (filecount, util.bytecount(bytecount))
374 )
375 )
375 progress = repo.ui.makeprogress(
376 progress = repo.ui.makeprogress(
376 _(b'clone'), total=bytecount, unit=_(b'bytes')
377 _(b'clone'), total=bytecount, unit=_(b'bytes')
377 )
378 )
378 progress.update(0)
379 progress.update(0)
379 start = util.timer()
380 start = util.timer()
380
381
381 # TODO: get rid of (potential) inconsistency
382 # TODO: get rid of (potential) inconsistency
382 #
383 #
383 # If a transaction is started and any @filecache property is
384 # If a transaction is started and any @filecache property is
384 # changed at this point, it causes an inconsistency between the
385 # changed at this point, it causes an inconsistency between the
385 # in-memory cached property and the streamclone-ed file on
386 # in-memory cached property and the streamclone-ed file on
386 # disk. A nested transaction prevents the transaction-scoped "clone"
387 # disk. A nested transaction prevents the transaction-scoped "clone"
387 # below from writing in-memory changes out at the end of it,
388 # below from writing in-memory changes out at the end of it,
388 # even though in-memory changes are discarded at the end of it
389 # even though in-memory changes are discarded at the end of it
389 # regardless of transaction nesting.
390 # regardless of transaction nesting.
390 #
391 #
391 # But transaction nesting can't simply be prohibited, because
392 # But transaction nesting can't simply be prohibited, because
392 # nesting also occurs in ordinary cases (e.g. enabling
393 # nesting also occurs in ordinary cases (e.g. enabling
393 # clonebundles).
394 # clonebundles).
394
395
395 with repo.transaction(b'clone'):
396 with repo.transaction(b'clone'):
396 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
397 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
397 for i in pycompat.xrange(filecount):
398 for i in pycompat.xrange(filecount):
398 # XXX doesn't support '\n' or '\r' in filenames
399 # XXX doesn't support '\n' or '\r' in filenames
399 l = fp.readline()
400 l = fp.readline()
400 try:
401 try:
401 name, size = l.split(b'\0', 1)
402 name, size = l.split(b'\0', 1)
402 size = int(size)
403 size = int(size)
403 except (ValueError, TypeError):
404 except (ValueError, TypeError):
404 raise error.ResponseError(
405 raise error.ResponseError(
405 _(b'unexpected response from remote server:'), l
406 _(b'unexpected response from remote server:'), l
406 )
407 )
407 if repo.ui.debugflag:
408 if repo.ui.debugflag:
408 repo.ui.debug(
409 repo.ui.debug(
409 b'adding %s (%s)\n' % (name, util.bytecount(size))
410 b'adding %s (%s)\n' % (name, util.bytecount(size))
410 )
411 )
411 # for backwards compat, name was partially encoded
412 # for backwards compat, name was partially encoded
412 path = store.decodedir(name)
413 path = store.decodedir(name)
413 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
414 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
414 for chunk in util.filechunkiter(fp, limit=size):
415 for chunk in util.filechunkiter(fp, limit=size):
415 progress.increment(step=len(chunk))
416 progress.increment(step=len(chunk))
416 ofp.write(chunk)
417 ofp.write(chunk)
417
418
418 # force @filecache properties to be reloaded from
419 # force @filecache properties to be reloaded from
419 # streamclone-ed file at next access
420 # streamclone-ed file at next access
420 repo.invalidate(clearfilecache=True)
421 repo.invalidate(clearfilecache=True)
421
422
422 elapsed = util.timer() - start
423 elapsed = util.timer() - start
423 if elapsed <= 0:
424 if elapsed <= 0:
424 elapsed = 0.001
425 elapsed = 0.001
425 progress.complete()
426 progress.complete()
426 repo.ui.status(
427 repo.ui.status(
427 _(b'transferred %s in %.1f seconds (%s/sec)\n')
428 _(b'transferred %s in %.1f seconds (%s/sec)\n')
428 % (
429 % (
429 util.bytecount(bytecount),
430 util.bytecount(bytecount),
430 elapsed,
431 elapsed,
431 util.bytecount(bytecount / elapsed),
432 util.bytecount(bytecount / elapsed),
432 )
433 )
433 )
434 )
434
435
435
436
436 def readbundle1header(fp):
437 def readbundle1header(fp):
437 compression = fp.read(2)
438 compression = fp.read(2)
438 if compression != b'UN':
439 if compression != b'UN':
439 raise error.Abort(
440 raise error.Abort(
440 _(
441 _(
441 b'only uncompressed stream clone bundles are '
442 b'only uncompressed stream clone bundles are '
442 b'supported; got %s'
443 b'supported; got %s'
443 )
444 )
444 % compression
445 % compression
445 )
446 )
446
447
447 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
448 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
448 requireslen = struct.unpack(b'>H', fp.read(2))[0]
449 requireslen = struct.unpack(b'>H', fp.read(2))[0]
449 requires = fp.read(requireslen)
450 requires = fp.read(requireslen)
450
451
451 if not requires.endswith(b'\0'):
452 if not requires.endswith(b'\0'):
452 raise error.Abort(
453 raise error.Abort(
453 _(
454 _(
454 b'malformed stream clone bundle: '
455 b'malformed stream clone bundle: '
455 b'requirements not properly encoded'
456 b'requirements not properly encoded'
456 )
457 )
457 )
458 )
458
459
459 requirements = set(requires.rstrip(b'\0').split(b','))
460 requirements = set(requires.rstrip(b'\0').split(b','))
460
461
461 return filecount, bytecount, requirements
462 return filecount, bytecount, requirements
462
463
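
A usage sketch tying the pieces together (hypothetical helper;
applybundlev1() below assumes exactly this 4-byte magic has already been
consumed and validated):

    def _inspectbundle(fp):
        if fp.read(4) != b'HGS1':
            raise ValueError('not a stream clone bundle v1')
        return readbundle1header(fp)
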
463
464
464 def applybundlev1(repo, fp):
465 def applybundlev1(repo, fp):
465 """Apply the content from a stream clone bundle version 1.
466 """Apply the content from a stream clone bundle version 1.
466
467
467 We assume the 4 byte header has been read and validated and the file handle
468 We assume the 4 byte header has been read and validated and the file handle
468 is at the 2 byte compression identifier.
469 is at the 2 byte compression identifier.
469 """
470 """
470 if len(repo):
471 if len(repo):
471 raise error.Abort(
472 raise error.Abort(
472 _(b'cannot apply stream clone bundle on non-empty repo')
473 _(b'cannot apply stream clone bundle on non-empty repo')
473 )
474 )
474
475
475 filecount, bytecount, requirements = readbundle1header(fp)
476 filecount, bytecount, requirements = readbundle1header(fp)
476 missingreqs = requirements - repo.supportedformats
477 missingreqs = requirements - repo.supportedformats
477 if missingreqs:
478 if missingreqs:
478 raise error.Abort(
479 raise error.Abort(
479 _(b'unable to apply stream clone: unsupported format: %s')
480 _(b'unable to apply stream clone: unsupported format: %s')
480 % b', '.join(sorted(missingreqs))
481 % b', '.join(sorted(missingreqs))
481 )
482 )
482
483
483 consumev1(repo, fp, filecount, bytecount)
484 consumev1(repo, fp, filecount, bytecount)
484
485
485
486
486 class streamcloneapplier(object):
487 class streamcloneapplier(object):
487 """Class to manage applying streaming clone bundles.
488 """Class to manage applying streaming clone bundles.
488
489
489 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
490 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
490 readers to perform bundle type-specific functionality.
491 readers to perform bundle type-specific functionality.
491 """
492 """
492
493
493 def __init__(self, fh):
494 def __init__(self, fh):
494 self._fh = fh
495 self._fh = fh
495
496
496 def apply(self, repo):
497 def apply(self, repo):
497 return applybundlev1(repo, self._fh)
498 return applybundlev1(repo, self._fh)
498
499
499
500
500 # type of file to stream
501 # type of file to stream
501 _fileappend = 0 # append only file
502 _fileappend = 0 # append only file
502 _filefull = 1 # full snapshot file
503 _filefull = 1 # full snapshot file
503
504
504 # Source of the file
505 # Source of the file
505 _srcstore = b's' # store (svfs)
506 _srcstore = b's' # store (svfs)
506 _srccache = b'c' # cache (cache)
507 _srccache = b'c' # cache (cache)
507
508
508 # This is its own function so extensions can override it.
509 # This is its own function so extensions can override it.
509 def _walkstreamfullstorefiles(repo):
510 def _walkstreamfullstorefiles(repo):
510 """list snapshot file from the store"""
511 """list snapshot file from the store"""
511 fnames = []
512 fnames = []
512 if not repo.publishing():
513 if not repo.publishing():
513 fnames.append(b'phaseroots')
514 fnames.append(b'phaseroots')
514 return fnames
515 return fnames
515
516
516
517
517 def _filterfull(entry, copy, vfsmap):
518 def _filterfull(entry, copy, vfsmap):
518 """actually copy the snapshot files"""
519 """actually copy the snapshot files"""
519 src, name, ftype, data = entry
520 src, name, ftype, data = entry
520 if ftype != _filefull:
521 if ftype != _filefull:
521 return entry
522 return entry
522 return (src, name, ftype, copy(vfsmap[src].join(name)))
523 return (src, name, ftype, copy(vfsmap[src].join(name)))
523
524
524
525
525 @contextlib.contextmanager
526 @contextlib.contextmanager
526 def maketempcopies():
527 def maketempcopies():
527 """return a function to temporary copy file"""
528 """return a function to temporary copy file"""
528 files = []
529 files = []
529 try:
530 try:
530
531
531 def copy(src):
532 def copy(src):
532 fd, dst = pycompat.mkstemp()
533 fd, dst = pycompat.mkstemp()
533 os.close(fd)
534 os.close(fd)
534 files.append(dst)
535 files.append(dst)
535 util.copyfiles(src, dst, hardlink=True)
536 util.copyfiles(src, dst, hardlink=True)
536 return dst
537 return dst
537
538
538 yield copy
539 yield copy
539 finally:
540 finally:
540 for tmp in files:
541 for tmp in files:
541 util.tryunlink(tmp)
542 util.tryunlink(tmp)
542
543
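
Usage sketch for the context manager above (the path is hypothetical):

    with maketempcopies() as copy:
        tmp = copy(b'/repo/.hg/store/phaseroots')
        # read the stable snapshot from `tmp`; it is unlinked on exit
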
543
544
544 def _makemap(repo):
545 def _makemap(repo):
545 """make a (src -> vfs) map for the repo"""
546 """make a (src -> vfs) map for the repo"""
546 vfsmap = {
547 vfsmap = {
547 _srcstore: repo.svfs,
548 _srcstore: repo.svfs,
548 _srccache: repo.cachevfs,
549 _srccache: repo.cachevfs,
549 }
550 }
550 # we keep repo.vfs out of the map on purpose; there are too many dangers there
551 # we keep repo.vfs out of the map on purpose; there are too many dangers there
551 # (e.g. .hg/hgrc)
552 # (e.g. .hg/hgrc)
552 assert repo.vfs not in vfsmap.values()
553 assert repo.vfs not in vfsmap.values()
553
554
554 return vfsmap
555 return vfsmap
555
556
556
557
557 def _emit2(repo, entries, totalfilesize):
558 def _emit2(repo, entries, totalfilesize):
558 """actually emit the stream bundle"""
559 """actually emit the stream bundle"""
559 vfsmap = _makemap(repo)
560 vfsmap = _makemap(repo)
560 progress = repo.ui.makeprogress(
561 progress = repo.ui.makeprogress(
561 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
562 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
562 )
563 )
563 progress.update(0)
564 progress.update(0)
564 with maketempcopies() as copy, progress:
565 with maketempcopies() as copy, progress:
565 # copy is delayed until we are in the try
566 # copy is delayed until we are in the try
566 entries = [_filterfull(e, copy, vfsmap) for e in entries]
567 entries = [_filterfull(e, copy, vfsmap) for e in entries]
567 yield None # this releases the lock on the repository
568 yield None # this releases the lock on the repository
568 seen = 0
569 seen = 0
569
570
570 for src, name, ftype, data in entries:
571 for src, name, ftype, data in entries:
571 vfs = vfsmap[src]
572 vfs = vfsmap[src]
572 yield src
573 yield src
573 yield util.uvarintencode(len(name))
574 yield util.uvarintencode(len(name))
574 if ftype == _fileappend:
575 if ftype == _fileappend:
575 fp = vfs(name)
576 fp = vfs(name)
576 size = data
577 size = data
577 elif ftype == _filefull:
578 elif ftype == _filefull:
578 fp = open(data, b'rb')
579 fp = open(data, b'rb')
579 size = util.fstat(fp).st_size
580 size = util.fstat(fp).st_size
580 try:
581 try:
581 yield util.uvarintencode(size)
582 yield util.uvarintencode(size)
582 yield name
583 yield name
583 if size <= 65536:
584 if size <= 65536:
584 chunks = (fp.read(size),)
585 chunks = (fp.read(size),)
585 else:
586 else:
586 chunks = util.filechunkiter(fp, limit=size)
587 chunks = util.filechunkiter(fp, limit=size)
587 for chunk in chunks:
588 for chunk in chunks:
588 seen += len(chunk)
589 seen += len(chunk)
589 progress.update(seen)
590 progress.update(seen)
590 yield chunk
591 yield chunk
591 finally:
592 finally:
592 fp.close()
593 fp.close()
593
594
594
595
595 def generatev2(repo, includes, excludes, includeobsmarkers):
596 def generatev2(repo, includes, excludes, includeobsmarkers):
596 """Emit content for version 2 of a streaming clone.
597 """Emit content for version 2 of a streaming clone.
597
598
598 the data stream consists of the following entries:
599 the data stream consists of the following entries:
599 1) A char representing the file destination (e.g. store or cache)
600 1) A char representing the file destination (e.g. store or cache)
600 2) A varint containing the length of the filename
601 2) A varint containing the length of the filename
601 3) A varint containing the length of file data
602 3) A varint containing the length of file data
602 4) N bytes containing the filename (the internal, store-agnostic form)
603 4) N bytes containing the filename (the internal, store-agnostic form)
603 5) N bytes containing the file data
604 5) N bytes containing the file data
604
605
605 Returns a 3-tuple of (file count, file size, data iterator).
606 Returns a 3-tuple of (file count, file size, data iterator).
606 """
607 """
607
608
608 with repo.lock():
609 with repo.lock():
609
610
610 entries = []
611 entries = []
611 totalfilesize = 0
612 totalfilesize = 0
612
613
613 matcher = None
614 matcher = None
614 if includes or excludes:
615 if includes or excludes:
615 matcher = narrowspec.match(repo.root, includes, excludes)
616 matcher = narrowspec.match(repo.root, includes, excludes)
616
617
617 repo.ui.debug(b'scanning\n')
618 repo.ui.debug(b'scanning\n')
618 for name, ename, size in _walkstreamfiles(repo, matcher):
619 for name, ename, size in _walkstreamfiles(repo, matcher):
619 if size:
620 if size:
620 entries.append((_srcstore, name, _fileappend, size))
621 entries.append((_srcstore, name, _fileappend, size))
621 totalfilesize += size
622 totalfilesize += size
622 for name in _walkstreamfullstorefiles(repo):
623 for name in _walkstreamfullstorefiles(repo):
623 if repo.svfs.exists(name):
624 if repo.svfs.exists(name):
624 totalfilesize += repo.svfs.lstat(name).st_size
625 totalfilesize += repo.svfs.lstat(name).st_size
625 entries.append((_srcstore, name, _filefull, None))
626 entries.append((_srcstore, name, _filefull, None))
626 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
627 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
627 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
628 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
628 entries.append((_srcstore, b'obsstore', _filefull, None))
629 entries.append((_srcstore, b'obsstore', _filefull, None))
629 for name in cacheutil.cachetocopy(repo):
630 for name in cacheutil.cachetocopy(repo):
630 if repo.cachevfs.exists(name):
631 if repo.cachevfs.exists(name):
631 totalfilesize += repo.cachevfs.lstat(name).st_size
632 totalfilesize += repo.cachevfs.lstat(name).st_size
632 entries.append((_srccache, name, _filefull, None))
633 entries.append((_srccache, name, _filefull, None))
633
634
634 chunks = _emit2(repo, entries, totalfilesize)
635 chunks = _emit2(repo, entries, totalfilesize)
635 first = next(chunks)
636 first = next(chunks)
636 assert first is None
637 assert first is None
637
638
638 return len(entries), totalfilesize, chunks
639 return len(entries), totalfilesize, chunks
639
640
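
A standalone sketch of one v2 entry as described in the docstring above.
``_uvarint`` re-implements the LEB128-style encoding assumed to match
``util.uvarintencode``, so the example needs no Mercurial imports:

    def _uvarint(value):
        out = bytearray()
        while True:
            byte = value & 0x7F
            value >>= 7
            if value:
                out.append(byte | 0x80)  # continuation bit
            else:
                out.append(byte)
                return bytes(out)

    name, data = b'00changelog.i', b'\x00' * 300
    entry = b's' + _uvarint(len(name)) + _uvarint(len(data)) + name + data
    # b's' = store destination, then varint lengths, then the payloads
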
640
641
641 @contextlib.contextmanager
642 @contextlib.contextmanager
642 def nested(*ctxs):
643 def nested(*ctxs):
643 this = ctxs[0]
644 this = ctxs[0]
644 rest = ctxs[1:]
645 rest = ctxs[1:]
645 with this:
646 with this:
646 if rest:
647 if rest:
647 with nested(*rest):
648 with nested(*rest):
648 yield
649 yield
649 else:
650 else:
650 yield
651 yield
651
652
652
653
653 def consumev2(repo, fp, filecount, filesize):
654 def consumev2(repo, fp, filecount, filesize):
654 """Apply the contents from a version 2 streaming clone.
655 """Apply the contents from a version 2 streaming clone.
655
656
656 Data is read from an object that only needs to provide a ``read(size)``
657 Data is read from an object that only needs to provide a ``read(size)``
657 method.
658 method.
658 """
659 """
659 with repo.lock():
660 with repo.lock():
660 repo.ui.status(
661 repo.ui.status(
661 _(b'%d files to transfer, %s of data\n')
662 _(b'%d files to transfer, %s of data\n')
662 % (filecount, util.bytecount(filesize))
663 % (filecount, util.bytecount(filesize))
663 )
664 )
664
665
665 start = util.timer()
666 start = util.timer()
666 progress = repo.ui.makeprogress(
667 progress = repo.ui.makeprogress(
667 _(b'clone'), total=filesize, unit=_(b'bytes')
668 _(b'clone'), total=filesize, unit=_(b'bytes')
668 )
669 )
669 progress.update(0)
670 progress.update(0)
670
671
671 vfsmap = _makemap(repo)
672 vfsmap = _makemap(repo)
672
673
673 with repo.transaction(b'clone'):
674 with repo.transaction(b'clone'):
674 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
675 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
675 with nested(*ctxs):
676 with nested(*ctxs):
676 for i in range(filecount):
677 for i in range(filecount):
677 src = util.readexactly(fp, 1)
678 src = util.readexactly(fp, 1)
678 vfs = vfsmap[src]
679 vfs = vfsmap[src]
679 namelen = util.uvarintdecodestream(fp)
680 namelen = util.uvarintdecodestream(fp)
680 datalen = util.uvarintdecodestream(fp)
681 datalen = util.uvarintdecodestream(fp)
681
682
682 name = util.readexactly(fp, namelen)
683 name = util.readexactly(fp, namelen)
683
684
684 if repo.ui.debugflag:
685 if repo.ui.debugflag:
685 repo.ui.debug(
686 repo.ui.debug(
686 b'adding [%s] %s (%s)\n'
687 b'adding [%s] %s (%s)\n'
687 % (src, name, util.bytecount(datalen))
688 % (src, name, util.bytecount(datalen))
688 )
689 )
689
690
690 with vfs(name, b'w') as ofp:
691 with vfs(name, b'w') as ofp:
691 for chunk in util.filechunkiter(fp, limit=datalen):
692 for chunk in util.filechunkiter(fp, limit=datalen):
692 progress.increment(step=len(chunk))
693 progress.increment(step=len(chunk))
693 ofp.write(chunk)
694 ofp.write(chunk)
694
695
695 # force @filecache properties to be reloaded from
696 # force @filecache properties to be reloaded from
696 # streamclone-ed file at next access
697 # streamclone-ed file at next access
697 repo.invalidate(clearfilecache=True)
698 repo.invalidate(clearfilecache=True)
698
699
699 elapsed = util.timer() - start
700 elapsed = util.timer() - start
700 if elapsed <= 0:
701 if elapsed <= 0:
701 elapsed = 0.001
702 elapsed = 0.001
702 repo.ui.status(
703 repo.ui.status(
703 _(b'transferred %s in %.1f seconds (%s/sec)\n')
704 _(b'transferred %s in %.1f seconds (%s/sec)\n')
704 % (
705 % (
705 util.bytecount(progress.pos),
706 util.bytecount(progress.pos),
706 elapsed,
707 elapsed,
707 util.bytecount(progress.pos / elapsed),
708 util.bytecount(progress.pos / elapsed),
708 )
709 )
709 )
710 )
710 progress.complete()
711 progress.complete()
711
712
712
713
713 def applybundlev2(repo, fp, filecount, filesize, requirements):
714 def applybundlev2(repo, fp, filecount, filesize, requirements):
714 from . import localrepo
715 from . import localrepo
715
716
716 missingreqs = [r for r in requirements if r not in repo.supported]
717 missingreqs = [r for r in requirements if r not in repo.supported]
717 if missingreqs:
718 if missingreqs:
718 raise error.Abort(
719 raise error.Abort(
719 _(b'unable to apply stream clone: unsupported format: %s')
720 _(b'unable to apply stream clone: unsupported format: %s')
720 % b', '.join(sorted(missingreqs))
721 % b', '.join(sorted(missingreqs))
721 )
722 )
722
723
723 consumev2(repo, fp, filecount, filesize)
724 consumev2(repo, fp, filecount, filesize)
724
725
725 # new requirements = old non-format requirements +
726 # new requirements = old non-format requirements +
726 #                    new format-related remote requirements
727 #                    new format-related remote requirements
727 #                    (requirements from the streamed-in repository)
728 #                    (requirements from the streamed-in repository)
728 repo.requirements = set(requirements) | (
729 repo.requirements = set(requirements) | (
729 repo.requirements - repo.supportedformats
730 repo.requirements - repo.supportedformats
730 )
731 )
731 repo.svfs.options = localrepo.resolvestorevfsoptions(
732 repo.svfs.options = localrepo.resolvestorevfsoptions(
732 repo.ui, repo.requirements, repo.features
733 repo.ui, repo.requirements, repo.features
733 )
734 )
734 scmutil.writereporequirements(repo)
735 scmutil.writereporequirements(repo)
@@ -1,1011 +1,1011 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import (
11 from .. import (
12 error,
12 error,
13 localrepo,
13 localrepo,
14 requirements,
14 requirements,
15 revlog,
15 revlog,
16 util,
16 util,
17 )
17 )
18
18
19 from ..utils import compression
19 from ..utils import compression
20
20
21 # list of requirements that request a clone of all revlogs if added/removed
21 # list of requirements that request a clone of all revlogs if added/removed
22 RECLONES_REQUIREMENTS = {
22 RECLONES_REQUIREMENTS = {
23 b'generaldelta',
23 b'generaldelta',
24 requirements.SPARSEREVLOG_REQUIREMENT,
24 requirements.SPARSEREVLOG_REQUIREMENT,
25 }
25 }
26
26
27
27
28 def preservedrequirements(repo):
28 def preservedrequirements(repo):
29 return set()
29 return set()
30
30
31
31
32 FORMAT_VARIANT = b'deficiency'
32 FORMAT_VARIANT = b'deficiency'
33 OPTIMISATION = b'optimization'
33 OPTIMISATION = b'optimization'
34
34
35
35
36 class improvement(object):
36 class improvement(object):
37 """Represents an improvement that can be made as part of an upgrade.
37 """Represents an improvement that can be made as part of an upgrade.
38
38
39 The following attributes are defined on each instance:
39 The following attributes are defined on each instance:
40
40
41 name
41 name
42 Machine-readable string uniquely identifying this improvement. It
42 Machine-readable string uniquely identifying this improvement. It
43 will be mapped to an action later in the upgrade process.
43 will be mapped to an action later in the upgrade process.
44
44
45 type
45 type
46 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
46 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
47 A format variant is where we change the storage format. Not all format
47 A format variant is where we change the storage format. Not all format
48 variant changes are an obvious problem.
48 variant changes are an obvious problem.
49 An optimization is an action (sometimes optional) that
49 An optimization is an action (sometimes optional) that
50 can be taken to further improve the state of the repository.
50 can be taken to further improve the state of the repository.
51
51
52 description
52 description
53 Message intended for humans explaining the improvement in more detail,
53 Message intended for humans explaining the improvement in more detail,
54 including the implications of it. For ``FORMAT_VARIANT`` types, should be
54 including the implications of it. For ``FORMAT_VARIANT`` types, should be
55 worded in the present tense. For ``OPTIMISATION`` types, should be
55 worded in the present tense. For ``OPTIMISATION`` types, should be
56 worded in the future tense.
56 worded in the future tense.
57
57
58 upgrademessage
58 upgrademessage
59 Message intended for humans explaining what an upgrade addressing this
59 Message intended for humans explaining what an upgrade addressing this
60 issue will do. Should be worded in the future tense.
60 issue will do. Should be worded in the future tense.
61
61
62 postupgrademessage
62 postupgrademessage
63 Message intended for humans which will be shown after an upgrade
63 Message intended for humans which will be shown after an upgrade
64 operation in which this improvement was added
64 operation in which this improvement was added
65
65
66 postdowngrademessage
66 postdowngrademessage
67 Message intended for humans which will be shown after an upgrade
67 Message intended for humans which will be shown after an upgrade
68 operation in which this improvement was removed
68 operation in which this improvement was removed
69
69
70 touches_filelogs (bool)
70 touches_filelogs (bool)
71 Whether this improvement touches filelogs
71 Whether this improvement touches filelogs
72
72
73 touches_manifests (bool)
73 touches_manifests (bool)
74 Whether this improvement touches manifests
74 Whether this improvement touches manifests
75
75
76 touches_changelog (bool)
76 touches_changelog (bool)
77 Whether this improvement touches changelog
77 Whether this improvement touches changelog
78
78
79 touches_requirements (bool)
79 touches_requirements (bool)
80 Whether this improvement changes repository requirements
80 Whether this improvement changes repository requirements
81 """
81 """
82
82
83 def __init__(self, name, type, description, upgrademessage):
83 def __init__(self, name, type, description, upgrademessage):
84 self.name = name
84 self.name = name
85 self.type = type
85 self.type = type
86 self.description = description
86 self.description = description
87 self.upgrademessage = upgrademessage
87 self.upgrademessage = upgrademessage
88 self.postupgrademessage = None
88 self.postupgrademessage = None
89 self.postdowngrademessage = None
89 self.postdowngrademessage = None
90 # By default for now, we assume every improvement touches
90 # By default for now, we assume every improvement touches
91 # all the things
91 # all the things
92 self.touches_filelogs = True
92 self.touches_filelogs = True
93 self.touches_manifests = True
93 self.touches_manifests = True
94 self.touches_changelog = True
94 self.touches_changelog = True
95 self.touches_requirements = True
95 self.touches_requirements = True
96
96
97 def __eq__(self, other):
97 def __eq__(self, other):
98 if not isinstance(other, improvement):
98 if not isinstance(other, improvement):
99 # This is what python tell use to do
99 # This is what python tell use to do
100 return NotImplemented
100 return NotImplemented
101 return self.name == other.name
101 return self.name == other.name
102
102
103 def __ne__(self, other):
103 def __ne__(self, other):
104 return not (self == other)
104 return not (self == other)
105
105
106 def __hash__(self):
106 def __hash__(self):
107 return hash(self.name)
107 return hash(self.name)
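

def _example_improvement_identity():
    # Illustrative sketch (not used by the upgrade machinery): since
    # improvements hash and compare by ``name`` only, collections of
    # actions deduplicate naturally. The action name below is made up.
    a = improvement(b'example-action', OPTIMISATION, b'demo', b'demo')
    b = improvement(b'example-action', OPTIMISATION, b'other', b'other')
    return a == b and len({a, b}) == 1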


allformatvariant = []


def registerformatvariant(cls):
    allformatvariant.append(cls)
    return cls


class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""

    type = FORMAT_VARIANT
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it. ``FORMAT_VARIANT`` types should be
    # worded in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    # Message intended for humans which will be shown after an upgrade
    # operation in which this improvement was added
    postupgrademessage = None

    # Message intended for humans which will be shown after an upgrade
    # operation in which this improvement was removed
    postdowngrademessage = None

    # By default for now, we assume every improvement touches all the things
    touches_filelogs = True
    touches_manifests = True
    touches_changelog = True
    touches_requirements = True

    def __init__(self):
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()


class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variants are controlled by a 'requirement'. We define a small
    subclass to factor out the common code.
    """

    # the requirement that controls this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui)
        )

    @classmethod
    def fromrepo(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)


@registerformatvariant
class fncache(requirementformatvariant):
    name = b'fncache'

    _requirement = b'fncache'

    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )


@registerformatvariant
class dotencode(requirementformatvariant):
    name = b'dotencode'

    _requirement = b'dotencode'

    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )


@registerformatvariant
class generaldelta(requirementformatvariant):
    name = b'generaldelta'

    _requirement = b'generaldelta'

    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )


@registerformatvariant
class sharesafe(requirementformatvariant):
    name = b'share-safe'
    _requirement = requirements.SHARESAFE_REQUIREMENT

    default = False

    description = _(
        b'old shared repositories do not share source repository '
        b'requirements and config. This leads to various problems '
        b'when the source repository format is upgraded or some new '
        b'extensions are enabled.'
    )

    upgrademessage = _(
        b'Upgrades a repository to share-safe format so that future '
        b'shares of this repository share its requirements and configs.'
    )

    postdowngrademessage = _(
        b'repository downgraded to not use share-safe mode, '
        b'existing shares will not work and need to'
        b' be reshared.'
    )

    postupgrademessage = _(
        b'repository upgraded to share-safe mode, existing'
        b' shares will still work in old non-safe mode. '
        b'Re-share existing shares to use them in safe mode.'
        b' New shares will be created in safe mode.'
    )

    # upgrade only needs to change the requirements
    touches_filelogs = False
    touches_manifests = False
    touches_changelog = False
    touches_requirements = True


@registerformatvariant
class sparserevlog(requirementformatvariant):
    name = b'sparserevlog'

    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    default = True

    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'versions, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severely limit the ability of Mercurial to build '
        b'good delta chains, resulting in much more storage space '
        b'being taken and limiting reusability of on-disk deltas '
        b'during exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chains with more unused data '
        b'between payloads. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making '
        b'better compression and faster exchange with the server.'
    )
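

def _example_variant_status(repo):
    # Illustrative sketch (not part of the upgrade flow): a variant needs
    # no action when the repository already matches what a freshly
    # created repository would use.
    return sparserevlog.fromrepo(repo) == sparserevlog.fromconfig(repo)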


@registerformatvariant
class sidedata(requirementformatvariant):
    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')


@registerformatvariant
class persistentnodemap(requirementformatvariant):
    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speed up lookups'
    )

    upgrademessage = _(b'Speed up revision lookups by node id.')


@registerformatvariant
class copiessdc(requirementformatvariant):
    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows using a more efficient algorithm for copy tracing.'
    )


@registerformatvariant
class removecldeltachain(formatvariant):
    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformatted to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        chainbase = cl.chainbase
        return all(rev == chainbase(rev) for rev in cl)

    @staticmethod
    def fromconfig(repo):
        return True


@registerformatvariant
class compressionengine(formatvariant):
    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compression algorithm used to compress data. '
        b'Some engines are faster than others.'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        compression = b'zlib'
        for req in repo.requirements:
            prefix = req.startswith
            if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
                compression = req.split(b'-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        compengines = repo.ui.configlist(b'format', b'revlog-compression')
        # return the first valid value as the selection code would do
        for comp in compengines:
            if comp in util.compengines:
                return comp

        # no valid compression engine found; display them all for clarity
        return b','.join(compengines)
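

def _example_compression_requirement_parsing():
    # Illustrative sketch: requirement names embed the engine name after
    # the second dash, which is what ``req.split(b'-', 2)[2]`` extracts
    # in ``compressionengine.fromrepo`` above.
    req = b'revlog-compression-zstd'
    return req.split(b'-', 2)[2]  # -> b'zstd'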


@registerformatvariant
class compressionlevel(formatvariant):
    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @classmethod
    def fromrepo(cls, repo):
        comp = compressionengine.fromrepo(repo)
        level = None
        if comp == b'zlib':
            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        return bytes(level)

    @classmethod
    def fromconfig(cls, repo):
        comp = compressionengine.fromconfig(repo)
        level = None
        if comp == b'zlib':
            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        return bytes(level)


def find_format_upgrades(repo):
    """returns a list of format upgrades which can be performed on the repo"""
    upgrades = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    for fv in allformatvariant:
        if not fv.fromrepo(repo):
            upgrades.append(fv)

    return upgrades
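

def _example_report_upgrades(repo):
    # Illustrative sketch: list the machine-readable names of the format
    # variants this repository is missing relative to current defaults.
    return sorted(fv.name for fv in find_format_upgrades(repo))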


def find_format_downgrades(repo):
    """returns a list of format downgrades which will be performed on the repo
    because of disabled config options for them"""

    downgrades = []

    for fv in allformatvariant:
        if fv.name == b'compression':
            # If there is a compression change between repository
            # and config, destination repository compression will change
            # and current compression will be removed.
            if fv.fromrepo(repo) != fv.fromconfig(repo):
                downgrades.append(fv)
            continue
        # format variant exists in repo but does not exist in new repository
        # config
        if fv.fromrepo(repo) and not fv.fromconfig(repo):
            downgrades.append(fv)

    return downgrades


ALL_OPTIMISATIONS = []


def register_optimization(obj):
    ALL_OPTIMISATIONS.append(obj)
    return obj


register_optimization(
    improvement(
        name=b're-delta-parent',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated to '
            b'choose an optimal base revision where this was not '
            b'already done; the size of the repository may shrink and '
            b'various operations may become faster; the first time '
            b'this optimization is performed could slow down upgrade '
            b'execution considerably; subsequent invocations should '
            b'not run noticeably slower'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose a new '
            b'base revision if needed'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-multibase',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated '
            b'against multiple base revisions and the smallest '
            b'difference will be used; the size of the repository may '
            b'shrink significantly when there are many merges; this '
            b'optimization will slow down execution in proportion to '
            b'the number of merges in the repository and the number '
            b'of files in the repository; this slowdown should not '
            b'be significant unless there are tens of thousands of '
            b'files and thousands of merges'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose an '
            b'optimal delta by computing deltas against multiple '
            b'parents; may slow down execution time '
            b'significantly'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-all',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will always be '
            b'recalculated without reusing prior deltas; this will '
            b'likely make execution run several times slower; this '
            b'optimization is typically not needed'
        ),
        upgrademessage=_(
            b'deltas within internal storage will be fully '
            b'recomputed; this will likely drastically slow down '
            b'execution time'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-fulladd',
        type=OPTIMISATION,
        description=_(
            b'every revision will be re-added as if it were new '
            b'content. It will go through the full storage '
            b'mechanism, giving extensions a chance to process it '
            b'(e.g. lfs). This is similar to "re-delta-all" but even '
            b'slower since more logic is involved.'
        ),
        upgrademessage=_(
            b'each revision will be added as new content to the '
            b'internal storage; this will likely drastically slow '
            b'down execution time, but some extensions might need '
            b'it'
        ),
    )
)
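

def _example_register_custom_optimization():
    # Illustrative sketch of how an extension might register its own
    # optimization through the same hook; the action name below is
    # hypothetical and this helper is never called here.
    return register_optimization(
        improvement(
            name=b'example-noop',
            type=OPTIMISATION,
            description=_(b'does nothing; shown for illustration only'),
            upgrademessage=_(b'nothing will change'),
        )
    )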


def findoptimizations(repo):
    """Determine the optimisations that could be used during an upgrade"""
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    return list(ALL_OPTIMISATIONS)


def determine_upgrade_actions(
    repo, format_upgrades, optimizations, sourcereqs, destreqs
):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``find_format_upgrades`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    for d in format_upgrades:
        name = d._requirement

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name is not None and name not in destreqs:
            continue

        newactions.append(d)

    newactions.extend(o for o in sorted(optimizations) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
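

def _example_planned_actions(repo, destreqs):
    # Illustrative sketch mirroring how callers combine the helpers
    # above: detect missing format upgrades, offer every known
    # optimization, and let the pruning in ``determine_upgrade_actions``
    # filter them against the destination requirements.
    upgrades = find_format_upgrades(repo)
    optimizations = findoptimizations(repo)
    return determine_upgrade_actions(
        repo, upgrades, optimizations, repo.requirements, destreqs
    )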


class UpgradeOperation(object):
    """represent the work to be done during an upgrade"""

    def __init__(
        self,
        ui,
        new_requirements,
        current_requirements,
        upgrade_actions,
        removed_actions,
        revlogs_to_process,
        backup_store,
    ):
        self.ui = ui
        self.new_requirements = new_requirements
        self.current_requirements = current_requirements
        # list of upgrade actions the operation will perform
        self.upgrade_actions = upgrade_actions
        self._upgrade_actions_names = set([a.name for a in upgrade_actions])
        self.removed_actions = removed_actions
        self.revlogs_to_process = revlogs_to_process
        # requirements which will be added by the operation
        self._added_requirements = (
            self.new_requirements - self.current_requirements
        )
        # requirements which will be removed by the operation
        self._removed_requirements = (
            self.current_requirements - self.new_requirements
        )
        # requirements which will be preserved by the operation
        self._preserved_requirements = (
            self.current_requirements & self.new_requirements
        )
        # optimizations which are not being used and which it is
        # recommended to use
        all_optimizations = findoptimizations(None)
        self.unused_optimizations = [
            i for i in all_optimizations if i not in self.upgrade_actions
        ]

        # delta reuse mode of this upgrade operation
        self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
        if b're-delta-all' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
        elif b're-delta-parent' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-multibase' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-fulladd' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
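
        # Summary of the mapping above (using the ``revlog.revlog``
        # constants referenced in the assignments):
        #   re-delta-all       -> DELTAREUSENEVER
        #   re-delta-parent    -> DELTAREUSESAMEREVS
        #   re-delta-multibase -> DELTAREUSESAMEREVS, plus forcing
        #                         re-delta against both parents below
        #   re-delta-fulladd   -> DELTAREUSEFULLADD
        #   (default)          -> DELTAREUSEALWAYS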

        # should this operation force re-delta of both parents
        self.force_re_delta_both_parents = (
            b're-delta-multibase' in self._upgrade_actions_names
        )

        # should this operation create a backup of the store
        self.backup_store = backup_store

        # whether the operation touches different revlogs at all or not
        self.touches_filelogs = self._touches_filelogs()
        self.touches_manifests = self._touches_manifests()
        self.touches_changelog = self._touches_changelog()
        # whether the operation touches the requirements file or not
        self.touches_requirements = self._touches_requirements()
        self.touches_store = (
            self.touches_filelogs
            or self.touches_manifests
            or self.touches_changelog
        )
        # does the operation only touch the repository requirements
        self.requirements_only = (
            self.touches_requirements and not self.touches_store
        )

    def _touches_filelogs(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_filelogs:
                return True
        for a in self.removed_actions:
            if a.touches_filelogs:
                return True
        return False

    def _touches_manifests(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_manifests:
                return True
        for a in self.removed_actions:
            if a.touches_manifests:
                return True
        return False

    def _touches_changelog(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_changelog:
                return True
        for a in self.removed_actions:
            if a.touches_changelog:
                return True
        return False

    def _touches_requirements(self):
        for a in self.upgrade_actions:
            # optimisations are used to re-process revlogs and do not
            # result in a requirement being added or removed
            if a.type == OPTIMISATION:
                pass
            elif a.touches_requirements:
                return True
        for a in self.removed_actions:
            if a.touches_requirements:
                return True

        return False

    def _write_labeled(self, l, label):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for r in sorted(l):
            if not first:
                self.ui.write(b', ')
            self.ui.write(r, label=label)
            first = False

    def print_requirements(self):
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b' preserved: '))
        self._write_labeled(
            self._preserved_requirements, "upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        if self._removed_requirements:
            self.ui.write(_(b' removed: '))
            self._write_labeled(
                self._removed_requirements, "upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b' added: '))
            self._write_labeled(
                self._added_requirements, "upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')

    def print_optimisations(self):
        optimisations = [
            a for a in self.upgrade_actions if a.type == OPTIMISATION
        ]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        for a in self.upgrade_actions:
            self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        if not self.revlogs_to_process:
            self.ui.write((b'no revlogs to process\n'))
        else:
            self.ui.write((b'processed revlogs:\n'))
            for r in sorted(self.revlogs_to_process):
                self.ui.write((b' - %s\n' % r))
        self.ui.write((b'\n'))

    def print_unused_optimizations(self):
        for i in self.unused_optimizations:
            self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))

    def has_upgrade_action(self, name):
        """Check whether the upgrade operation will perform this action"""
        return name in self._upgrade_actions_names

    def print_post_op_messages(self):
        """print post upgrade operation warning messages"""
        for a in self.upgrade_actions:
            if a.postupgrademessage is not None:
                self.ui.warn(b'%s\n' % a.postupgrademessage)
        for a in self.removed_actions:
            if a.postdowngrademessage is not None:
                self.ui.warn(b'%s\n' % a.postdowngrademessage)
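

def _example_will_recompute_deltas(op):
    # Illustrative sketch: callers can query a planned operation for a
    # specific action by its machine-readable name.
    return op.has_upgrade_action(b're-delta-all')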


### Code checking if a repository can go through the upgrade process at all. #


def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return {
        # Introduced in Mercurial 0.9.2.
        requirements.REVLOGV1_REQUIREMENT,
        # Introduced in Mercurial 0.9.2.
        b'store',
    }


def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains any
    of the requirements in the returned set.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }


def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""

    required = requiredsourcerequirements(repo)
    missingreqs = required - repo.requirements
    if missingreqs:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        missingreqs = b', '.join(sorted(missingreqs))
        raise error.Abort(msg % missingreqs)

    blocking = blocksourcerequirements(repo)
    blockingreqs = blocking & repo.requirements
    if blockingreqs:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        blockingreqs = b', '.join(sorted(blockingreqs))
        raise error.Abort(m % blockingreqs)
900
900
901
901
902 ### Verify the validity of the planned requirement changes ####################
902 ### Verify the validity of the planned requirement changes ####################
903
903
904
904
905 def supportremovedrequirements(repo):
905 def supportremovedrequirements(repo):
906 """Obtain requirements that can be removed during an upgrade.
906 """Obtain requirements that can be removed during an upgrade.
907
907
908 If an upgrade were to create a repository that dropped a requirement,
908 If an upgrade were to create a repository that dropped a requirement,
909 the dropped requirement must appear in the returned set for the upgrade
909 the dropped requirement must appear in the returned set for the upgrade
910 to be allowed.
910 to be allowed.
911 """
911 """
912 supported = {
912 supported = {
913 requirements.SPARSEREVLOG_REQUIREMENT,
913 requirements.SPARSEREVLOG_REQUIREMENT,
914 requirements.SIDEDATA_REQUIREMENT,
914 requirements.SIDEDATA_REQUIREMENT,
915 requirements.COPIESSDC_REQUIREMENT,
915 requirements.COPIESSDC_REQUIREMENT,
916 requirements.NODEMAP_REQUIREMENT,
916 requirements.NODEMAP_REQUIREMENT,
917 requirements.SHARESAFE_REQUIREMENT,
917 requirements.SHARESAFE_REQUIREMENT,
918 }
918 }
919 for name in compression.compengines:
919 for name in compression.compengines:
920 engine = compression.compengines[name]
920 engine = compression.compengines[name]
921 if engine.available() and engine.revlogheader():
921 if engine.available() and engine.revlogheader():
922 supported.add(b'exp-compression-%s' % name)
922 supported.add(b'exp-compression-%s' % name)
923 if engine.name() == b'zstd':
923 if engine.name() == b'zstd':
924 supported.add(b'revlog-compression-zstd')
924 supported.add(b'revlog-compression-zstd')
925 return supported
925 return supported
926
926
927
927
928 def supporteddestrequirements(repo):
928 def supporteddestrequirements(repo):
929 """Obtain requirements that upgrade supports in the destination.
929 """Obtain requirements that upgrade supports in the destination.
930
930
931 If the result of the upgrade would create requirements not in this set,
931 If the result of the upgrade would create requirements not in this set,
932 the upgrade is disallowed.
932 the upgrade is disallowed.
933
933
934 Extensions should monkeypatch this to add their custom requirements.
934 Extensions should monkeypatch this to add their custom requirements.
935 """
935 """
936 supported = {
936 supported = {
937 b'dotencode',
937 b'dotencode',
938 b'fncache',
938 b'fncache',
939 b'generaldelta',
939 b'generaldelta',
940 b'revlogv1',
940 requirements.REVLOGV1_REQUIREMENT,
941 b'store',
941 b'store',
942 requirements.SPARSEREVLOG_REQUIREMENT,
942 requirements.SPARSEREVLOG_REQUIREMENT,
943 requirements.SIDEDATA_REQUIREMENT,
943 requirements.SIDEDATA_REQUIREMENT,
944 requirements.COPIESSDC_REQUIREMENT,
944 requirements.COPIESSDC_REQUIREMENT,
945 requirements.NODEMAP_REQUIREMENT,
945 requirements.NODEMAP_REQUIREMENT,
946 requirements.SHARESAFE_REQUIREMENT,
946 requirements.SHARESAFE_REQUIREMENT,
947 }
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported


def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported


def check_requirements_changes(repo, new_reqs):
    old_reqs = repo.requirements

    support_removal = supportremovedrequirements(repo)
    no_remove_reqs = old_reqs - new_reqs - support_removal
    if no_remove_reqs:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        no_remove_reqs = b', '.join(sorted(no_remove_reqs))
        raise error.Abort(msg % no_remove_reqs)

    support_addition = allowednewrequirements(repo)
    no_add_reqs = new_reqs - old_reqs - support_addition
    if no_add_reqs:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        no_add_reqs = b', '.join(sorted(no_add_reqs))
        raise error.Abort(m + no_add_reqs)

    supported = supporteddestrequirements(repo)
    unsupported_reqs = new_reqs - supported
    if unsupported_reqs:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        unsupported_reqs = b', '.join(sorted(unsupported_reqs))
        raise error.Abort(msg % unsupported_reqs)
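For illustration, a minimal sketch of the set arithmetic behind `check_requirements_changes`; the requirement sets below are hypothetical inputs standing in for the real helper results, not part of this change:

    # Hypothetical old/new requirement sets for an upgrade run.
    old_reqs = {b'revlogv1', b'store', b'fncache'}
    new_reqs = {b'revlogv1', b'store', b'fncache', b'generaldelta'}
    allowed_additions = {b'generaldelta', b'dotencode'}  # allowednewrequirements()
    allowed_removals = set()  # supportremovedrequirements()

    # Any removal not explicitly supported aborts the upgrade...
    assert not (old_reqs - new_reqs - allowed_removals)
    # ...as does any addition outside the whitelist.
    assert not (new_reqs - old_reqs - allowed_additions)
    # A third check validates new_reqs against supporteddestrequirements(repo).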
@@ -1,747 +1,748 @@
# wireprotov1server.py - Wire protocol version 1 server functionality
#
# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import binascii
import os

from .i18n import _
from .node import (
    hex,
    nullid,
)
from .pycompat import getattr

from . import (
    bundle2,
    bundlecaches,
    changegroup as changegroupmod,
    discovery,
    encoding,
    error,
    exchange,
    pushkey as pushkeymod,
    pycompat,
+    requirements as requirementsmod,
    streamclone,
    util,
    wireprototypes,
)

from .utils import (
    procutil,
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

bundle2requiredmain = _(b'incompatible Mercurial client; bundle2 required')
bundle2requiredhint = _(
    b'see https://www.mercurial-scm.org/wiki/IncompatibleClient'
)
bundle2required = b'%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)


def clientcompressionsupport(proto):
    """Returns a list of compression methods supported by the client.

    Returns a list of the compression methods supported by the client
    according to the protocol capabilities. If no such capability has
    been announced, fall back to the default of zlib and uncompressed.
    """
    for cap in proto.getprotocaps():
        if cap.startswith(b'comp='):
            return cap[5:].split(b',')
    return [b'zlib', b'none']
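As a quick illustration of the `comp=` protocol capability parsed above, with a made-up capability list:

    caps = [b'partial-pull', b'comp=zstd,zlib,none']  # hypothetical client protocaps
    comp = next(
        (c[5:].split(b',') for c in caps if c.startswith(b'comp=')),
        [b'zlib', b'none'],  # default when the client announced nothing
    )
    assert comp == [b'zstd', b'zlib', b'none']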


# wire protocol command can either return a string or one of these classes.


def getdispatchrepo(repo, proto, command):
    """Obtain the repo used for processing wire protocol commands.

    The intent of this function is to serve as a monkeypatch point for
    extensions that need commands to operate on different repo views under
    specialized circumstances.
    """
    viewconfig = repo.ui.config(b'server', b'view')
    return repo.filtered(viewconfig)


def dispatch(repo, proto, command):
    repo = getdispatchrepo(repo, proto, command)

    func, spec = commands[command]
    args = proto.getargs(spec)

    return func(repo, proto, *args)


def options(cmd, keys, others):
    opts = {}
    for k in keys:
        if k in others:
            opts[k] = others[k]
            del others[k]
    if others:
        procutil.stderr.write(
            b"warning: %s ignored unexpected arguments %s\n"
            % (cmd, b",".join(others))
        )
    return opts
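A short usage sketch of the `options` helper; the argument values are made up:

    others = {b'three': b'3', b'bogus': b'x'}
    opts = options(b'debugwireargs', [b'three', b'four'], others)
    # opts == {b'three': b'3'}; b'bogus' stays in ``others`` and triggers the
    # stderr warning naming the command and the ignored argument.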


def bundle1allowed(repo, action):
    """Whether a bundle1 operation is allowed from the server.

    Priority is:

    1. server.bundle1gd.<action> (if generaldelta active)
    2. server.bundle1.<action>
    3. server.bundle1gd (if generaldelta active)
    4. server.bundle1
    """
    ui = repo.ui
    gd = b'generaldelta' in repo.requirements

    if gd:
        v = ui.configbool(b'server', b'bundle1gd.%s' % action)
        if v is not None:
            return v

    v = ui.configbool(b'server', b'bundle1.%s' % action)
    if v is not None:
        return v

    if gd:
        v = ui.configbool(b'server', b'bundle1gd')
        if v is not None:
            return v

    return ui.configbool(b'server', b'bundle1')
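The lookup order above corresponds to hgrc settings along these lines (values chosen purely for illustration):

    [server]
    # generaldelta repos: allow bundle1 pulls, refuse everything else
    bundle1gd.pull = true
    bundle1gd = false
    # non-generaldelta repos: allow all bundle1 operations
    bundle1 = true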


commands = wireprototypes.commanddict()


def wireprotocommand(name, args=None, permission=b'push'):
    """Decorator to declare a wire protocol command.

    ``name`` is the name of the wire protocol command being provided.

    ``args`` defines the named arguments accepted by the command. It is
    a space-delimited list of argument names. ``*`` denotes a special value
    that says to accept all named arguments.

    ``permission`` defines the permission type needed to run this command.
    Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
    respectively. Default is to assume command requires ``push`` permissions
    because otherwise commands not declaring their permissions could modify
    a repository that is supposed to be read-only.
    """
    transports = {
        k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1
    }

    # Because SSHv2 is a mirror of SSHv1, we allow "batch" commands through to
    # SSHv2.
    # TODO undo this hack when SSH is using the unified frame protocol.
    if name == b'batch':
        transports.add(wireprototypes.SSHV2)

    if permission not in (b'push', b'pull'):
        raise error.ProgrammingError(
            b'invalid wire protocol permission; '
            b'got %s; expected "push" or "pull"' % permission
        )

    if args is None:
        args = b''

    if not isinstance(args, bytes):
        raise error.ProgrammingError(
            b'arguments for version 1 commands must be declared as bytes'
        )

    def register(func):
        if name in commands:
            raise error.ProgrammingError(
                b'%s command already registered for version 1' % name
            )
        commands[name] = wireprototypes.commandentry(
            func, args=args, transports=transports, permission=permission
        )

        return func

    return register
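A sketch of registering a hypothetical read-only command with the decorator above; ``mystatus`` does not exist in Mercurial:

    @wireprotocommand(b'mystatus', b'key *', permission=b'pull')
    def mystatus(repo, proto, key, others):
        # ``key`` is the declared argument; ``*`` routes any extra named
        # arguments into ``others``.
        return wireprototypes.bytesresponse(b'ok %s\n' % key)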


# TODO define a more appropriate permissions type to use for this.
@wireprotocommand(b'batch', b'cmds *', permission=b'pull')
def batch(repo, proto, cmds, others):
    unescapearg = wireprototypes.unescapebatcharg
    res = []
    for pair in cmds.split(b';'):
        op, args = pair.split(b' ', 1)
        vals = {}
        for a in args.split(b','):
            if a:
                n, v = a.split(b'=')
                vals[unescapearg(n)] = unescapearg(v)
        func, spec = commands[op]

        # Validate that client has permissions to perform this command.
        perm = commands[op].permission
        assert perm in (b'push', b'pull')
        proto.checkperm(perm)

        if spec:
            keys = spec.split()
            data = {}
            for k in keys:
                if k == b'*':
                    star = {}
                    for key in vals.keys():
                        if key not in keys:
                            star[key] = vals[key]
                    data[b'*'] = star
                else:
                    data[k] = vals[k]
            result = func(repo, proto, *[data[k] for k in keys])
        else:
            result = func(repo, proto)
        if isinstance(result, wireprototypes.ooberror):
            return result

        # For now, all batchable commands must return bytesresponse or
        # raw bytes (for backwards compatibility).
        assert isinstance(result, (wireprototypes.bytesresponse, bytes))
        if isinstance(result, wireprototypes.bytesresponse):
            result = result.data
        res.append(wireprototypes.escapebatcharg(result))

    return wireprototypes.bytesresponse(b';'.join(res))
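For reference, the ``cmds`` payload decoded above is a semicolon-separated list of commands, each followed by comma-separated ``name=value`` arguments (values escaped with ``escapebatcharg``). A hypothetical request:

    cmds = b'heads ;known nodes=1f0dee641bb7'
    # -> heads() with no arguments, then known() with
    #    vals == {b'nodes': b'1f0dee641bb7'}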


@wireprotocommand(b'between', b'pairs', permission=b'pull')
def between(repo, proto, pairs):
    pairs = [wireprototypes.decodelist(p, b'-') for p in pairs.split(b" ")]
    r = []
    for b in repo.between(pairs):
        r.append(wireprototypes.encodelist(b) + b"\n")

    return wireprototypes.bytesresponse(b''.join(r))


@wireprotocommand(b'branchmap', permission=b'pull')
def branchmap(repo, proto):
    branchmap = repo.branchmap()
    heads = []
    for branch, nodes in pycompat.iteritems(branchmap):
        branchname = urlreq.quote(encoding.fromlocal(branch))
        branchnodes = wireprototypes.encodelist(nodes)
        heads.append(b'%s %s' % (branchname, branchnodes))

    return wireprototypes.bytesresponse(b'\n'.join(heads))


@wireprotocommand(b'branches', b'nodes', permission=b'pull')
def branches(repo, proto, nodes):
    nodes = wireprototypes.decodelist(nodes)
    r = []
    for b in repo.branches(nodes):
        r.append(wireprototypes.encodelist(b) + b"\n")

    return wireprototypes.bytesresponse(b''.join(r))


@wireprotocommand(b'clonebundles', b'', permission=b'pull')
def clonebundles(repo, proto):
    """Server command for returning info for available bundles to seed clones.

    Clients will parse this response and determine what bundle to fetch.

    Extensions may wrap this command to filter or dynamically emit data
    depending on the request. e.g. you could advertise URLs for the closest
    data center given the client's IP address.
    """
    return wireprototypes.bytesresponse(
        repo.vfs.tryread(bundlecaches.CB_MANIFEST_FILE)
    )
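The manifest returned above is ``.hg/clonebundles.manifest``: one bundle per line, a URL followed by optional ``KEY=value`` attributes. A hypothetical entry:

    https://cdn.example.com/repo-full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true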


wireprotocaps = [
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundlehash',
]


def _capabilities(repo, proto):
    """return a list of capabilities for a repo

    This function exists to allow extensions to easily wrap capabilities
    computation

    - returns a list: easy to alter
    - changes made here are propagated to both the `capabilities` and `hello`
      commands without any other action needed.
    """
    # copy to prevent modification of the global list
    caps = list(wireprotocaps)

    # Command of same name as capability isn't exposed to version 1 of
    # transports. So conditionally add it.
    if commands.commandavailable(b'changegroupsubset', proto):
        caps.append(b'changegroupsubset')

    if streamclone.allowservergeneration(repo):
        if repo.ui.configbool(b'server', b'preferuncompressed'):
            caps.append(b'stream-preferred')
        requiredformats = repo.requirements & repo.supportedformats
        # if our local revlogs are just revlogv1, add 'stream' cap
-        if not requiredformats - {b'revlogv1'}:
+        if not requiredformats - {requirementsmod.REVLOGV1_REQUIREMENT}:
            caps.append(b'stream')
        # otherwise, add 'streamreqs' detailing our local revlog format
        else:
            caps.append(b'streamreqs=%s' % b','.join(sorted(requiredformats)))
    if repo.ui.configbool(b'experimental', b'bundle2-advertise'):
        capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=b'server'))
        caps.append(b'bundle2=' + urlreq.quote(capsblob))
    caps.append(b'unbundle=%s' % b','.join(bundle2.bundlepriority))

    if repo.ui.configbool(b'experimental', b'narrow'):
        caps.append(wireprototypes.NARROWCAP)
        if repo.ui.configbool(b'experimental', b'narrowservebrokenellipses'):
            caps.append(wireprototypes.ELLIPSESCAP)

    return proto.addcapabilities(repo, caps)
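Because `_capabilities` is the intended wrap point (see the note before the `capabilities` command below), an extension could extend the advertisement roughly like this; ``mycap`` is a made-up token:

    from mercurial import extensions, wireprotov1server

    def _mycaps(orig, repo, proto):
        caps = orig(repo, proto)
        caps.append(b'mycap')  # hypothetical extra capability
        return caps

    def extsetup(ui):
        extensions.wrapfunction(wireprotov1server, '_capabilities', _mycaps)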


# If you are writing an extension and considering wrapping this function,
# wrap `_capabilities` instead.
@wireprotocommand(b'capabilities', permission=b'pull')
def capabilities(repo, proto):
    caps = _capabilities(repo, proto)
    return wireprototypes.bytesresponse(b' '.join(sorted(caps)))


@wireprotocommand(b'changegroup', b'roots', permission=b'pull')
def changegroup(repo, proto, roots):
    nodes = wireprototypes.decodelist(roots)
    outgoing = discovery.outgoing(
        repo, missingroots=nodes, ancestorsof=repo.heads()
    )
    cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
    gen = iter(lambda: cg.read(32768), b'')
    return wireprototypes.streamres(gen=gen)


@wireprotocommand(b'changegroupsubset', b'bases heads', permission=b'pull')
def changegroupsubset(repo, proto, bases, heads):
    bases = wireprototypes.decodelist(bases)
    heads = wireprototypes.decodelist(heads)
    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
    gen = iter(lambda: cg.read(32768), b'')
    return wireprototypes.streamres(gen=gen)


@wireprotocommand(b'debugwireargs', b'one two *', permission=b'pull')
def debugwireargs(repo, proto, one, two, others):
    # only accept optional args from the known set
    opts = options(b'debugwireargs', [b'three', b'four'], others)
    return wireprototypes.bytesresponse(
        repo.debugwireargs(one, two, **pycompat.strkwargs(opts))
    )


def find_pullbundle(repo, proto, opts, clheads, heads, common):
    """Return a file object for the first matching pullbundle.

    Pullbundles are specified in .hg/pullbundles.manifest similar to
    clonebundles.
    For each entry, the bundle specification is checked for compatibility:
    - Client features vs the BUNDLESPEC.
    - Revisions shared with the client vs base revisions of the bundle.
      A bundle can be applied only if all its base revisions are known by
      the client.
    - At least one leaf of the bundle's DAG is missing on the client.
    - Every leaf of the bundle's DAG is part of the node set the client wants.
      E.g. do not send a bundle of all changes if the client wants only
      one specific branch of many.
    """

    def decodehexstring(s):
        return {binascii.unhexlify(h) for h in s.split(b';')}

    manifest = repo.vfs.tryread(b'pullbundles.manifest')
    if not manifest:
        return None
    res = bundlecaches.parseclonebundlesmanifest(repo, manifest)
    res = bundlecaches.filterclonebundleentries(repo, res)
    if not res:
        return None
    cl = repo.unfiltered().changelog
    heads_anc = cl.ancestors([cl.rev(rev) for rev in heads], inclusive=True)
    common_anc = cl.ancestors([cl.rev(rev) for rev in common], inclusive=True)
    compformats = clientcompressionsupport(proto)
    for entry in res:
        comp = entry.get(b'COMPRESSION')
        altcomp = util.compengines._bundlenames.get(comp)
        if comp and comp not in compformats and altcomp not in compformats:
            continue
        # No test yet for VERSION, since V2 is supported by any client
        # that advertises partial pulls
        if b'heads' in entry:
            try:
                bundle_heads = decodehexstring(entry[b'heads'])
            except TypeError:
                # Bad heads entry
                continue
            if bundle_heads.issubset(common):
                continue  # Nothing new
            if all(cl.rev(rev) in common_anc for rev in bundle_heads):
                continue  # Still nothing new
            if any(
                cl.rev(rev) not in heads_anc and cl.rev(rev) not in common_anc
                for rev in bundle_heads
            ):
                continue
        if b'bases' in entry:
            try:
                bundle_bases = decodehexstring(entry[b'bases'])
            except TypeError:
                # Bad bases entry
                continue
            if not all(cl.rev(rev) in common_anc for rev in bundle_bases):
                continue
        path = entry[b'URL']
        repo.ui.debug(b'sending pullbundle "%s"\n' % path)
        try:
            return repo.vfs.open(path)
        except IOError:
            repo.ui.debug(b'pullbundle "%s" not accessible\n' % path)
            continue
    return None
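A hypothetical ``.hg/pullbundles.manifest`` entry in the format consumed above; ``heads`` and ``bases`` are semicolon-separated hex nodes (placeholders here):

    partial-default.hg BUNDLESPEC=gzip-v2 heads=<hex-node>;<hex-node> bases=<hex-node>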


@wireprotocommand(b'getbundle', b'*', permission=b'pull')
def getbundle(repo, proto, others):
    opts = options(
        b'getbundle', wireprototypes.GETBUNDLE_ARGUMENTS.keys(), others
    )
    for k, v in pycompat.iteritems(opts):
        keytype = wireprototypes.GETBUNDLE_ARGUMENTS[k]
        if keytype == b'nodes':
            opts[k] = wireprototypes.decodelist(v)
        elif keytype == b'csv':
            opts[k] = list(v.split(b','))
        elif keytype == b'scsv':
            opts[k] = set(v.split(b','))
        elif keytype == b'boolean':
            # Client should serialize False as '0', which is a non-empty string
            # so it evaluates as a True bool.
            if v == b'0':
                opts[k] = False
            else:
                opts[k] = bool(v)
        elif keytype != b'plain':
            raise KeyError(b'unknown getbundle option type %s' % keytype)

    if not bundle1allowed(repo, b'pull'):
        if not exchange.bundle2requested(opts.get(b'bundlecaps')):
            if proto.name == b'http-v1':
                return wireprototypes.ooberror(bundle2required)
            raise error.Abort(bundle2requiredmain, hint=bundle2requiredhint)

    try:
        clheads = set(repo.changelog.heads())
        heads = set(opts.get(b'heads', set()))
        common = set(opts.get(b'common', set()))
        common.discard(nullid)
        if (
            repo.ui.configbool(b'server', b'pullbundle')
            and b'partial-pull' in proto.getprotocaps()
        ):
            # Check if a pre-built bundle covers this request.
            bundle = find_pullbundle(repo, proto, opts, clheads, heads, common)
            if bundle:
                return wireprototypes.streamres(
                    gen=util.filechunkiter(bundle), prefer_uncompressed=True
                )

        if repo.ui.configbool(b'server', b'disablefullbundle'):
            # Check to see if this is a full clone.
            changegroup = opts.get(b'cg', True)
            if changegroup and not common and clheads == heads:
                raise error.Abort(
                    _(b'server has pull-based clones disabled'),
                    hint=_(b'remove --pull if specified or upgrade Mercurial'),
                )

        info, chunks = exchange.getbundlechunks(
            repo, b'serve', **pycompat.strkwargs(opts)
        )
        prefercompressed = info.get(b'prefercompressed', True)
    except error.Abort as exc:
        # cleanly forward Abort error to the client
        if not exchange.bundle2requested(opts.get(b'bundlecaps')):
            if proto.name == b'http-v1':
                return wireprototypes.ooberror(exc.message + b'\n')
            raise  # cannot do better for bundle1 + ssh
        # bundle2 requests expect a bundle2 reply
        bundler = bundle2.bundle20(repo.ui)
        manargs = [(b'message', exc.message)]
        advargs = []
        if exc.hint is not None:
            advargs.append((b'hint', exc.hint))
        bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs))
        chunks = bundler.getchunks()
        prefercompressed = False

    return wireprototypes.streamres(
        gen=chunks, prefer_uncompressed=not prefercompressed
    )


@wireprotocommand(b'heads', permission=b'pull')
def heads(repo, proto):
    h = repo.heads()
    return wireprototypes.bytesresponse(wireprototypes.encodelist(h) + b'\n')


@wireprotocommand(b'hello', permission=b'pull')
def hello(repo, proto):
    """Called as part of SSH handshake to obtain server info.

    Returns a list of lines describing interesting things about the
    server, in an RFC822-like format.

    Currently, the only one defined is ``capabilities``, which consists of a
    line of space separated tokens describing server abilities:

        capabilities: <token0> <token1> <token2>
    """
    caps = capabilities(repo, proto).data
    return wireprototypes.bytesresponse(b'capabilities: %s\n' % caps)
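A representative ``hello`` response line, abbreviated; the exact token set depends on the repository and its configuration:

    capabilities: batch branchmap getbundle known lookup pushkey unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash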


@wireprotocommand(b'listkeys', b'namespace', permission=b'pull')
def listkeys(repo, proto, namespace):
    d = sorted(repo.listkeys(encoding.tolocal(namespace)).items())
    return wireprototypes.bytesresponse(pushkeymod.encodekeys(d))


@wireprotocommand(b'lookup', b'key', permission=b'pull')
def lookup(repo, proto, key):
    try:
        k = encoding.tolocal(key)
        n = repo.lookup(k)
        r = hex(n)
        success = 1
    except Exception as inst:
        r = stringutil.forcebytestr(inst)
        success = 0
    return wireprototypes.bytesresponse(b'%d %s\n' % (success, r))


@wireprotocommand(b'known', b'nodes *', permission=b'pull')
def known(repo, proto, nodes, others):
    v = b''.join(
        b and b'1' or b'0' for b in repo.known(wireprototypes.decodelist(nodes))
    )
    return wireprototypes.bytesresponse(v)


@wireprotocommand(b'protocaps', b'caps', permission=b'pull')
def protocaps(repo, proto, caps):
    if proto.name == wireprototypes.SSHV1:
        proto._protocaps = set(caps.split(b' '))
    return wireprototypes.bytesresponse(b'OK')


@wireprotocommand(b'pushkey', b'namespace key old new', permission=b'push')
def pushkey(repo, proto, namespace, key, old, new):
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and stringutil.escapestr(new) != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new)  # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass  # binary, leave unmodified
    else:
        new = encoding.tolocal(new)  # normal path

    with proto.mayberedirectstdio() as output:
        r = (
            repo.pushkey(
                encoding.tolocal(namespace),
                encoding.tolocal(key),
                encoding.tolocal(old),
                new,
            )
            or False
        )

    output = output.getvalue() if output else b''
    return wireprototypes.bytesresponse(b'%d\n%s' % (int(r), output))


@wireprotocommand(b'stream_out', permission=b'pull')
def stream(repo, proto):
    """If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. The client checks to see if it understands the format.
    """
    return wireprototypes.streamreslegacy(streamclone.generatev1wireproto(repo))


@wireprotocommand(b'unbundle', b'heads', permission=b'push')
def unbundle(repo, proto, heads):
    their_heads = wireprototypes.decodelist(heads)

    with proto.mayberedirectstdio() as output:
        try:
            exchange.check_heads(repo, their_heads, b'preparing changes')
            cleanup = lambda: None
            try:
                payload = proto.getpayload()
                if repo.ui.configbool(b'server', b'streamunbundle'):

                    def cleanup():
                        # Ensure that the full payload is consumed, so
                        # that the connection doesn't contain trailing garbage.
                        for p in payload:
                            pass

                    fp = util.chunkbuffer(payload)
                else:
                    # write bundle data to temporary file as it can be big
                    fp, tempname = None, None

                    def cleanup():
                        if fp:
                            fp.close()
                        if tempname:
                            os.unlink(tempname)

                    fd, tempname = pycompat.mkstemp(prefix=b'hg-unbundle-')
                    repo.ui.debug(
                        b'redirecting incoming bundle to %s\n' % tempname
                    )
                    fp = os.fdopen(fd, pycompat.sysstr(b'wb+'))
                    for p in payload:
                        fp.write(p)
                    fp.seek(0)

                gen = exchange.readbundle(repo.ui, fp, None)
                if isinstance(
                    gen, changegroupmod.cg1unpacker
                ) and not bundle1allowed(repo, b'push'):
                    if proto.name == b'http-v1':
                        # need to special case http because stderr does not
                        # reach the http client on a failed push, so we need
                        # to abuse some other error type to make sure the
                        # message gets to the user.
                        return wireprototypes.ooberror(bundle2required)
                    raise error.Abort(
                        bundle2requiredmain, hint=bundle2requiredhint
                    )

                r = exchange.unbundle(
                    repo, gen, their_heads, b'serve', proto.client()
                )
                if util.safehasattr(r, b'addpart'):
                    # The return looks streamable, we are in the bundle2 case
                    # and should return a stream.
                    return wireprototypes.streamreslegacy(gen=r.getchunks())
                return wireprototypes.pushres(
                    r, output.getvalue() if output else b''
                )

            finally:
                cleanup()

        except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
            # handle non-bundle2 case first
            if not getattr(exc, 'duringunbundle2', False):
                try:
                    raise
                except error.Abort as exc:
                    # The old code we moved used procutil.stderr directly.
                    # We did not change it to minimise code change.
                    # This needs to be moved to something proper.
                    # Feel free to do it.
                    procutil.stderr.write(exc.format())
                    procutil.stderr.flush()
                    return wireprototypes.pushres(
                        0, output.getvalue() if output else b''
                    )
                except error.PushRaced:
                    return wireprototypes.pusherr(
                        pycompat.bytestr(exc),
                        output.getvalue() if output else b'',
                    )

            bundler = bundle2.bundle20(repo.ui)
            for out in getattr(exc, '_bundle2salvagedoutput', ()):
                bundler.addpart(out)
            try:
                try:
                    raise
                except error.PushkeyFailed as exc:
                    # check client caps
                    remotecaps = getattr(exc, '_replycaps', None)
                    if (
                        remotecaps is not None
                        and b'pushkey' not in remotecaps.get(b'error', ())
                    ):
                        # not supported on the remote side, fall back to the
                        # Abort handler.
                        raise
                    part = bundler.newpart(b'error:pushkey')
                    part.addparam(b'in-reply-to', exc.partid)
                    if exc.namespace is not None:
                        part.addparam(
                            b'namespace', exc.namespace, mandatory=False
                        )
                    if exc.key is not None:
                        part.addparam(b'key', exc.key, mandatory=False)
                    if exc.new is not None:
                        part.addparam(b'new', exc.new, mandatory=False)
                    if exc.old is not None:
                        part.addparam(b'old', exc.old, mandatory=False)
                    if exc.ret is not None:
                        part.addparam(b'ret', exc.ret, mandatory=False)
            except error.BundleValueError as exc:
                errpart = bundler.newpart(b'error:unsupportedcontent')
                if exc.parttype is not None:
                    errpart.addparam(b'parttype', exc.parttype)
                if exc.params:
                    errpart.addparam(b'params', b'\0'.join(exc.params))
            except error.Abort as exc:
                manargs = [(b'message', exc.message)]
                advargs = []
                if exc.hint is not None:
                    advargs.append((b'hint', exc.hint))
                bundler.addpart(
                    bundle2.bundlepart(b'error:abort', manargs, advargs)
                )
            except error.PushRaced as exc:
                bundler.newpart(
                    b'error:pushraced',
                    [(b'message', stringutil.forcebytestr(exc))],
                )
            return wireprototypes.streamreslegacy(gen=bundler.getchunks())