changelogv2: introduce a "changelogv2" feature...
author: marmoute
changeset: r48037:6c84fc9c (default branch)

The requested changes are too big and the content was truncated; only the beginning of the diff is shown below.

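The hunk below (marked with - and +) widens the sidedata gate in cg1unpacker.apply(): sidedata used to be added on pull only for repositories with the revlogv2 requirement, and with this change repositories carrying the new changelogv2 requirement qualify as well. A minimal sketch of the resulting predicate, using stand-in arguments rather than the real repo and unbundler objects (the requirement strings are assumptions for illustration, not quoted from requirements.py):

    # Stand-in for the condition computed in apply().
    REVLOGV2 = b'exp-revlogv2.2'       # assumed value, illustration only
    CHANGELOGV2 = b'exp-changelog-v2'  # assumed value, illustration only

    def should_add_sidedata(repo_requirements, cg_version, srctype):
        return (
            (REVLOGV2 in repo_requirements or CHANGELOGV2 in repo_requirements)
            and cg_version == b'04'  # only cg4 can carry sidedata
            and srctype == b'pull'
        )

    assert should_add_sidedata({CHANGELOGV2}, b'04', b'pull')
    assert not should_add_sidedata({CHANGELOGV2}, b'03', b'pull')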
@@ -1,1954 +1,1958 b''
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)
from .pycompat import open

from . import (
    error,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    requirements,
    scmutil,
    util,
)

from .interfaces import repository
from .revlogutils import sidedata as sidedatamod
from .revlogutils import constants as revlog_constants
from .utils import storageutil

_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
_CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b">B20s20s20s20s20sH")
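Not part of the file, but a quick check of what these headers look like on the wire: cg1 packs four raw 20-byte node fields (node, p1, p2, linknode), cg2 adds a delta base, cg3 appends 16-bit revlog flags, and cg4 prepends a protocol-flags byte:

    import struct
    v1 = struct.Struct(b"20s20s20s20s")        # node, p1, p2, linknode
    v2 = struct.Struct(b"20s20s20s20s20s")     # + deltabase
    v3 = struct.Struct(b">20s20s20s20s20sH")   # + revlog flags
    v4 = struct.Struct(b">B20s20s20s20s20sH")  # + leading protocol flags
    assert (v1.size, v2.size, v3.size, v4.size) == (80, 100, 102, 103)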
LFS_REQUIREMENT = b'lfs'

readexactly = util.readexactly


def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(b">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_(b"invalid chunk length %d") % l)
        return b""
    return readexactly(stream, l - 4)


def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(b">l", length + 4)


def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(b">l", 0)
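The three helpers above define the whole framing: a chunk is a 4-byte big-endian length that counts the header itself, and a zero length closes a group. A self-contained round-trip sketch of that framing (plain Python, no Mercurial imports):

    import io
    import struct

    def write_group(payloads):
        # mirrors chunkheader()/closechunk(): length counts the 4 header bytes
        out = io.BytesIO()
        for p in payloads:
            out.write(struct.pack(b">l", len(p) + 4))
            out.write(p)
        out.write(struct.pack(b">l", 0))  # group terminator
        return out.getvalue()

    def read_group(stream):
        # mirrors getchunk(): l <= 4 means an empty chunk, i.e. end of group
        while True:
            (l,) = struct.unpack(b">l", stream.read(4))
            if l <= 4:
                return
            yield stream.read(l - 4)

    data = write_group([b'a', b'bc'])
    assert list(read_group(io.BytesIO(data))) == [b'a', b'bc']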
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    return chunkheader(len(path)) + path


def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, b"wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, b"wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
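writechunks() above relies on a small but easy-to-miss pattern: `cleanup` holds the output path until every chunk has been written, so the finally block only unlinks files we failed to fill. The same pattern in isolation:

    import os
    import tempfile

    def write_chunks_to_temp(chunks):
        fd, name = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
        fh = os.fdopen(fd, "wb")
        cleanup = name  # still set if anything below raises
        try:
            for c in chunks:
                fh.write(c)
            cleanup = None  # success: keep the file
            return name
        finally:
            fh.close()
            if cleanup is not None:
                os.unlink(cleanup)

    path = write_chunks_to_temp([b"chunk1", b"chunk2"])
    os.unlink(path)  # tidy up after the demo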
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """

    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'01'
    _grouplistcount = 1  # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = b'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % alg)
        if alg == b'BZ':
            alg = b'_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != b'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(b">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_(b"invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {b'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        protocol_flags = 0
        return node, p1, p2, deltabase, cs, flags, protocol_flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        header = self._deltaheader(header, prevnode)
        node, p1, p2, deltabase, cs, flags, protocol_flags = header
        return node, p1, p2, cs, deltabase, delta, flags, protocol_flags

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the case of sshrepo, because it doesn't know the end
        of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks.
        # The tree and file parts are a list of entry sections. Each entry
        # section is a series of chunks terminating in an empty chunk. The
        # list of these entry sections is terminated in yet another empty
        # chunk, so we know we've reached the end of the tree/file list when
        # we reach an empty chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if
                    # there were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2 ** 20
                    yield chunk[pos:next]
                    pos = next
        yield closechunk()
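A toy model of the termination logic in getchunks(): the changelog and manifest parts each end at their first empty chunk, while each later list (trees, files) ends at an empty chunk preceded by no entries. Here chunks are plain bytes and grouplistcount mirrors _grouplistcount (1 for cg1/cg2, 2 for cg3/cg4):

    def count_parts(chunks, grouplistcount=1):
        it = iter(chunks)
        parts = 0
        while parts < 2 + grouplistcount:
            noentries = True
            for chunk in it:
                if not chunk:
                    if parts < 2 or noentries:
                        parts += 1
                    break
                noentries = False
        return parts

    # changelog, manifests, one file section ('foo'), end-of-list marker:
    stream = [b'c1', b'', b'm1', b'', b'foo', b'foo-delta', b'', b'']
    assert count_parts(stream) == 3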
    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        storage = repo.manifestlog.getstorage(b'')
        storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
        prog.complete()
        self.callback = None

    def apply(
        self,
        repo,
        tr,
        srctype,
        url,
        targetphase=phases.draft,
        expectedtotal=None,
        sidedata_categories=None,
    ):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1

        `sidedata_categories` is an optional set of the remote's sidedata
        wanted categories.
        """
        repo = repo.unfiltered()

        # Only useful if we're adding sidedata categories. If both peers have
        # the same categories, then we simply don't do anything.
        adding_sidedata = (
-            requirements.REVLOGV2_REQUIREMENT in repo.requirements
+            (
+                requirements.REVLOGV2_REQUIREMENT in repo.requirements
+                or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
+            )
            and self.version == b'04'
            and srctype == b'pull'
        )
        if adding_sidedata:
            sidedata_helpers = sidedatamod.get_sidedata_helpers(
                repo,
                sidedata_categories or set(),
                pull=True,
            )
        else:
            sidedata_helpers = None

        def csmap(x):
            repo.ui.debug(b"add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if it exists)
            # in this function.
            srctype = tr.hookargs.setdefault(b'source', srctype)
            tr.hookargs.setdefault(b'url', url)
            repo.hook(
                b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_(b"adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(
                _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
            )
            self.callback = progress.increment

            efilesset = set()
            duprevs = []

            def ondupchangelog(cl, rev):
                if rev < clstart:
                    duprevs.append(rev)

            def onchangelog(cl, rev):
                ctx = cl.changelogrevision(rev)
                efilesset.update(ctx.files)
                repo.register_changeset(rev, ctx)

            self.changelogheader()
            deltas = self.deltaiter()
            if not cl.addgroup(
                deltas,
                csmap,
                trp,
                alwayscache=True,
                addrevisioncb=onchangelog,
                duplicaterevisioncb=ondupchangelog,
            ):
                repo.ui.develwarn(
                    b'applied empty changelog from changegroup',
                    config=b'warn-empty-changegroup',
                )
            efiles = len(efilesset)
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            del deltas
            # TODO Python 2.7 removal
            # del efilesset
            efilesset = None
            self.callback = None

            # Keep track of the (non-changelog) revlogs we've updated and their
            # range of new revisions for sidedata rewrite.
            # TODO do something more efficient than keeping the reference to
            # the revlogs, especially memory-wise.
            touched_manifests = {}
            touched_filelogs = {}

            # pull off the manifest group
            repo.ui.status(_(b"adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(
                _(b'manifests'), unit=_(b'chunks'), total=changesets
            )
            on_manifest_rev = None
            if sidedata_helpers:
                if revlog_constants.KIND_MANIFESTLOG in sidedata_helpers[1]:

                    def on_manifest_rev(manifest, rev):
                        range = touched_manifests.get(manifest)
                        if not range:
                            touched_manifests[manifest] = (rev, rev)
                        else:
                            assert rev == range[1] + 1
                            touched_manifests[manifest] = (range[0], rev)

            self._unpackmanifests(
                repo,
                revmap,
                trp,
                progress,
                addrevisioncb=on_manifest_rev,
            )

            needfiles = {}
            if repo.ui.configbool(b'server', b'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file nodes we must see
                    for f, n in pycompat.iteritems(mfest):
                        needfiles.setdefault(f, set()).add(n)

            on_filelog_rev = None
            if sidedata_helpers:
                if revlog_constants.KIND_FILELOG in sidedata_helpers[1]:

                    def on_filelog_rev(filelog, rev):
                        range = touched_filelogs.get(filelog)
                        if not range:
                            touched_filelogs[filelog] = (rev, rev)
                        else:
                            assert rev == range[1] + 1
                            touched_filelogs[filelog] = (range[0], rev)

            # process the files
            repo.ui.status(_(b"adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo,
                self,
                revmap,
                trp,
                efiles,
                needfiles,
                addrevisioncb=on_filelog_rev,
            )

            if sidedata_helpers:
                if revlog_constants.KIND_CHANGELOG in sidedata_helpers[1]:
                    cl.rewrite_sidedata(
                        trp, sidedata_helpers, clstart, clend - 1
                    )
                for mf, (startrev, endrev) in touched_manifests.items():
                    mf.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)
                for fl, (startrev, endrev) in touched_filelogs.items():
                    fl.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)

            # making sure the value exists
            tr.changes.setdefault(b'changegroup-count-changesets', 0)
            tr.changes.setdefault(b'changegroup-count-revisions', 0)
            tr.changes.setdefault(b'changegroup-count-files', 0)
            tr.changes.setdefault(b'changegroup-count-heads', 0)

            # Some code uses bundle operations for internal purposes, usually
            # setting `ui.quiet` to do this outside of user sight. Since the
            # report of such operations now happens at the end of the
            # transaction, ui.quiet has no direct effect on the output.
            #
            # To preserve this intent we use an inelegant hack: we fail to
            # report the change if `quiet` is set. We should probably move to
            # something better, but this is a good first step to allow the
            # "end of transaction report" to pass tests.
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-changesets'] += changesets
                tr.changes[b'changegroup-count-revisions'] += newrevs
                tr.changes[b'changegroup-count-files'] += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads += len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1

            # see previous comment about checking ui.quiet
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-heads'] += deltaheads
            repo.invalidatevolatilesets()

            if changesets > 0:
                if b'node' not in tr.hookargs:
                    tr.hookargs[b'node'] = hex(cl.node(clstart))
                    tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs[b'node'] = hex(cl.node(clstart))
                    hookargs[b'node_last'] = hex(cl.node(clend - 1))
                repo.hook(
                    b'pretxnchangegroup',
                    throw=True,
                    **pycompat.strkwargs(hookargs)
                )

            added = pycompat.xrange(clstart, clend)
            phaseall = None
            if srctype in (b'push', b'serve'):
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already exists locally as secret.
                #
                # We should not use `added` here but the list of all changes
                # in the bundle.
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                if duprevs:
                    duprevs.extend(added)
                else:
                    duprevs = added
                phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs)
                duprevs = []

            if changesets > 0:

                def runhooks(unused_success):
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))

                    for rev in added:
                        args = hookargs.copy()
                        args[b'node'] = hex(cl.node(rev))
                        del args[b'node_last']
                        repo.hook(b"incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log(
                        b"incoming",
                        b"%d incoming changes - new heads: %s\n",
                        len(added),
                        b', '.join([hex(c[:6]) for c in newheads]),
                    )

                tr.addpostclose(
                    b'changegroup-runhooks-%020i' % clstart,
                    lambda tr: repo._afterlock(runhooks),
                )
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret
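The tail of apply() encodes its documented return convention: zero is reserved for "nothing happened", so the head-count delta is shifted away from it. In isolation:

    def apply_return_code(deltaheads):
        # matches the docstring: same heads -> 1, n new heads -> 1 + n,
        # n removed heads -> -1 - n, never 0
        return deltaheads - 1 if deltaheads < 0 else deltaheads + 1

    assert apply_return_code(0) == 1    # same number of heads
    assert apply_return_code(2) == 3    # two new heads
    assert apply_return_code(-1) == -2  # one head removed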
    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata)
            yield chunkdata
            chain = chunkdata[0]
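deltaiter() threads each yielded node back in as `prevnode` for the next read; for cg1 that node becomes the implicit delta base (see _deltaheader above, which falls back to p1 only on the first chunk). A toy driver for the same iter(callable, sentinel) pattern:

    def make_deltachunk(nodes):
        pending = list(nodes)

        def deltachunk(prevnode):
            if not pending:
                return {}  # the sentinel deltaiter() stops on
            return (pending.pop(0), prevnode)

        return deltachunk

    deltachunk = make_deltachunk([b'n1', b'n2', b'n3'])
    chain = None
    bases = []
    for node, base in iter(lambda: deltachunk(chain), {}):
        bases.append(base)
        chain = node
    assert bases == [None, b'n1', b'n2']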
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        protocol_flags = 0
        return node, p1, p2, deltabase, cs, flags, protocol_flags


class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """

    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'03'
    _grouplistcount = 2  # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        protocol_flags = 0
        return node, p1, p2, deltabase, cs, flags, protocol_flags

    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
        super(cg3unpacker, self)._unpackmanifests(
            repo, revmap, trp, prog, addrevisioncb=addrevisioncb
        )
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata[b"filename"]
            repo.ui.debug(b"adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(
                deltas, revmap, trp, addrevisioncb=addrevisioncb
            ):
                raise error.Abort(_(b"received dir revlog group is empty"))


class cg4unpacker(cg3unpacker):
    """Unpacker for cg4 streams.

    cg4 streams add support for exchanging sidedata.
    """

    deltaheader = _CHANGEGROUPV4_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'04'

    def _deltaheader(self, headertuple, prevnode):
        protocol_flags, node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags, protocol_flags

    def deltachunk(self, prevnode):
        res = super(cg4unpacker, self).deltachunk(prevnode)
        if not res:
            return res

        (node, p1, p2, cs, deltabase, delta, flags, protocol_flags) = res

        sidedata = {}
        if protocol_flags & storageutil.CG_FLAG_SIDEDATA:
            sidedata_raw = getchunk(self._stream)
            sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)

        return node, p1, p2, cs, deltabase, delta, flags, sidedata
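The cg4 layout shown in deltachunk() above: when the delta's protocol flags carry the sidedata bit, exactly one extra length-prefixed chunk follows the delta chunk. A hedged sketch of the reader side; the flag value here is an assumption for illustration, not the real storageutil.CG_FLAG_SIDEDATA constant:

    import io
    import struct

    CG_FLAG_SIDEDATA = 1  # assumed bit value, for illustration only

    def read_optional_sidedata(stream, protocol_flags):
        if not protocol_flags & CG_FLAG_SIDEDATA:
            return b''
        (l,) = struct.unpack(b">l", stream.read(4))
        return stream.read(l - 4) if l > 4 else b''

    raw = struct.pack(b">l", 4 + 3) + b'abc'
    assert read_optional_sidedata(io.BytesIO(raw), CG_FLAG_SIDEDATA) == b'abc'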
class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)


def _revisiondeltatochunks(repo, delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.

    if delta.delta is not None:
        prefix, data = b'', delta.delta
    elif delta.basenode == repo.nullid:
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data

    if delta.protocol_flags & storageutil.CG_FLAG_SIDEDATA:
        # Need a separate chunk for sidedata to be able to differentiate
        # "raw delta" length and sidedata length
        sidedata = delta.sidedata
        yield chunkheader(len(sidedata))
        yield sidedata
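What the prefixes above mean on the wire: Mercurial binary patches are hunks of (start, end, newlength) followed by the new data, so a full text can always be shipped as one hunk replacing the whole base. This sketch assumes trivialdiffheader/replacediffheader pack exactly that triple, which matches their use here but should be treated as an assumption:

    import struct

    def full_revision_as_patch(data, baselen=0):
        # replace base bytes [0, baselen) with len(data) new bytes
        return struct.pack(b">lll", 0, baselen, len(data)) + data

    def apply_patch(base, patch):
        start, end, newlen = struct.unpack(b">lll", patch[:12])
        return base[:start] + patch[12:12 + newlen] + base[end:]

    assert apply_patch(b"", full_revision_as_patch(b"hello")) == b"hello"
    assert apply_patch(b"old", full_revision_as_patch(b"new", 3)) == b"new"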
def _sortnodesellipsis(store, nodes, cl, lookup):
    """Sort nodes for changegroup generation."""
    # Ellipses serving mode.
    #
    # In a perfect world, we'd generate better ellipsis-ified graphs
    # for non-changelog revlogs. In practice, we haven't started doing
    # that yet, so the resulting DAGs for the manifestlog and filelogs
    # are actually full of bogus parentage on all the ellipsis
    # nodes. This has the side effect that, while the contents are
    # correct, the individual DAGs might be completely out of whack in
    # a case like 882681bc3166 and its ancestors (back about 10
    # revisions or so) in the main hg repo.
    #
    # The one invariant we *know* holds is that the new (potentially
    # bogus) DAG shape will be valid if we order the nodes in the
    # order that they're introduced in dramatis personae by the
    # changelog, so what we do is we sort the non-changelog histories
    # by the order in which they are used by the changelog.
    key = lambda n: cl.rev(lookup(n))
    return sorted(nodes, key=key)


def _resolvenarrowrevisioninfo(
    cl,
    store,
    ischangelog,
    rev,
    linkrev,
    linknode,
    clrevtolocalrev,
    fullclnodes,
    precomputedellipsis,
):
    linkparents = precomputedellipsis[linkrev]

    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend(
                    [pp for pp in precomputedellipsis[p] if pp != nullrev]
                )
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                if util.safehasattr(store, 'target'):
                    target = store.display_id
                else:
                    # some revlog not actually a revlog
                    target = store._revlog.display_id

                raise error.Abort(
                    b"unable to resolve parent while packing '%s' %r"
                    b' for changeset %r' % (target, rev, clrev)
                )

        return nullrev

    if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        (p1,) = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
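The breadth-first walk in local() above, reduced to plain dicts: start from an ellipsis changelog rev and follow precomputed parents until hitting a rev this particular revlog has seen. In this simplified sketch, fullclparents is a parents dict standing in for fullclnodes plus cl.parentrevs(), and nullrev is -1 as in Mercurial:

    def resolve_local(clrev, clrevtolocalrev, fullclparents, ellipsisparents):
        nullrev = -1
        walk = [clrev]
        while walk:
            p = walk.pop(0)
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            if p in fullclparents:
                walk.extend(pp for pp in fullclparents[p] if pp != nullrev)
            elif p in ellipsisparents:
                walk.extend(pp for pp in ellipsisparents[p] if pp != nullrev)
        return nullrev

    # rev 5 is an ellipsis whose nearest relevant ancestor, rev 2, maps to
    # local rev 0 in this revlog:
    assert resolve_local(5, {2: 0}, {}, {5: (2, -1)}) == 0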
def deltagroup(
    repo,
    store,
    nodes,
    ischangelog,
    lookup,
    forcedeltaparentprev,
    topic=None,
    ellipses=False,
    clrevtolocalrev=None,
    fullclnodes=None,
    precomputedellipsis=None,
    sidedata_helpers=None,
):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).

    See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
    `sidedata_helpers`.
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = b'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = b'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl,
                    store,
                    ischangelog,
                    rev,
                    linkrev,
                    linknode,
                    clrevtolocalrev,
                    fullclnodes,
                    precomputedellipsis,
                )

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(
            topic, unit=_(b'chunks'), total=len(nodes)
        )

    configtarget = repo.ui.config(b'devel', b'bundle.delta')
    if configtarget not in (b'', b'p1', b'full'):
        msg = _(b"""config "devel.bundle.delta" as unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == b'p1':
        deltamode = repository.CG_DELTAMODE_P1
    elif configtarget == b'full':
        deltamode = repository.CG_DELTAMODE_FULL

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode,
        sidedata_helpers=sidedata_helpers,
    )

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
968 if revision.node in adjustedparents:
966 p1node, p2node = adjustedparents[revision.node]
969 p1node, p2node = adjustedparents[revision.node]
967 revision.p1node = p1node
970 revision.p1node = p1node
968 revision.p2node = p2node
971 revision.p2node = p2node
969 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
972 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
970
973
971 else:
974 else:
972 linknode = lookup(revision.node)
975 linknode = lookup(revision.node)
973
976
974 revision.linknode = linknode
977 revision.linknode = linknode
975 yield revision
978 yield revision
976
979
977 if progress:
980 if progress:
978 progress.complete()
981 progress.complete()
979
982
980
983
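# For illustration, the `devel.bundle.delta` knob read in deltagroup() above
# accepts b'', b'p1' and b'full'.  A sketch of an hgrc snippet (illustrative
# configuration, not a shipped default) that forces full snapshots:
#
#   [devel]
#   bundle.delta = full
#
# With this set, deltagroup() asks the store for CG_DELTAMODE_FULL revisions
# instead of letting emitrevisions() choose delta bases.

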
class cgpacker(object):
    def __init__(
        self,
        repo,
        oldmatcher,
        matcher,
        version,
        builddeltaheader,
        manifestsend,
        forcedeltaparentprev=False,
        bundlecaps=None,
        ellipses=False,
        shallow=False,
        ellipsisroots=None,
        fullnodes=None,
        remote_sidedata=None,
    ):
        """Given a source repo, construct a bundler.

        oldmatcher is a matcher that matches on files the client already has.
        These will not be included in the changegroup.

        matcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        forcedeltaparentprev indicates whether delta parents must be against
        the previous revision in a delta group. This should only be used for
        compatibility with changegroup version 1.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        ellipses indicates whether ellipsis serving mode is enabled.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.

        fullnodes is the set of changelog nodes which should not be ellipsis
        nodes. We store this rather than the set of nodes that should be
        ellipsis because for very large histories we expect this to be
        significantly smaller.

        remote_sidedata is the set of sidedata categories wanted by the remote.
        """
        assert oldmatcher
        assert matcher
        self._oldmatcher = oldmatcher
        self._matcher = matcher

        self.version = version
        self._forcedeltaparentprev = forcedeltaparentprev
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._ellipses = ellipses

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        if remote_sidedata is None:
            remote_sidedata = set()
        self._remote_sidedata = remote_sidedata
        self._isshallow = shallow
        self._fullclnodes = fullnodes

        # Maps ellipsis revs to their roots at the changelog level.
        self._precomputedellipsis = ellipsisroots

        self._repo = repo

        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def generate(
        self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
    ):
        """Yield a sequence of changegroup byte chunks.

        If changelog is False, changelog data won't be added to the
        changegroup.
        """

        repo = self._repo
        cl = repo.changelog

        self._verbosenote(_(b'uncompressed size of bundle content:\n'))
        size = 0

        sidedata_helpers = None
        if self.version == b'04':
            remote_sidedata = self._remote_sidedata
            if source == b'strip':
                # We're our own remote when stripping, get the no-op helpers
                # TODO a better approach would be for the strip bundle to
                # correctly advertise its sidedata categories directly.
                remote_sidedata = repo._wanted_sidedata
            sidedata_helpers = sidedatamod.get_sidedata_helpers(
                repo, remote_sidedata
            )

        clstate, deltas = self._generatechangelog(
            cl,
            clnodes,
            generate=changelog,
            sidedata_helpers=sidedata_helpers,
        )
        for delta in deltas:
            for chunk in _revisiondeltatochunks(
                self._repo, delta, self._builddeltaheader
            ):
                size += len(chunk)
                yield chunk

        close = closechunk()
        size += len(close)
        yield closechunk()

        self._verbosenote(_(b'%8.i (changelog)\n') % size)

        clrevorder = clstate[b'clrevorder']
        manifests = clstate[b'manifests']
        changedfiles = clstate[b'changedfiles']

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath while the manifest revlog uses generaldelta,
        # the manifest may be walked in the "wrong" order. Without 'clrevorder',
        # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta and is
        # never reordered. To handle this case, we simply take the slowpath,
        # which already has the 'clrevorder' logic. This was also fixed in
        # cc0ff93d0c0c.

        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)

        fnodes = {}  # needed file nodes

        size = 0
        it = self.generatemanifests(
            commonrevs,
            clrevorder,
            fastpathlinkrev,
            manifests,
            fnodes,
            source,
            clstate[b'clrevtomanifestrev'],
            sidedata_helpers=sidedata_helpers,
        )

        for tree, deltas in it:
            if tree:
                assert self.version in (b'03', b'04')
                chunk = _fileheader(tree)
                size += len(chunk)
                yield chunk

            for delta in deltas:
                chunks = _revisiondeltatochunks(
                    self._repo, delta, self._builddeltaheader
                )
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

        self._verbosenote(_(b'%8.i (manifests)\n') % size)
        yield self._manifestsend

        mfdicts = None
        if self._ellipses and self._isshallow:
            mfdicts = [
                (repo.manifestlog[n].read(), lr)
                for (n, lr) in pycompat.iteritems(manifests)
            ]

        manifests.clear()
        clrevs = {cl.rev(x) for x in clnodes}

        it = self.generatefiles(
            changedfiles,
            commonrevs,
            source,
            mfdicts,
            fastpathlinkrev,
            fnodes,
            clrevs,
            sidedata_helpers=sidedata_helpers,
        )

        for path, deltas in it:
            h = _fileheader(path)
            size = len(h)
            yield h

            for delta in deltas:
                chunks = _revisiondeltatochunks(
                    self._repo, delta, self._builddeltaheader
                )
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

            self._verbosenote(_(b'%8.i %s\n') % (size, path))

        yield closechunk()

        if clnodes:
            repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)

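    # For orientation, a schematic of the byte stream yielded by generate()
    # above (not a formal grammar; see closechunk() and _fileheader() for the
    # exact framing bytes):
    #
    #   <changelog deltas> <close>
    #   { [<fileheader(tree)> for non-root trees] <manifest deltas> <close> }*
    #   <manifestsend>
    #   { <fileheader(path)> <file deltas> <close> }*
    #   <close>
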
    def _generatechangelog(
        self, cl, nodes, generate=True, sidedata_helpers=None
    ):
        """Generate data for changelog chunks.

        Returns a 2-tuple of a dict containing state and an iterable of
        byte chunks. The state will not be fully populated until the
        chunk stream has been fully consumed.

        If generate is False, the state will be fully populated and no chunk
        stream will be yielded.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        clrevorder = {}
        manifests = {}
        mfl = self._repo.manifestlog
        changedfiles = set()
        clrevtomanifestrev = {}

        state = {
            b'clrevorder': clrevorder,
            b'manifests': manifests,
            b'changedfiles': changedfiles,
            b'clrevtomanifestrev': clrevtomanifestrev,
        }

        if not (generate or self._ellipses):
            # sort the nodes in storage order
            nodes = sorted(nodes, key=cl.rev)
            for node in nodes:
                c = cl.changelogrevision(node)
                clrevorder[node] = len(clrevorder)
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, node)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return state, ()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.changelogrevision(x)
            clrevorder[x] = len(clrevorder)

            if self._ellipses:
                # Only update manifests if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (
                    x in self._fullclnodes
                    or cl.rev(x) in self._precomputedellipsis
                ):
                    manifestnode = c.manifest
                    # Record the first changeset introducing this manifest
                    # version.
                    manifests.setdefault(manifestnode, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
                    clrevtomanifestrev.setdefault(
                        cl.rev(x), mfl.rev(manifestnode)
                    )
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c.manifest].read().keys())
                else:
                    changedfiles.update(c.files)
            else:
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return x

        gen = deltagroup(
            self._repo,
            cl,
            nodes,
            True,
            lookupcl,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            topic=_(b'changesets'),
            clrevtolocalrev={},
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis,
            sidedata_helpers=sidedata_helpers,
        )

        return state, gen

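    # A sketch of how the pair returned above is meant to be consumed
    # (mirrors generate(); `packer` is a hypothetical cgpacker instance):
    #
    #   clstate, deltas = packer._generatechangelog(cl, clnodes)
    #   for delta in deltas:
    #       pass  # drain the generator first
    #   manifests = clstate[b'manifests']  # only now fully populated
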
    def generatemanifests(
        self,
        commonrevs,
        clrevorder,
        fastpathlinkrev,
        manifests,
        fnodes,
        source,
        clrevtolocalrev,
        sidedata_helpers=None,
    ):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog
        to change what is sent based on pulls vs pushes, etc.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        repo = self._repo
        mfl = repo.manifestlog
        tmfnodes = {b'': manifests}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(tree, nodes):
            if fastpathlinkrev:
                assert not tree

                # pytype: disable=unsupported-operands
                return manifests.__getitem__
                # pytype: enable=unsupported-operands

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(tree, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == b't':  # subdirectory manifest
                        subtree = tree + p + b'/'
                        tmfclnodes = tmfnodes.setdefault(subtree, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = tree + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode

            return lookupmflinknode

        while tmfnodes:
            tree, nodes = tmfnodes.popitem()

            should_visit = self._matcher.visitdir(tree[:-1])
            if tree and not should_visit:
                continue

            store = mfl.getstorage(tree)

            if not should_visit:
                # No nodes to send because this directory is out of
                # the client's view of the repository (probably
                # because of narrow clones). Do this even for the root
                # directory (tree == b'')
                prunednodes = []
            else:
                # Avoid sending any manifest nodes we can prove the
                # client already has by checking linkrevs. See the
                # related comment in generatefiles().
                prunednodes = self._prunemanifests(store, nodes, commonrevs)

            if tree and not prunednodes:
                continue

            lookupfn = makelookupmflinknode(tree, nodes)

            deltas = deltagroup(
                self._repo,
                store,
                prunednodes,
                False,
                lookupfn,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                topic=_(b'manifests'),
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis,
                sidedata_helpers=sidedata_helpers,
            )

            if not self._oldmatcher.visitdir(store.tree[:-1]):
                yield tree, deltas
            else:
                # 'deltas' is a generator and we need to consume it even if
                # we are not going to send it, because a side-effect is that
                # it updates tmfnodes (via lookupfn)
                for d in deltas:
                    pass
                if not tree:
                    yield tree, []

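    # Note on the traversal above: `tmfnodes` doubles as a worklist.  Emitting
    # the root manifest (tree == b'') seeds entries flagged b't' into
    # `tmfnodes` via lookupmflinknode, so subdirectories such as b'dir/' and
    # then b'dir/sub/' are popped and emitted in turn until the dict drains.
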
    def _prunemanifests(self, store, nodes, commonrevs):
        if not self._ellipses:
            # In the non-ellipses case, and in large repositories, it is
            # better to avoid calling store.rev and store.linkrev on a lot of
            # nodes than to save sending some extra data
            return nodes.copy()
        # This is split out as a separate method to allow filtering
        # commonrevs in extension code.
        #
        # TODO(augie): this shouldn't be required, instead we should
        # make filtering of revisions to send delegated to the store
        # layer.
        frev, flr = store.rev, store.linkrev
        return [n for n in nodes if flr(frev(n)) not in commonrevs]

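    # Illustration of the pruning above: with commonrevs == {0, 1}, a
    # manifest node whose linkrev is 1 is assumed present on the client and
    # dropped, while a node introduced by changelog rev 2 is kept and sent.
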
    # The 'source' parameter is useful for extensions
    def generatefiles(
        self,
        changedfiles,
        commonrevs,
        source,
        mfdicts,
        fastpathlinkrev,
        fnodes,
        clrevs,
        sidedata_helpers=None,
    ):
        changedfiles = [
            f
            for f in changedfiles
            if self._matcher(f) and not self._oldmatcher(f)
        ]

        if not fastpathlinkrev:

            def normallinknodes(unused, fname):
                return fnodes.get(fname, {})

        else:
            cln = self._repo.changelog.node

            def normallinknodes(store, fname):
                flinkrev = store.linkrev
                fnode = store.node
                revs = ((r, flinkrev(r)) for r in store)
                return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}

        clrevtolocalrev = {}

        if self._isshallow:
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            clrev = self._repo.changelog.rev

            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = normallinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links

        else:
            linknodes = normallinknodes

        repo = self._repo
        progress = repo.ui.makeprogress(
            _(b'files'), unit=_(b'files'), total=len(changedfiles)
        )
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(
                    _(b"empty or missing file data for %s") % fname
                )

            clrevtolocalrev.clear()

            linkrevnodes = linknodes(filerevlog, fname)

            # Look up filenodes: we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            frev, flr = filerevlog.rev, filerevlog.linkrev
            # Skip sending any filenode we know the client already
            # has. This avoids over-sending files relatively
            # inexpensively, so it's not a problem if we under-filter
            # here.
            filenodes = [
                n for n in linkrevnodes if flr(frev(n)) not in commonrevs
            ]

            if not filenodes:
                continue

            progress.update(i + 1, item=fname)

            deltas = deltagroup(
                self._repo,
                filerevlog,
                filenodes,
                False,
                lookupfilelog,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis,
                sidedata_helpers=sidedata_helpers,
            )

            yield fname, deltas

        progress.complete()


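# Illustration of the fastpath lookup in generatefiles() above: in that case
# normallinknodes() walks the filelog directly and keeps only revisions whose
# linkrev is in clrevs, e.g. a filelog rev r with linkrev(r) == 5 survives
# only when changelog rev 5 is part of the outgoing set.

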
def _makecg1packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.linknode
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'01',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        forcedeltaparentprev=True,
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )


def _makecg2packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'02',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )


def _makecg3packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'03',
        builddeltaheader=builddeltaheader,
        manifestsend=closechunk(),
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )


def _makecg4packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    # Sidedata is in a separate chunk from the delta to differentiate
    # "raw delta" and sidedata.
    def builddeltaheader(d):
        return _CHANGEGROUPV4_DELTA_HEADER.pack(
            d.protocol_flags,
            d.node,
            d.p1node,
            d.p2node,
            d.basenode,
            d.linknode,
            d.flags,
        )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'04',
        builddeltaheader=builddeltaheader,
        manifestsend=closechunk(),
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
        remote_sidedata=remote_sidedata,
    )


_packermap = {
    b'01': (_makecg1packer, cg1unpacker),
    # cg2 adds support for exchanging generaldelta
    b'02': (_makecg2packer, cg2unpacker),
    # cg3 adds support for exchanging revlog flags and treemanifests
    b'03': (_makecg3packer, cg3unpacker),
    # cg4 adds support for exchanging sidedata
    b'04': (_makecg4packer, cg4unpacker),
}


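# For reference, the fixed-size delta headers packed by the factories above
# (struct formats at the top of this module) are 80 bytes for cg1, 100 for
# cg2 (adds basenode), 102 for cg3 (adds a flags short) and 103 for cg4
# (adds a protocol-flags byte).  A sketch of selecting a packer by version
# (assuming matchers and bundlecaps are already in hand):
#
#   packer_fn, unpacker_cls = _packermap[b'02']
#   packer = packer_fn(repo, oldmatcher, matcher, bundlecaps)

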
def allsupportedversions(repo):
    versions = set(_packermap.keys())
    needv03 = False
    if (
        repo.ui.configbool(b'experimental', b'changegroup3')
        or repo.ui.configbool(b'experimental', b'treemanifest')
        or scmutil.istreemanifest(repo)
    ):
        # we keep version 03 because we need it to exchange treemanifest data
        #
        # we also keep versions 01 and 02, because it is possible for a repo
        # to contain both normal and tree manifests at the same time, so
        # using an older version to pull data remains viable
        #
        # (or even to push a subset of history)
        needv03 = True
    if not needv03:
        versions.discard(b'03')
    want_v4 = (
        repo.ui.configbool(b'experimental', b'changegroup4')
        or requirements.REVLOGV2_REQUIREMENT in repo.requirements
        or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
    )
    if not want_v4:
        versions.discard(b'04')
    return versions


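# e.g. a repository carrying requirements.CHANGELOGV2_REQUIREMENT (the new
# want_v4 clause above) keeps b'04' in the returned set, which is what lets
# changelog-v2 repos exchange sidedata-capable changegroups.

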
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)


# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if scmutil.istreemanifest(repo):
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard(b'01')
        versions.discard(b'02')
    if requirements.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard(b'01')
        versions.discard(b'02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard(b'01')
        versions.discard(b'02')

    return versions


def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))


def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if requirements.GENERALDELTA_REQUIREMENT in repo.requirements:
        versions.discard(b'01')
    assert versions
    return min(versions)


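# e.g. on a generaldelta repository safeversion() typically returns b'02',
# since b'01' is discarded above and b'03'/b'04' only remain when explicitly
# enabled or required; a pre-generaldelta repository yields b'01'.

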
def getbundler(
    version,
    repo,
    bundlecaps=None,
    oldmatcher=None,
    matcher=None,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
    remote_sidedata=None,
):
    assert version in supportedoutgoingversions(repo)

    if matcher is None:
        matcher = matchmod.always()
    if oldmatcher is None:
        oldmatcher = matchmod.never()

    if version == b'01' and not matcher.always():
        raise error.ProgrammingError(
            b'version 01 changegroups do not support sparse file matchers'
        )

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _(
                b'ellipsis nodes require at least cg3 on client and server, '
                b'but negotiated version %s'
            )
            % version
        )

    # Requested files could include files not in the local store. So
    # filter those out.
    matcher = repo.narrowmatch(matcher)

    fn = _packermap[version][0]
    return fn(
        repo,
        oldmatcher,
        matcher,
        bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
        remote_sidedata=remote_sidedata,
    )


def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)


def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == b'bundle':
        repo.ui.status(_(b"%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug(b"list of changesets:\n")
        for node in nodes:
            repo.ui.debug(b"%s\n" % hex(node))


def makechangegroup(
    repo, outgoing, version, source, fastpath=False, bundlecaps=None
):
    cgstream = makestream(
        repo,
        outgoing,
        version,
        source,
        fastpath=fastpath,
        bundlecaps=bundlecaps,
    )
    return getunbundler(
        version,
        util.chunkbuffer(cgstream),
        None,
        {b'clcount': len(outgoing.missing)},
    )


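# A sketch of typical makechangegroup() usage (assuming an `outgoing` object
# from discovery and an open transaction `tr`; names are illustrative):
#
#   cg = makechangegroup(repo, outgoing, b'02', b'push')
#   cg.apply(destrepo, tr, b'push', b'bundle:...')
#
# i.e. the returned unbundler re-reads the freshly generated stream via
# util.chunkbuffer.

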
1862 def makestream(
1866 def makestream(
1863 repo,
1867 repo,
1864 outgoing,
1868 outgoing,
1865 version,
1869 version,
1866 source,
1870 source,
1867 fastpath=False,
1871 fastpath=False,
1868 bundlecaps=None,
1872 bundlecaps=None,
1869 matcher=None,
1873 matcher=None,
1870 remote_sidedata=None,
1874 remote_sidedata=None,
1871 ):
1875 ):
1872 bundler = getbundler(
1876 bundler = getbundler(
1873 version,
1877 version,
1874 repo,
1878 repo,
1875 bundlecaps=bundlecaps,
1879 bundlecaps=bundlecaps,
1876 matcher=matcher,
1880 matcher=matcher,
1877 remote_sidedata=remote_sidedata,
1881 remote_sidedata=remote_sidedata,
1878 )
1882 )
1879
1883
1880 repo = repo.unfiltered()
1884 repo = repo.unfiltered()
1881 commonrevs = outgoing.common
1885 commonrevs = outgoing.common
1882 csets = outgoing.missing
1886 csets = outgoing.missing
1883 heads = outgoing.ancestorsof
1887 heads = outgoing.ancestorsof
1884 # We go through the fast path if we get told to, or if all (unfiltered
1888 # We go through the fast path if we get told to, or if all (unfiltered
1885 # heads have been requested (since we then know there all linkrevs will
1889 # heads have been requested (since we then know there all linkrevs will
1886 # be pulled by the client).
1890 # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads())
    )

    repo.hook(b'preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)


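# A minimal usage sketch of the helpers above (added commentary; the call is
# illustrative only: `outgoing` is assumed to be a discovery.outgoing
# instance computed by the caller, and b'02' one of the supported
# changegroup versions):
#
#     cg = makechangegroup(repo, outgoing, b'02', b'push')
#     # `cg` is an unbundler wrapping the generated stream and can be
#     # consumed like any other changegroup reader.

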
def _addchangegroupfiles(
    repo,
    source,
    revmap,
    trp,
    expectedfiles,
    needfiles,
    addrevisioncb=None,
):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(
        _(b'files'), unit=_(b'files'), total=expectedfiles
    )
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata[b"filename"]
        repo.ui.debug(b"adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            added = fl.addgroup(
                deltas,
                revmap,
                trp,
                addrevisioncb=addrevisioncb,
            )
            if not added:
                raise error.Abort(_(b"received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_(b"received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(_(b"received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

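    # Added note (commentary, not in the original source): at this point
    # `needfiles` maps each remaining filename to the set of file nodes the
    # incoming changesets reference but that were not delivered above; the
    # loop below verifies they already exist locally and aborts otherwise.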
    for f, needs in pycompat.iteritems(needfiles):
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _(b'missing file data for %s:%s - run hg verify')
                    % (f, hex(n))
                )

    return revisions, files
@@ -1,2697 +1,2712 @@
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)


def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = b"extension '%s' overwrites config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config=b'warn-config')

        knownitems.update(items)


class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)


class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # we use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using a "color\..*" match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contain "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some matches to avoid the need to prefix most patterns with "^".
            # The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None


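# Added illustrative example of the generic lookup above (the registration
# shown is hypothetical, not one of the items declared in this file):
#
#     reg = itemregister()
#     reg[b'color\..*'] = configitem(b'color', b'color\..*', generic=True)
#     reg.get(b'color.status.added')  # -> returns the generic item
#     reg.get(b'ui.username')         # -> None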
coreitems = {}


def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for cases where the default is derived from other values
dynamicdefault = object()

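# (Added note, based on usage later in this file: items registered with
# default=dynamicdefault, such as color.pagermode below, have their effective
# default computed at lookup time instead of being declared here.)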
# Registering actual config items


def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f


coreconfigitem = getitemregister(coreitems)


def _registerdiffopts(section, configprefix=b''):
    coreconfigitem(
        section,
        configprefix + b'nodates',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'showfunc',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'unified',
        default=None,
    )
    coreconfigitem(
        section,
        configprefix + b'git',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorews',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewsamount',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignoreblanklines',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'ignorewseol',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'nobinary',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'noprefix',
        default=False,
    )
    coreconfigitem(
        section,
        configprefix + b'word-diff',
        default=False,
    )


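# (Added example of the helper above, derived from the calls in this file:
# _registerdiffopts(section=b'diff') registers diff.nodates, diff.showfunc,
# diff.git, etc., while _registerdiffopts(section=b'commands',
# configprefix=b'commit.interactive.') registers the same knobs under
# commands.commit.interactive.*.)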
coreconfigitem(
    b'alias',
    b'.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem(
    b'auth',
    b'cookiefile',
    default=None,
)
_registerdiffopts(section=b'annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem(
    b'bookmarks',
    b'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem(
    b'bundle',
    b'mainreporoot',
    default=b'',
)
coreconfigitem(
    b'censor',
    b'policy',
    default=b'abort',
    experimental=True,
)
coreconfigitem(
    b'chgserver',
    b'idletimeout',
    default=3600,
)
coreconfigitem(
    b'chgserver',
    b'skiphash',
    default=False,
)
coreconfigitem(
    b'cmdserver',
    b'log',
    default=None,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-files',
    default=7,
)
coreconfigitem(
    b'cmdserver',
    b'max-log-size',
    default=b'1 MB',
)
coreconfigitem(
    b'cmdserver',
    b'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem(
    b'cmdserver',
    b'message-encodings',
    default=list,
)
coreconfigitem(
    b'cmdserver',
    b'track-log',
    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
    b'cmdserver',
    b'shutdown-on-interrupt',
    default=True,
)
coreconfigitem(
    b'color',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'color',
    b'mode',
    default=b'auto',
)
coreconfigitem(
    b'color',
    b'pagermode',
    default=dynamicdefault,
)
coreconfigitem(
    b'command-templates',
    b'graphnode',
    default=None,
    alias=[(b'ui', b'graphnodetemplate')],
)
coreconfigitem(
    b'command-templates',
    b'log',
    default=None,
    alias=[(b'ui', b'logtemplate')],
)
coreconfigitem(
    b'command-templates',
    b'mergemarker',
    default=(
        b'{node|short} '
        b'{ifeq(tags, "tip", "", '
        b'ifeq(tags, "", "", "{tags} "))}'
        b'{if(bookmarks, "{bookmarks} ")}'
        b'{ifeq(branch, "default", "", "{branch} ")}'
        b'- {author|user}: {desc|firstline}'
    ),
    alias=[(b'ui', b'mergemarkertemplate')],
)
coreconfigitem(
    b'command-templates',
    b'pre-merge-tool-output',
    default=None,
    alias=[(b'ui', b'pre-merge-tool-output-template')],
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary',
    default=None,
)
coreconfigitem(
    b'command-templates',
    b'oneline-summary.*',
    default=dynamicdefault,
    generic=True,
)
_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
coreconfigitem(
    b'commands',
    b'commit.post-status',
    default=False,
)
coreconfigitem(
    b'commands',
    b'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'merge.require-rev',
    default=False,
)
coreconfigitem(
    b'commands',
    b'push.require-revs',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.confirm',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem(
    b'commands',
    b'resolve.mark-check',
    default=b'none',
)
_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
coreconfigitem(
    b'commands',
    b'show.aliasprefix',
    default=list,
)
coreconfigitem(
    b'commands',
    b'status.relative',
    default=False,
)
coreconfigitem(
    b'commands',
    b'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem(
    b'commands',
    b'status.terse',
    default=b'',
)
coreconfigitem(
    b'commands',
    b'status.verbose',
    default=False,
)
coreconfigitem(
    b'commands',
    b'update.check',
    default=None,
)
coreconfigitem(
    b'commands',
    b'update.requiredest',
    default=False,
)
coreconfigitem(
    b'committemplate',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'convert',
    b'bzr.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.cache',
    default=True,
)
coreconfigitem(
    b'convert',
    b'cvsps.fuzz',
    default=60,
)
coreconfigitem(
    b'convert',
    b'cvsps.logencoding',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergefrom',
    default=None,
)
coreconfigitem(
    b'convert',
    b'cvsps.mergeto',
    default=None,
)
coreconfigitem(
    b'convert',
    b'git.committeractions',
    default=lambda: [b'messagedifferent'],
)
coreconfigitem(
    b'convert',
    b'git.extrakeys',
    default=list,
)
coreconfigitem(
    b'convert',
    b'git.findcopiesharder',
    default=False,
)
coreconfigitem(
    b'convert',
    b'git.remoteprefix',
    default=b'remote',
)
coreconfigitem(
    b'convert',
    b'git.renamelimit',
    default=400,
)
coreconfigitem(
    b'convert',
    b'git.saverev',
    default=True,
)
coreconfigitem(
    b'convert',
    b'git.similarity',
    default=50,
)
coreconfigitem(
    b'convert',
    b'git.skipsubmodules',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.clonebranches',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.ignoreerrors',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.preserve-hash',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.revs',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.saverev',
    default=False,
)
coreconfigitem(
    b'convert',
    b'hg.sourcename',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.startrev',
    default=None,
)
coreconfigitem(
    b'convert',
    b'hg.tagsbranch',
    default=b'default',
)
coreconfigitem(
    b'convert',
    b'hg.usebranchnames',
    default=True,
)
coreconfigitem(
    b'convert',
    b'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'convert',
    b'localtimezone',
    default=False,
)
coreconfigitem(
    b'convert',
    b'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem(
    b'convert',
    b'p4.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'skiptags',
    default=False,
)
coreconfigitem(
    b'convert',
    b'svn.debugsvnlog',
    default=True,
)
coreconfigitem(
    b'convert',
    b'svn.trunk',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.tags',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.branches',
    default=None,
)
coreconfigitem(
    b'convert',
    b'svn.startrev',
    default=0,
)
coreconfigitem(
    b'convert',
    b'svn.dangerous-set-commit-dates',
    default=False,
)
coreconfigitem(
    b'debug',
    b'dirstate.delaywrite',
    default=0,
)
coreconfigitem(
    b'debug',
    b'revlog.verifyposition.changelog',
    default=b'',
)
coreconfigitem(
    b'defaults',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'devel',
    b'all-warnings',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle2.debug',
    default=False,
)
coreconfigitem(
    b'devel',
    b'bundle.delta',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'cache-vfs',
    default=None,
)
coreconfigitem(
    b'devel',
    b'check-locks',
    default=False,
)
coreconfigitem(
    b'devel',
    b'check-relroot',
    default=False,
)
# Track copy information for all files, not just "added" ones (very slow)
coreconfigitem(
    b'devel',
    b'copy-tracing.trace-all-files',
    default=False,
)
coreconfigitem(
    b'devel',
    b'default-date',
    default=None,
)
coreconfigitem(
    b'devel',
    b'deprec-warn',
    default=False,
)
coreconfigitem(
    b'devel',
    b'disableloaddefaultcerts',
    default=False,
)
coreconfigitem(
    b'devel',
    b'warn-empty-changegroup',
    default=False,
)
coreconfigitem(
    b'devel',
    b'legacy.exchange',
    default=list,
)
# When True, revlogs use a special reference version of the nodemap, that is not
# performant but is "known" to behave properly.
coreconfigitem(
    b'devel',
    b'persistent-nodemap',
    default=False,
)
coreconfigitem(
    b'devel',
    b'servercafile',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverexactprotocol',
    default=b'',
)
coreconfigitem(
    b'devel',
    b'serverrequirecert',
    default=False,
)
coreconfigitem(
    b'devel',
    b'strip-obsmarkers',
    default=True,
)
coreconfigitem(
    b'devel',
    b'warn-config',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-default',
    default=None,
)
coreconfigitem(
    b'devel',
    b'user.obsmarker',
    default=None,
)
coreconfigitem(
    b'devel',
    b'warn-config-unknown',
    default=None,
)
coreconfigitem(
    b'devel',
    b'debug.copies',
    default=False,
)
coreconfigitem(
    b'devel',
    b'copy-tracing.multi-thread',
    default=True,
)
coreconfigitem(
    b'devel',
    b'debug.extensions',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.repo-filters',
    default=False,
)
coreconfigitem(
    b'devel',
    b'debug.peer-request',
    default=False,
)
# If discovery.exchange-heads is False, the discovery will not start with
# remote head fetching and local head querying.
coreconfigitem(
    b'devel',
    b'discovery.exchange-heads',
    default=True,
)
# If discovery.grow-sample is False, the sample size used in set discovery will
# not be increased through the process
coreconfigitem(
    b'devel',
    b'discovery.grow-sample',
    default=True,
)
# When discovery.grow-sample.dynamic is True, the default, the sample size is
# adapted to the shape of the undecided set (it is set to the max of:
# <target-size>, len(roots(undecided)), len(heads(undecided))).
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.dynamic',
    default=True,
)
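# (Added worked example for the formula above: with a target size of 200,
# 350 undecided roots and 120 undecided heads, the dynamic sample size is
# max(200, 350, 120) = 350.)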
# discovery.grow-sample.rate controls the rate at which the sample grows
coreconfigitem(
    b'devel',
    b'discovery.grow-sample.rate',
    default=1.05,
)
# If discovery.randomize is False, random sampling during discovery is
# deterministic. It is meant for integration tests.
coreconfigitem(
    b'devel',
    b'discovery.randomize',
    default=True,
)
# Control the initial size of the discovery sample
coreconfigitem(
    b'devel',
    b'discovery.sample-size',
    default=200,
)
# Control the sample size used for the initial round of discovery
coreconfigitem(
    b'devel',
    b'discovery.sample-size.initial',
    default=100,
)
_registerdiffopts(section=b'diff')
coreconfigitem(
    b'diff',
    b'merge',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'email',
    b'bcc',
    default=None,
)
coreconfigitem(
    b'email',
    b'cc',
    default=None,
)
coreconfigitem(
    b'email',
    b'charsets',
    default=list,
)
coreconfigitem(
    b'email',
    b'from',
    default=None,
)
coreconfigitem(
    b'email',
    b'method',
    default=b'smtp',
)
coreconfigitem(
    b'email',
    b'reply-to',
    default=None,
)
coreconfigitem(
    b'email',
    b'to',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'auto-publish',
    default=b'publish',
)
coreconfigitem(
    b'experimental',
    b'bundle-phases',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2-advertise',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'bundle2-output-capture',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2.pushback',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundle2lazylocking',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.bzip2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.gzip',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.none',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'bundlecompthreads.zstd',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'changegroup3',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'changegroup4',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'cleanup-as-archived',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'clientcompressionengines',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'copytrace',
    default=b'on',
)
coreconfigitem(
    b'experimental',
    b'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem(
    b'experimental',
    b'copies.read-from',
    default=b"filelog-only",
)
coreconfigitem(
    b'experimental',
    b'copies.write-to',
    default=b'filelog-only',
)
coreconfigitem(
    b'experimental',
    b'crecordtest',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'directaccess',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'directaccess.revnums',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'dirstate-tree.in-memory',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'editortmpinhg',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'evolution.allowdivergence',
    default=False,
    alias=[(b'experimental', b'allowdivergence')],
)
coreconfigitem(
    b'experimental',
    b'evolution.allowunstable',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.createmarkers',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.effect-flags',
    default=True,
    alias=[(b'experimental', b'effect-flags')],
)
coreconfigitem(
    b'experimental',
    b'evolution.exchange',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.bundle-obsmarker:mandatory',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'log.topo',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'evolution.report-instabilities',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'evolution.track-operation',
    default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem(
    b'experimental',
    b'extra-filter-revs',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'maxdeltachainspan',
    default=-1,
)
# tracks files which were undeleted (merge might delete them but we explicitly
# kept/undeleted them) and creates new filenodes for them
coreconfigitem(
    b'experimental',
    b'merge-track-salvaged',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'mergetempdirprefix',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'mmapindexthreshold',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'narrow',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'exportableenviron',
    default=list,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.index',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'extendedheader.similarity',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphshorten',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem(
    b'experimental',
    b'hook-track-tags',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'httppeer.v2-encoder-order',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'httppostargs',
    default=False,
)
coreconfigitem(b'experimental', b'nointerrupt', default=False)
coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)

coreconfigitem(
    b'experimental',
    b'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'remotenames',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'removeemptydirs',
    default=True,
)
coreconfigitem(
    b'experimental',
    b'revert.interactive.select-to-keep',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'revisions.prefixhexnode',
    default=False,
)
1151 # "out of experimental" todo list.
1151 # "out of experimental" todo list.
1152 #
1152 #
1153 # * include management of a persistent nodemap in the main docket
1153 # * include management of a persistent nodemap in the main docket
1154 # * enforce a "no-truncate" policy for mmap safety
1154 # * enforce a "no-truncate" policy for mmap safety
1155 # - for censoring operation
1155 # - for censoring operation
1156 # - for stripping operation
1156 # - for stripping operation
1157 # - for rollback operation
1157 # - for rollback operation
1158 # * proper streaming (race free) of the docket file
1158 # * proper streaming (race free) of the docket file
1159 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1159 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1160 # * Exchange-wise, we will also need to do something more efficient than
1160 # * Exchange-wise, we will also need to do something more efficient than
1161 # keeping references to the affected revlogs, especially memory-wise when
1161 # keeping references to the affected revlogs, especially memory-wise when
1162 # rewriting sidedata.
1162 # rewriting sidedata.
1163 # * sidedata compression
1163 # * sidedata compression
1164 # * introduce a proper solution to reduce the number of filelog related files.
1164 # * introduce a proper solution to reduce the number of filelog related files.
1165 # * Improvement to consider
1165 # * Improvement to consider
1166 # - avoid compression header in chunk using the default compression?
1166 # - avoid compression header in chunk using the default compression?
1167 # - forbid "inline" compression mode entirely?
1167 # - forbid "inline" compression mode entirely?
1168 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1168 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1169 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1170 # - keep track of chain base or size (probably not that useful anymore)
1170 # - keep track of chain base or size (probably not that useful anymore)
1171 # - store data and sidedata in different files
1171 # - store data and sidedata in different files
coreconfigitem(
    b'experimental',
    b'revlogv2',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'revisions.disambiguatewithin',
    default=None,
)
coreconfigitem(
    b'experimental',
    b'rust.index',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'server.filesdata.recommended-batch-size',
    default=50000,
)
coreconfigitem(
    b'experimental',
    b'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem(
    b'experimental',
    b'server.stream-narrow-clones',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'single-head-per-branch:public-changes-only',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sshserver.support-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.density-threshold',
    default=0.50,
)
coreconfigitem(
    b'experimental',
    b'sparse-read.min-gap-size',
    default=b'65K',
)
coreconfigitem(
    b'experimental',
    b'treemanifest',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'update.atomic-file',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'sshpeer.advertise-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.apiserver',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.api.http-v2',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'web.api.debugreflect',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.wdir-get-thread-safe',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'worker.repository-upgrade',
    default=False,
)
coreconfigitem(
    b'experimental',
    b'xdiff',
    default=False,
)
coreconfigitem(
    b'extensions',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'extdata',
    b'.*',
    default=None,
    generic=True,
)
coreconfigitem(
    b'format',
    b'bookmarks-in-store',
    default=False,
)
coreconfigitem(
    b'format',
    b'chunkcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'dotencode',
    default=True,
)
coreconfigitem(
    b'format',
    b'generaldelta',
    default=False,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'manifestcachesize',
    default=None,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'maxchainlen',
    default=dynamicdefault,
    experimental=True,
)
coreconfigitem(
    b'format',
    b'obsstore-version',
    default=None,
)
coreconfigitem(
    b'format',
    b'sparse-revlog',
    default=True,
)
coreconfigitem(
    b'format',
    b'revlog-compression',
    default=lambda: [b'zstd', b'zlib'],
    alias=[(b'experimental', b'format.compression')],
)
1344 # Experimental TODOs:
1345 #
1346 # * Same as for revlogv2 (but for the reduction of the number of files)
1347 # * drop the storage of the base
1348 # * Improvement to investigate
1349 # - storing .hgtags fnode
1350 # - storing `rank` of changesets
1351 # - storing branch-related identifiers
1352
1353 coreconfigitem(
1354 b'format',
1355 b'exp-use-changelog-v2',
1356 default=None,
1357 experimental=True,
1358 )
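A hedged sketch (not part of this changeset) of how a tri-state knob like the one above is typically consumed: with default=None an unset option stays distinguishable from an explicit value, so the unstable format can be gated on an explicit opt-in. The helper name below is hypothetical.

    def _wants_changelog_v2(ui):
        # hypothetical helper; ui.config() returns the registered default
        # (None here) when the user never set the option
        return ui.config(b'format', b'exp-use-changelog-v2') is not None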
1344 coreconfigitem(
1359 coreconfigitem(
1345 b'format',
1360 b'format',
1346 b'usefncache',
1361 b'usefncache',
1347 default=True,
1362 default=True,
1348 )
1363 )
1349 coreconfigitem(
1364 coreconfigitem(
1350 b'format',
1365 b'format',
1351 b'usegeneraldelta',
1366 b'usegeneraldelta',
1352 default=True,
1367 default=True,
1353 )
1368 )
1354 coreconfigitem(
1369 coreconfigitem(
1355 b'format',
1370 b'format',
1356 b'usestore',
1371 b'usestore',
1357 default=True,
1372 default=True,
1358 )
1373 )
1359
1374
1360
1375
1361 def _persistent_nodemap_default():
1376 def _persistent_nodemap_default():
1362 """compute `use-persistent-nodemap` default value
1377 """compute `use-persistent-nodemap` default value
1363
1378
1364 The feature is disabled unless a fast implementation is available.
1379 The feature is disabled unless a fast implementation is available.
1365 """
1380 """
1366 from . import policy
1381 from . import policy
1367
1382
1368 return policy.importrust('revlog') is not None
1383 return policy.importrust('revlog') is not None
1369
1384
1370
1385
1371 coreconfigitem(
1386 coreconfigitem(
1372 b'format',
1387 b'format',
1373 b'use-persistent-nodemap',
1388 b'use-persistent-nodemap',
1374 default=_persistent_nodemap_default,
1389 default=_persistent_nodemap_default,
1375 )
1390 )
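Note that the default above is a callable. Throughout this file a default may be a zero-argument callable, invoked when the item is looked up; that is why mutable defaults are spelled default=list or default=lambda: [b'*'] (a fresh instance per read) and why the Rust probe above stays off the import path. A minimal sketch with hypothetical names:

    coreconfigitem(
        b'example-section',  # hypothetical section
        b'example-list',     # hypothetical item name
        default=list,        # invoked at lookup time: a fresh [] per read
    )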
1376 coreconfigitem(
1391 coreconfigitem(
1377 b'format',
1392 b'format',
1378 b'exp-use-copies-side-data-changeset',
1393 b'exp-use-copies-side-data-changeset',
1379 default=False,
1394 default=False,
1380 experimental=True,
1395 experimental=True,
1381 )
1396 )
1382 coreconfigitem(
1397 coreconfigitem(
1383 b'format',
1398 b'format',
1384 b'use-share-safe',
1399 b'use-share-safe',
1385 default=False,
1400 default=False,
1386 )
1401 )
1387 coreconfigitem(
1402 coreconfigitem(
1388 b'format',
1403 b'format',
1389 b'internal-phase',
1404 b'internal-phase',
1390 default=False,
1405 default=False,
1391 experimental=True,
1406 experimental=True,
1392 )
1407 )
1393 coreconfigitem(
1408 coreconfigitem(
1394 b'fsmonitor',
1409 b'fsmonitor',
1395 b'warn_when_unused',
1410 b'warn_when_unused',
1396 default=True,
1411 default=True,
1397 )
1412 )
1398 coreconfigitem(
1413 coreconfigitem(
1399 b'fsmonitor',
1414 b'fsmonitor',
1400 b'warn_update_file_count',
1415 b'warn_update_file_count',
1401 default=50000,
1416 default=50000,
1402 )
1417 )
1403 coreconfigitem(
1418 coreconfigitem(
1404 b'fsmonitor',
1419 b'fsmonitor',
1405 b'warn_update_file_count_rust',
1420 b'warn_update_file_count_rust',
1406 default=400000,
1421 default=400000,
1407 )
1422 )
1408 coreconfigitem(
1423 coreconfigitem(
1409 b'help',
1424 b'help',
1410 br'hidden-command\..*',
1425 br'hidden-command\..*',
1411 default=False,
1426 default=False,
1412 generic=True,
1427 generic=True,
1413 )
1428 )
1414 coreconfigitem(
1429 coreconfigitem(
1415 b'help',
1430 b'help',
1416 br'hidden-topic\..*',
1431 br'hidden-topic\..*',
1417 default=False,
1432 default=False,
1418 generic=True,
1433 generic=True,
1419 )
1434 )
1420 coreconfigitem(
1435 coreconfigitem(
1421 b'hooks',
1436 b'hooks',
1422 b'[^:]*',
1437 b'[^:]*',
1423 default=dynamicdefault,
1438 default=dynamicdefault,
1424 generic=True,
1439 generic=True,
1425 )
1440 )
1426 coreconfigitem(
1441 coreconfigitem(
1427 b'hooks',
1442 b'hooks',
1428 b'.*:run-with-plain',
1443 b'.*:run-with-plain',
1429 default=True,
1444 default=True,
1430 generic=True,
1445 generic=True,
1431 )
1446 )
1432 coreconfigitem(
1447 coreconfigitem(
1433 b'hgweb-paths',
1448 b'hgweb-paths',
1434 b'.*',
1449 b'.*',
1435 default=list,
1450 default=list,
1436 generic=True,
1451 generic=True,
1437 )
1452 )
1438 coreconfigitem(
1453 coreconfigitem(
1439 b'hostfingerprints',
1454 b'hostfingerprints',
1440 b'.*',
1455 b'.*',
1441 default=list,
1456 default=list,
1442 generic=True,
1457 generic=True,
1443 )
1458 )
1444 coreconfigitem(
1459 coreconfigitem(
1445 b'hostsecurity',
1460 b'hostsecurity',
1446 b'ciphers',
1461 b'ciphers',
1447 default=None,
1462 default=None,
1448 )
1463 )
1449 coreconfigitem(
1464 coreconfigitem(
1450 b'hostsecurity',
1465 b'hostsecurity',
1451 b'minimumprotocol',
1466 b'minimumprotocol',
1452 default=dynamicdefault,
1467 default=dynamicdefault,
1453 )
1468 )
1454 coreconfigitem(
1469 coreconfigitem(
1455 b'hostsecurity',
1470 b'hostsecurity',
1456 b'.*:minimumprotocol$',
1471 b'.*:minimumprotocol$',
1457 default=dynamicdefault,
1472 default=dynamicdefault,
1458 generic=True,
1473 generic=True,
1459 )
1474 )
1460 coreconfigitem(
1475 coreconfigitem(
1461 b'hostsecurity',
1476 b'hostsecurity',
1462 b'.*:ciphers$',
1477 b'.*:ciphers$',
1463 default=dynamicdefault,
1478 default=dynamicdefault,
1464 generic=True,
1479 generic=True,
1465 )
1480 )
1466 coreconfigitem(
1481 coreconfigitem(
1467 b'hostsecurity',
1482 b'hostsecurity',
1468 b'.*:fingerprints$',
1483 b'.*:fingerprints$',
1469 default=list,
1484 default=list,
1470 generic=True,
1485 generic=True,
1471 )
1486 )
1472 coreconfigitem(
1487 coreconfigitem(
1473 b'hostsecurity',
1488 b'hostsecurity',
1474 b'.*:verifycertsfile$',
1489 b'.*:verifycertsfile$',
1475 default=None,
1490 default=None,
1476 generic=True,
1491 generic=True,
1477 )
1492 )
1478
1493
1479 coreconfigitem(
1494 coreconfigitem(
1480 b'http_proxy',
1495 b'http_proxy',
1481 b'always',
1496 b'always',
1482 default=False,
1497 default=False,
1483 )
1498 )
1484 coreconfigitem(
1499 coreconfigitem(
1485 b'http_proxy',
1500 b'http_proxy',
1486 b'host',
1501 b'host',
1487 default=None,
1502 default=None,
1488 )
1503 )
1489 coreconfigitem(
1504 coreconfigitem(
1490 b'http_proxy',
1505 b'http_proxy',
1491 b'no',
1506 b'no',
1492 default=list,
1507 default=list,
1493 )
1508 )
1494 coreconfigitem(
1509 coreconfigitem(
1495 b'http_proxy',
1510 b'http_proxy',
1496 b'passwd',
1511 b'passwd',
1497 default=None,
1512 default=None,
1498 )
1513 )
1499 coreconfigitem(
1514 coreconfigitem(
1500 b'http_proxy',
1515 b'http_proxy',
1501 b'user',
1516 b'user',
1502 default=None,
1517 default=None,
1503 )
1518 )
1504
1519
1505 coreconfigitem(
1520 coreconfigitem(
1506 b'http',
1521 b'http',
1507 b'timeout',
1522 b'timeout',
1508 default=None,
1523 default=None,
1509 )
1524 )
1510
1525
1511 coreconfigitem(
1526 coreconfigitem(
1512 b'logtoprocess',
1527 b'logtoprocess',
1513 b'commandexception',
1528 b'commandexception',
1514 default=None,
1529 default=None,
1515 )
1530 )
1516 coreconfigitem(
1531 coreconfigitem(
1517 b'logtoprocess',
1532 b'logtoprocess',
1518 b'commandfinish',
1533 b'commandfinish',
1519 default=None,
1534 default=None,
1520 )
1535 )
1521 coreconfigitem(
1536 coreconfigitem(
1522 b'logtoprocess',
1537 b'logtoprocess',
1523 b'command',
1538 b'command',
1524 default=None,
1539 default=None,
1525 )
1540 )
1526 coreconfigitem(
1541 coreconfigitem(
1527 b'logtoprocess',
1542 b'logtoprocess',
1528 b'develwarn',
1543 b'develwarn',
1529 default=None,
1544 default=None,
1530 )
1545 )
1531 coreconfigitem(
1546 coreconfigitem(
1532 b'logtoprocess',
1547 b'logtoprocess',
1533 b'uiblocked',
1548 b'uiblocked',
1534 default=None,
1549 default=None,
1535 )
1550 )
1536 coreconfigitem(
1551 coreconfigitem(
1537 b'merge',
1552 b'merge',
1538 b'checkunknown',
1553 b'checkunknown',
1539 default=b'abort',
1554 default=b'abort',
1540 )
1555 )
1541 coreconfigitem(
1556 coreconfigitem(
1542 b'merge',
1557 b'merge',
1543 b'checkignored',
1558 b'checkignored',
1544 default=b'abort',
1559 default=b'abort',
1545 )
1560 )
1546 coreconfigitem(
1561 coreconfigitem(
1547 b'experimental',
1562 b'experimental',
1548 b'merge.checkpathconflicts',
1563 b'merge.checkpathconflicts',
1549 default=False,
1564 default=False,
1550 )
1565 )
1551 coreconfigitem(
1566 coreconfigitem(
1552 b'merge',
1567 b'merge',
1553 b'followcopies',
1568 b'followcopies',
1554 default=True,
1569 default=True,
1555 )
1570 )
1556 coreconfigitem(
1571 coreconfigitem(
1557 b'merge',
1572 b'merge',
1558 b'on-failure',
1573 b'on-failure',
1559 default=b'continue',
1574 default=b'continue',
1560 )
1575 )
1561 coreconfigitem(
1576 coreconfigitem(
1562 b'merge',
1577 b'merge',
1563 b'preferancestor',
1578 b'preferancestor',
1564 default=lambda: [b'*'],
1579 default=lambda: [b'*'],
1565 experimental=True,
1580 experimental=True,
1566 )
1581 )
1567 coreconfigitem(
1582 coreconfigitem(
1568 b'merge',
1583 b'merge',
1569 b'strict-capability-check',
1584 b'strict-capability-check',
1570 default=False,
1585 default=False,
1571 )
1586 )
1572 coreconfigitem(
1587 coreconfigitem(
1573 b'merge-tools',
1588 b'merge-tools',
1574 b'.*',
1589 b'.*',
1575 default=None,
1590 default=None,
1576 generic=True,
1591 generic=True,
1577 )
1592 )
1578 coreconfigitem(
1593 coreconfigitem(
1579 b'merge-tools',
1594 b'merge-tools',
1580 br'.*\.args$',
1595 br'.*\.args$',
1581 default=b"$local $base $other",
1596 default=b"$local $base $other",
1582 generic=True,
1597 generic=True,
1583 priority=-1,
1598 priority=-1,
1584 )
1599 )
1585 coreconfigitem(
1600 coreconfigitem(
1586 b'merge-tools',
1601 b'merge-tools',
1587 br'.*\.binary$',
1602 br'.*\.binary$',
1588 default=False,
1603 default=False,
1589 generic=True,
1604 generic=True,
1590 priority=-1,
1605 priority=-1,
1591 )
1606 )
1592 coreconfigitem(
1607 coreconfigitem(
1593 b'merge-tools',
1608 b'merge-tools',
1594 br'.*\.check$',
1609 br'.*\.check$',
1595 default=list,
1610 default=list,
1596 generic=True,
1611 generic=True,
1597 priority=-1,
1612 priority=-1,
1598 )
1613 )
1599 coreconfigitem(
1614 coreconfigitem(
1600 b'merge-tools',
1615 b'merge-tools',
1601 br'.*\.checkchanged$',
1616 br'.*\.checkchanged$',
1602 default=False,
1617 default=False,
1603 generic=True,
1618 generic=True,
1604 priority=-1,
1619 priority=-1,
1605 )
1620 )
1606 coreconfigitem(
1621 coreconfigitem(
1607 b'merge-tools',
1622 b'merge-tools',
1608 br'.*\.executable$',
1623 br'.*\.executable$',
1609 default=dynamicdefault,
1624 default=dynamicdefault,
1610 generic=True,
1625 generic=True,
1611 priority=-1,
1626 priority=-1,
1612 )
1627 )
1613 coreconfigitem(
1628 coreconfigitem(
1614 b'merge-tools',
1629 b'merge-tools',
1615 br'.*\.fixeol$',
1630 br'.*\.fixeol$',
1616 default=False,
1631 default=False,
1617 generic=True,
1632 generic=True,
1618 priority=-1,
1633 priority=-1,
1619 )
1634 )
1620 coreconfigitem(
1635 coreconfigitem(
1621 b'merge-tools',
1636 b'merge-tools',
1622 br'.*\.gui$',
1637 br'.*\.gui$',
1623 default=False,
1638 default=False,
1624 generic=True,
1639 generic=True,
1625 priority=-1,
1640 priority=-1,
1626 )
1641 )
1627 coreconfigitem(
1642 coreconfigitem(
1628 b'merge-tools',
1643 b'merge-tools',
1629 br'.*\.mergemarkers$',
1644 br'.*\.mergemarkers$',
1630 default=b'basic',
1645 default=b'basic',
1631 generic=True,
1646 generic=True,
1632 priority=-1,
1647 priority=-1,
1633 )
1648 )
1634 coreconfigitem(
1649 coreconfigitem(
1635 b'merge-tools',
1650 b'merge-tools',
1636 br'.*\.mergemarkertemplate$',
1651 br'.*\.mergemarkertemplate$',
1637 default=dynamicdefault, # take from command-templates.mergemarker
1652 default=dynamicdefault, # take from command-templates.mergemarker
1638 generic=True,
1653 generic=True,
1639 priority=-1,
1654 priority=-1,
1640 )
1655 )
1641 coreconfigitem(
1656 coreconfigitem(
1642 b'merge-tools',
1657 b'merge-tools',
1643 br'.*\.priority$',
1658 br'.*\.priority$',
1644 default=0,
1659 default=0,
1645 generic=True,
1660 generic=True,
1646 priority=-1,
1661 priority=-1,
1647 )
1662 )
1648 coreconfigitem(
1663 coreconfigitem(
1649 b'merge-tools',
1664 b'merge-tools',
1650 br'.*\.premerge$',
1665 br'.*\.premerge$',
1651 default=dynamicdefault,
1666 default=dynamicdefault,
1652 generic=True,
1667 generic=True,
1653 priority=-1,
1668 priority=-1,
1654 )
1669 )
1655 coreconfigitem(
1670 coreconfigitem(
1656 b'merge-tools',
1671 b'merge-tools',
1657 br'.*\.symlink$',
1672 br'.*\.symlink$',
1658 default=False,
1673 default=False,
1659 generic=True,
1674 generic=True,
1660 priority=-1,
1675 priority=-1,
1661 )
1676 )
1662 coreconfigitem(
1677 coreconfigitem(
1663 b'pager',
1678 b'pager',
1664 b'attend-.*',
1679 b'attend-.*',
1665 default=dynamicdefault,
1680 default=dynamicdefault,
1666 generic=True,
1681 generic=True,
1667 )
1682 )
1668 coreconfigitem(
1683 coreconfigitem(
1669 b'pager',
1684 b'pager',
1670 b'ignore',
1685 b'ignore',
1671 default=list,
1686 default=list,
1672 )
1687 )
1673 coreconfigitem(
1688 coreconfigitem(
1674 b'pager',
1689 b'pager',
1675 b'pager',
1690 b'pager',
1676 default=dynamicdefault,
1691 default=dynamicdefault,
1677 )
1692 )
1678 coreconfigitem(
1693 coreconfigitem(
1679 b'patch',
1694 b'patch',
1680 b'eol',
1695 b'eol',
1681 default=b'strict',
1696 default=b'strict',
1682 )
1697 )
1683 coreconfigitem(
1698 coreconfigitem(
1684 b'patch',
1699 b'patch',
1685 b'fuzz',
1700 b'fuzz',
1686 default=2,
1701 default=2,
1687 )
1702 )
1688 coreconfigitem(
1703 coreconfigitem(
1689 b'paths',
1704 b'paths',
1690 b'default',
1705 b'default',
1691 default=None,
1706 default=None,
1692 )
1707 )
1693 coreconfigitem(
1708 coreconfigitem(
1694 b'paths',
1709 b'paths',
1695 b'default-push',
1710 b'default-push',
1696 default=None,
1711 default=None,
1697 )
1712 )
1698 coreconfigitem(
1713 coreconfigitem(
1699 b'paths',
1714 b'paths',
1700 b'.*',
1715 b'.*',
1701 default=None,
1716 default=None,
1702 generic=True,
1717 generic=True,
1703 )
1718 )
1704 coreconfigitem(
1719 coreconfigitem(
1705 b'phases',
1720 b'phases',
1706 b'checksubrepos',
1721 b'checksubrepos',
1707 default=b'follow',
1722 default=b'follow',
1708 )
1723 )
1709 coreconfigitem(
1724 coreconfigitem(
1710 b'phases',
1725 b'phases',
1711 b'new-commit',
1726 b'new-commit',
1712 default=b'draft',
1727 default=b'draft',
1713 )
1728 )
1714 coreconfigitem(
1729 coreconfigitem(
1715 b'phases',
1730 b'phases',
1716 b'publish',
1731 b'publish',
1717 default=True,
1732 default=True,
1718 )
1733 )
1719 coreconfigitem(
1734 coreconfigitem(
1720 b'profiling',
1735 b'profiling',
1721 b'enabled',
1736 b'enabled',
1722 default=False,
1737 default=False,
1723 )
1738 )
1724 coreconfigitem(
1739 coreconfigitem(
1725 b'profiling',
1740 b'profiling',
1726 b'format',
1741 b'format',
1727 default=b'text',
1742 default=b'text',
1728 )
1743 )
1729 coreconfigitem(
1744 coreconfigitem(
1730 b'profiling',
1745 b'profiling',
1731 b'freq',
1746 b'freq',
1732 default=1000,
1747 default=1000,
1733 )
1748 )
1734 coreconfigitem(
1749 coreconfigitem(
1735 b'profiling',
1750 b'profiling',
1736 b'limit',
1751 b'limit',
1737 default=30,
1752 default=30,
1738 )
1753 )
1739 coreconfigitem(
1754 coreconfigitem(
1740 b'profiling',
1755 b'profiling',
1741 b'nested',
1756 b'nested',
1742 default=0,
1757 default=0,
1743 )
1758 )
1744 coreconfigitem(
1759 coreconfigitem(
1745 b'profiling',
1760 b'profiling',
1746 b'output',
1761 b'output',
1747 default=None,
1762 default=None,
1748 )
1763 )
1749 coreconfigitem(
1764 coreconfigitem(
1750 b'profiling',
1765 b'profiling',
1751 b'showmax',
1766 b'showmax',
1752 default=0.999,
1767 default=0.999,
1753 )
1768 )
1754 coreconfigitem(
1769 coreconfigitem(
1755 b'profiling',
1770 b'profiling',
1756 b'showmin',
1771 b'showmin',
1757 default=dynamicdefault,
1772 default=dynamicdefault,
1758 )
1773 )
1759 coreconfigitem(
1774 coreconfigitem(
1760 b'profiling',
1775 b'profiling',
1761 b'showtime',
1776 b'showtime',
1762 default=True,
1777 default=True,
1763 )
1778 )
1764 coreconfigitem(
1779 coreconfigitem(
1765 b'profiling',
1780 b'profiling',
1766 b'sort',
1781 b'sort',
1767 default=b'inlinetime',
1782 default=b'inlinetime',
1768 )
1783 )
1769 coreconfigitem(
1784 coreconfigitem(
1770 b'profiling',
1785 b'profiling',
1771 b'statformat',
1786 b'statformat',
1772 default=b'hotpath',
1787 default=b'hotpath',
1773 )
1788 )
1774 coreconfigitem(
1789 coreconfigitem(
1775 b'profiling',
1790 b'profiling',
1776 b'time-track',
1791 b'time-track',
1777 default=dynamicdefault,
1792 default=dynamicdefault,
1778 )
1793 )
1779 coreconfigitem(
1794 coreconfigitem(
1780 b'profiling',
1795 b'profiling',
1781 b'type',
1796 b'type',
1782 default=b'stat',
1797 default=b'stat',
1783 )
1798 )
1784 coreconfigitem(
1799 coreconfigitem(
1785 b'progress',
1800 b'progress',
1786 b'assume-tty',
1801 b'assume-tty',
1787 default=False,
1802 default=False,
1788 )
1803 )
1789 coreconfigitem(
1804 coreconfigitem(
1790 b'progress',
1805 b'progress',
1791 b'changedelay',
1806 b'changedelay',
1792 default=1,
1807 default=1,
1793 )
1808 )
1794 coreconfigitem(
1809 coreconfigitem(
1795 b'progress',
1810 b'progress',
1796 b'clear-complete',
1811 b'clear-complete',
1797 default=True,
1812 default=True,
1798 )
1813 )
1799 coreconfigitem(
1814 coreconfigitem(
1800 b'progress',
1815 b'progress',
1801 b'debug',
1816 b'debug',
1802 default=False,
1817 default=False,
1803 )
1818 )
1804 coreconfigitem(
1819 coreconfigitem(
1805 b'progress',
1820 b'progress',
1806 b'delay',
1821 b'delay',
1807 default=3,
1822 default=3,
1808 )
1823 )
1809 coreconfigitem(
1824 coreconfigitem(
1810 b'progress',
1825 b'progress',
1811 b'disable',
1826 b'disable',
1812 default=False,
1827 default=False,
1813 )
1828 )
1814 coreconfigitem(
1829 coreconfigitem(
1815 b'progress',
1830 b'progress',
1816 b'estimateinterval',
1831 b'estimateinterval',
1817 default=60.0,
1832 default=60.0,
1818 )
1833 )
1819 coreconfigitem(
1834 coreconfigitem(
1820 b'progress',
1835 b'progress',
1821 b'format',
1836 b'format',
1822 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1837 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1823 )
1838 )
1824 coreconfigitem(
1839 coreconfigitem(
1825 b'progress',
1840 b'progress',
1826 b'refresh',
1841 b'refresh',
1827 default=0.1,
1842 default=0.1,
1828 )
1843 )
1829 coreconfigitem(
1844 coreconfigitem(
1830 b'progress',
1845 b'progress',
1831 b'width',
1846 b'width',
1832 default=dynamicdefault,
1847 default=dynamicdefault,
1833 )
1848 )
1834 coreconfigitem(
1849 coreconfigitem(
1835 b'pull',
1850 b'pull',
1836 b'confirm',
1851 b'confirm',
1837 default=False,
1852 default=False,
1838 )
1853 )
1839 coreconfigitem(
1854 coreconfigitem(
1840 b'push',
1855 b'push',
1841 b'pushvars.server',
1856 b'pushvars.server',
1842 default=False,
1857 default=False,
1843 )
1858 )
1844 coreconfigitem(
1859 coreconfigitem(
1845 b'rewrite',
1860 b'rewrite',
1846 b'backup-bundle',
1861 b'backup-bundle',
1847 default=True,
1862 default=True,
1848 alias=[(b'ui', b'history-editing-backup')],
1863 alias=[(b'ui', b'history-editing-backup')],
1849 )
1864 )
1850 coreconfigitem(
1865 coreconfigitem(
1851 b'rewrite',
1866 b'rewrite',
1852 b'update-timestamp',
1867 b'update-timestamp',
1853 default=False,
1868 default=False,
1854 )
1869 )
1855 coreconfigitem(
1870 coreconfigitem(
1856 b'rewrite',
1871 b'rewrite',
1857 b'empty-successor',
1872 b'empty-successor',
1858 default=b'skip',
1873 default=b'skip',
1859 experimental=True,
1874 experimental=True,
1860 )
1875 )
1861 coreconfigitem(
1876 coreconfigitem(
1862 b'storage',
1877 b'storage',
1863 b'new-repo-backend',
1878 b'new-repo-backend',
1864 default=b'revlogv1',
1879 default=b'revlogv1',
1865 experimental=True,
1880 experimental=True,
1866 )
1881 )
1867 coreconfigitem(
1882 coreconfigitem(
1868 b'storage',
1883 b'storage',
1869 b'revlog.optimize-delta-parent-choice',
1884 b'revlog.optimize-delta-parent-choice',
1870 default=True,
1885 default=True,
1871 alias=[(b'format', b'aggressivemergedeltas')],
1886 alias=[(b'format', b'aggressivemergedeltas')],
1872 )
1887 )
1873 # experimental as long as rust is experimental (or a C version is implemented)
1888 # experimental as long as rust is experimental (or a C version is implemented)
1874 coreconfigitem(
1889 coreconfigitem(
1875 b'storage',
1890 b'storage',
1876 b'revlog.persistent-nodemap.mmap',
1891 b'revlog.persistent-nodemap.mmap',
1877 default=True,
1892 default=True,
1878 )
1893 )
1879 # experimental as long as format.use-persistent-nodemap is.
1894 # experimental as long as format.use-persistent-nodemap is.
1880 coreconfigitem(
1895 coreconfigitem(
1881 b'storage',
1896 b'storage',
1882 b'revlog.persistent-nodemap.slow-path',
1897 b'revlog.persistent-nodemap.slow-path',
1883 default=b"abort",
1898 default=b"abort",
1884 )
1899 )
1885
1900
1886 coreconfigitem(
1901 coreconfigitem(
1887 b'storage',
1902 b'storage',
1888 b'revlog.reuse-external-delta',
1903 b'revlog.reuse-external-delta',
1889 default=True,
1904 default=True,
1890 )
1905 )
1891 coreconfigitem(
1906 coreconfigitem(
1892 b'storage',
1907 b'storage',
1893 b'revlog.reuse-external-delta-parent',
1908 b'revlog.reuse-external-delta-parent',
1894 default=None,
1909 default=None,
1895 )
1910 )
1896 coreconfigitem(
1911 coreconfigitem(
1897 b'storage',
1912 b'storage',
1898 b'revlog.zlib.level',
1913 b'revlog.zlib.level',
1899 default=None,
1914 default=None,
1900 )
1915 )
1901 coreconfigitem(
1916 coreconfigitem(
1902 b'storage',
1917 b'storage',
1903 b'revlog.zstd.level',
1918 b'revlog.zstd.level',
1904 default=None,
1919 default=None,
1905 )
1920 )
1906 coreconfigitem(
1921 coreconfigitem(
1907 b'server',
1922 b'server',
1908 b'bookmarks-pushkey-compat',
1923 b'bookmarks-pushkey-compat',
1909 default=True,
1924 default=True,
1910 )
1925 )
1911 coreconfigitem(
1926 coreconfigitem(
1912 b'server',
1927 b'server',
1913 b'bundle1',
1928 b'bundle1',
1914 default=True,
1929 default=True,
1915 )
1930 )
1916 coreconfigitem(
1931 coreconfigitem(
1917 b'server',
1932 b'server',
1918 b'bundle1gd',
1933 b'bundle1gd',
1919 default=None,
1934 default=None,
1920 )
1935 )
1921 coreconfigitem(
1936 coreconfigitem(
1922 b'server',
1937 b'server',
1923 b'bundle1.pull',
1938 b'bundle1.pull',
1924 default=None,
1939 default=None,
1925 )
1940 )
1926 coreconfigitem(
1941 coreconfigitem(
1927 b'server',
1942 b'server',
1928 b'bundle1gd.pull',
1943 b'bundle1gd.pull',
1929 default=None,
1944 default=None,
1930 )
1945 )
1931 coreconfigitem(
1946 coreconfigitem(
1932 b'server',
1947 b'server',
1933 b'bundle1.push',
1948 b'bundle1.push',
1934 default=None,
1949 default=None,
1935 )
1950 )
1936 coreconfigitem(
1951 coreconfigitem(
1937 b'server',
1952 b'server',
1938 b'bundle1gd.push',
1953 b'bundle1gd.push',
1939 default=None,
1954 default=None,
1940 )
1955 )
1941 coreconfigitem(
1956 coreconfigitem(
1942 b'server',
1957 b'server',
1943 b'bundle2.stream',
1958 b'bundle2.stream',
1944 default=True,
1959 default=True,
1945 alias=[(b'experimental', b'bundle2.stream')],
1960 alias=[(b'experimental', b'bundle2.stream')],
1946 )
1961 )
1947 coreconfigitem(
1962 coreconfigitem(
1948 b'server',
1963 b'server',
1949 b'compressionengines',
1964 b'compressionengines',
1950 default=list,
1965 default=list,
1951 )
1966 )
1952 coreconfigitem(
1967 coreconfigitem(
1953 b'server',
1968 b'server',
1954 b'concurrent-push-mode',
1969 b'concurrent-push-mode',
1955 default=b'check-related',
1970 default=b'check-related',
1956 )
1971 )
1957 coreconfigitem(
1972 coreconfigitem(
1958 b'server',
1973 b'server',
1959 b'disablefullbundle',
1974 b'disablefullbundle',
1960 default=False,
1975 default=False,
1961 )
1976 )
1962 coreconfigitem(
1977 coreconfigitem(
1963 b'server',
1978 b'server',
1964 b'maxhttpheaderlen',
1979 b'maxhttpheaderlen',
1965 default=1024,
1980 default=1024,
1966 )
1981 )
1967 coreconfigitem(
1982 coreconfigitem(
1968 b'server',
1983 b'server',
1969 b'pullbundle',
1984 b'pullbundle',
1970 default=False,
1985 default=False,
1971 )
1986 )
1972 coreconfigitem(
1987 coreconfigitem(
1973 b'server',
1988 b'server',
1974 b'preferuncompressed',
1989 b'preferuncompressed',
1975 default=False,
1990 default=False,
1976 )
1991 )
1977 coreconfigitem(
1992 coreconfigitem(
1978 b'server',
1993 b'server',
1979 b'streamunbundle',
1994 b'streamunbundle',
1980 default=False,
1995 default=False,
1981 )
1996 )
1982 coreconfigitem(
1997 coreconfigitem(
1983 b'server',
1998 b'server',
1984 b'uncompressed',
1999 b'uncompressed',
1985 default=True,
2000 default=True,
1986 )
2001 )
1987 coreconfigitem(
2002 coreconfigitem(
1988 b'server',
2003 b'server',
1989 b'uncompressedallowsecret',
2004 b'uncompressedallowsecret',
1990 default=False,
2005 default=False,
1991 )
2006 )
1992 coreconfigitem(
2007 coreconfigitem(
1993 b'server',
2008 b'server',
1994 b'view',
2009 b'view',
1995 default=b'served',
2010 default=b'served',
1996 )
2011 )
1997 coreconfigitem(
2012 coreconfigitem(
1998 b'server',
2013 b'server',
1999 b'validate',
2014 b'validate',
2000 default=False,
2015 default=False,
2001 )
2016 )
2002 coreconfigitem(
2017 coreconfigitem(
2003 b'server',
2018 b'server',
2004 b'zliblevel',
2019 b'zliblevel',
2005 default=-1,
2020 default=-1,
2006 )
2021 )
2007 coreconfigitem(
2022 coreconfigitem(
2008 b'server',
2023 b'server',
2009 b'zstdlevel',
2024 b'zstdlevel',
2010 default=3,
2025 default=3,
2011 )
2026 )
2012 coreconfigitem(
2027 coreconfigitem(
2013 b'share',
2028 b'share',
2014 b'pool',
2029 b'pool',
2015 default=None,
2030 default=None,
2016 )
2031 )
2017 coreconfigitem(
2032 coreconfigitem(
2018 b'share',
2033 b'share',
2019 b'poolnaming',
2034 b'poolnaming',
2020 default=b'identity',
2035 default=b'identity',
2021 )
2036 )
2022 coreconfigitem(
2037 coreconfigitem(
2023 b'share',
2038 b'share',
2024 b'safe-mismatch.source-not-safe',
2039 b'safe-mismatch.source-not-safe',
2025 default=b'abort',
2040 default=b'abort',
2026 )
2041 )
2027 coreconfigitem(
2042 coreconfigitem(
2028 b'share',
2043 b'share',
2029 b'safe-mismatch.source-safe',
2044 b'safe-mismatch.source-safe',
2030 default=b'abort',
2045 default=b'abort',
2031 )
2046 )
2032 coreconfigitem(
2047 coreconfigitem(
2033 b'share',
2048 b'share',
2034 b'safe-mismatch.source-not-safe.warn',
2049 b'safe-mismatch.source-not-safe.warn',
2035 default=True,
2050 default=True,
2036 )
2051 )
2037 coreconfigitem(
2052 coreconfigitem(
2038 b'share',
2053 b'share',
2039 b'safe-mismatch.source-safe.warn',
2054 b'safe-mismatch.source-safe.warn',
2040 default=True,
2055 default=True,
2041 )
2056 )
2042 coreconfigitem(
2057 coreconfigitem(
2043 b'shelve',
2058 b'shelve',
2044 b'maxbackups',
2059 b'maxbackups',
2045 default=10,
2060 default=10,
2046 )
2061 )
2047 coreconfigitem(
2062 coreconfigitem(
2048 b'smtp',
2063 b'smtp',
2049 b'host',
2064 b'host',
2050 default=None,
2065 default=None,
2051 )
2066 )
2052 coreconfigitem(
2067 coreconfigitem(
2053 b'smtp',
2068 b'smtp',
2054 b'local_hostname',
2069 b'local_hostname',
2055 default=None,
2070 default=None,
2056 )
2071 )
2057 coreconfigitem(
2072 coreconfigitem(
2058 b'smtp',
2073 b'smtp',
2059 b'password',
2074 b'password',
2060 default=None,
2075 default=None,
2061 )
2076 )
2062 coreconfigitem(
2077 coreconfigitem(
2063 b'smtp',
2078 b'smtp',
2064 b'port',
2079 b'port',
2065 default=dynamicdefault,
2080 default=dynamicdefault,
2066 )
2081 )
2067 coreconfigitem(
2082 coreconfigitem(
2068 b'smtp',
2083 b'smtp',
2069 b'tls',
2084 b'tls',
2070 default=b'none',
2085 default=b'none',
2071 )
2086 )
2072 coreconfigitem(
2087 coreconfigitem(
2073 b'smtp',
2088 b'smtp',
2074 b'username',
2089 b'username',
2075 default=None,
2090 default=None,
2076 )
2091 )
2077 coreconfigitem(
2092 coreconfigitem(
2078 b'sparse',
2093 b'sparse',
2079 b'missingwarning',
2094 b'missingwarning',
2080 default=True,
2095 default=True,
2081 experimental=True,
2096 experimental=True,
2082 )
2097 )
2083 coreconfigitem(
2098 coreconfigitem(
2084 b'subrepos',
2099 b'subrepos',
2085 b'allowed',
2100 b'allowed',
2086 default=dynamicdefault, # to make backporting simpler
2101 default=dynamicdefault, # to make backporting simpler
2087 )
2102 )
2088 coreconfigitem(
2103 coreconfigitem(
2089 b'subrepos',
2104 b'subrepos',
2090 b'hg:allowed',
2105 b'hg:allowed',
2091 default=dynamicdefault,
2106 default=dynamicdefault,
2092 )
2107 )
2093 coreconfigitem(
2108 coreconfigitem(
2094 b'subrepos',
2109 b'subrepos',
2095 b'git:allowed',
2110 b'git:allowed',
2096 default=dynamicdefault,
2111 default=dynamicdefault,
2097 )
2112 )
2098 coreconfigitem(
2113 coreconfigitem(
2099 b'subrepos',
2114 b'subrepos',
2100 b'svn:allowed',
2115 b'svn:allowed',
2101 default=dynamicdefault,
2116 default=dynamicdefault,
2102 )
2117 )
2103 coreconfigitem(
2118 coreconfigitem(
2104 b'templates',
2119 b'templates',
2105 b'.*',
2120 b'.*',
2106 default=None,
2121 default=None,
2107 generic=True,
2122 generic=True,
2108 )
2123 )
2109 coreconfigitem(
2124 coreconfigitem(
2110 b'templateconfig',
2125 b'templateconfig',
2111 b'.*',
2126 b'.*',
2112 default=dynamicdefault,
2127 default=dynamicdefault,
2113 generic=True,
2128 generic=True,
2114 )
2129 )
2115 coreconfigitem(
2130 coreconfigitem(
2116 b'trusted',
2131 b'trusted',
2117 b'groups',
2132 b'groups',
2118 default=list,
2133 default=list,
2119 )
2134 )
2120 coreconfigitem(
2135 coreconfigitem(
2121 b'trusted',
2136 b'trusted',
2122 b'users',
2137 b'users',
2123 default=list,
2138 default=list,
2124 )
2139 )
2125 coreconfigitem(
2140 coreconfigitem(
2126 b'ui',
2141 b'ui',
2127 b'_usedassubrepo',
2142 b'_usedassubrepo',
2128 default=False,
2143 default=False,
2129 )
2144 )
2130 coreconfigitem(
2145 coreconfigitem(
2131 b'ui',
2146 b'ui',
2132 b'allowemptycommit',
2147 b'allowemptycommit',
2133 default=False,
2148 default=False,
2134 )
2149 )
2135 coreconfigitem(
2150 coreconfigitem(
2136 b'ui',
2151 b'ui',
2137 b'archivemeta',
2152 b'archivemeta',
2138 default=True,
2153 default=True,
2139 )
2154 )
2140 coreconfigitem(
2155 coreconfigitem(
2141 b'ui',
2156 b'ui',
2142 b'askusername',
2157 b'askusername',
2143 default=False,
2158 default=False,
2144 )
2159 )
2145 coreconfigitem(
2160 coreconfigitem(
2146 b'ui',
2161 b'ui',
2147 b'available-memory',
2162 b'available-memory',
2148 default=None,
2163 default=None,
2149 )
2164 )
2150
2165
2151 coreconfigitem(
2166 coreconfigitem(
2152 b'ui',
2167 b'ui',
2153 b'clonebundlefallback',
2168 b'clonebundlefallback',
2154 default=False,
2169 default=False,
2155 )
2170 )
2156 coreconfigitem(
2171 coreconfigitem(
2157 b'ui',
2172 b'ui',
2158 b'clonebundleprefers',
2173 b'clonebundleprefers',
2159 default=list,
2174 default=list,
2160 )
2175 )
2161 coreconfigitem(
2176 coreconfigitem(
2162 b'ui',
2177 b'ui',
2163 b'clonebundles',
2178 b'clonebundles',
2164 default=True,
2179 default=True,
2165 )
2180 )
2166 coreconfigitem(
2181 coreconfigitem(
2167 b'ui',
2182 b'ui',
2168 b'color',
2183 b'color',
2169 default=b'auto',
2184 default=b'auto',
2170 )
2185 )
2171 coreconfigitem(
2186 coreconfigitem(
2172 b'ui',
2187 b'ui',
2173 b'commitsubrepos',
2188 b'commitsubrepos',
2174 default=False,
2189 default=False,
2175 )
2190 )
2176 coreconfigitem(
2191 coreconfigitem(
2177 b'ui',
2192 b'ui',
2178 b'debug',
2193 b'debug',
2179 default=False,
2194 default=False,
2180 )
2195 )
2181 coreconfigitem(
2196 coreconfigitem(
2182 b'ui',
2197 b'ui',
2183 b'debugger',
2198 b'debugger',
2184 default=None,
2199 default=None,
2185 )
2200 )
2186 coreconfigitem(
2201 coreconfigitem(
2187 b'ui',
2202 b'ui',
2188 b'editor',
2203 b'editor',
2189 default=dynamicdefault,
2204 default=dynamicdefault,
2190 )
2205 )
2191 coreconfigitem(
2206 coreconfigitem(
2192 b'ui',
2207 b'ui',
2193 b'detailed-exit-code',
2208 b'detailed-exit-code',
2194 default=False,
2209 default=False,
2195 experimental=True,
2210 experimental=True,
2196 )
2211 )
2197 coreconfigitem(
2212 coreconfigitem(
2198 b'ui',
2213 b'ui',
2199 b'fallbackencoding',
2214 b'fallbackencoding',
2200 default=None,
2215 default=None,
2201 )
2216 )
2202 coreconfigitem(
2217 coreconfigitem(
2203 b'ui',
2218 b'ui',
2204 b'forcecwd',
2219 b'forcecwd',
2205 default=None,
2220 default=None,
2206 )
2221 )
2207 coreconfigitem(
2222 coreconfigitem(
2208 b'ui',
2223 b'ui',
2209 b'forcemerge',
2224 b'forcemerge',
2210 default=None,
2225 default=None,
2211 )
2226 )
2212 coreconfigitem(
2227 coreconfigitem(
2213 b'ui',
2228 b'ui',
2214 b'formatdebug',
2229 b'formatdebug',
2215 default=False,
2230 default=False,
2216 )
2231 )
2217 coreconfigitem(
2232 coreconfigitem(
2218 b'ui',
2233 b'ui',
2219 b'formatjson',
2234 b'formatjson',
2220 default=False,
2235 default=False,
2221 )
2236 )
2222 coreconfigitem(
2237 coreconfigitem(
2223 b'ui',
2238 b'ui',
2224 b'formatted',
2239 b'formatted',
2225 default=None,
2240 default=None,
2226 )
2241 )
2227 coreconfigitem(
2242 coreconfigitem(
2228 b'ui',
2243 b'ui',
2229 b'interactive',
2244 b'interactive',
2230 default=None,
2245 default=None,
2231 )
2246 )
2232 coreconfigitem(
2247 coreconfigitem(
2233 b'ui',
2248 b'ui',
2234 b'interface',
2249 b'interface',
2235 default=None,
2250 default=None,
2236 )
2251 )
2237 coreconfigitem(
2252 coreconfigitem(
2238 b'ui',
2253 b'ui',
2239 b'interface.chunkselector',
2254 b'interface.chunkselector',
2240 default=None,
2255 default=None,
2241 )
2256 )
2242 coreconfigitem(
2257 coreconfigitem(
2243 b'ui',
2258 b'ui',
2244 b'large-file-limit',
2259 b'large-file-limit',
2245 default=10000000,
2260 default=10000000,
2246 )
2261 )
2247 coreconfigitem(
2262 coreconfigitem(
2248 b'ui',
2263 b'ui',
2249 b'logblockedtimes',
2264 b'logblockedtimes',
2250 default=False,
2265 default=False,
2251 )
2266 )
2252 coreconfigitem(
2267 coreconfigitem(
2253 b'ui',
2268 b'ui',
2254 b'merge',
2269 b'merge',
2255 default=None,
2270 default=None,
2256 )
2271 )
2257 coreconfigitem(
2272 coreconfigitem(
2258 b'ui',
2273 b'ui',
2259 b'mergemarkers',
2274 b'mergemarkers',
2260 default=b'basic',
2275 default=b'basic',
2261 )
2276 )
2262 coreconfigitem(
2277 coreconfigitem(
2263 b'ui',
2278 b'ui',
2264 b'message-output',
2279 b'message-output',
2265 default=b'stdio',
2280 default=b'stdio',
2266 )
2281 )
2267 coreconfigitem(
2282 coreconfigitem(
2268 b'ui',
2283 b'ui',
2269 b'nontty',
2284 b'nontty',
2270 default=False,
2285 default=False,
2271 )
2286 )
2272 coreconfigitem(
2287 coreconfigitem(
2273 b'ui',
2288 b'ui',
2274 b'origbackuppath',
2289 b'origbackuppath',
2275 default=None,
2290 default=None,
2276 )
2291 )
2277 coreconfigitem(
2292 coreconfigitem(
2278 b'ui',
2293 b'ui',
2279 b'paginate',
2294 b'paginate',
2280 default=True,
2295 default=True,
2281 )
2296 )
2282 coreconfigitem(
2297 coreconfigitem(
2283 b'ui',
2298 b'ui',
2284 b'patch',
2299 b'patch',
2285 default=None,
2300 default=None,
2286 )
2301 )
2287 coreconfigitem(
2302 coreconfigitem(
2288 b'ui',
2303 b'ui',
2289 b'portablefilenames',
2304 b'portablefilenames',
2290 default=b'warn',
2305 default=b'warn',
2291 )
2306 )
2292 coreconfigitem(
2307 coreconfigitem(
2293 b'ui',
2308 b'ui',
2294 b'promptecho',
2309 b'promptecho',
2295 default=False,
2310 default=False,
2296 )
2311 )
2297 coreconfigitem(
2312 coreconfigitem(
2298 b'ui',
2313 b'ui',
2299 b'quiet',
2314 b'quiet',
2300 default=False,
2315 default=False,
2301 )
2316 )
2302 coreconfigitem(
2317 coreconfigitem(
2303 b'ui',
2318 b'ui',
2304 b'quietbookmarkmove',
2319 b'quietbookmarkmove',
2305 default=False,
2320 default=False,
2306 )
2321 )
2307 coreconfigitem(
2322 coreconfigitem(
2308 b'ui',
2323 b'ui',
2309 b'relative-paths',
2324 b'relative-paths',
2310 default=b'legacy',
2325 default=b'legacy',
2311 )
2326 )
2312 coreconfigitem(
2327 coreconfigitem(
2313 b'ui',
2328 b'ui',
2314 b'remotecmd',
2329 b'remotecmd',
2315 default=b'hg',
2330 default=b'hg',
2316 )
2331 )
2317 coreconfigitem(
2332 coreconfigitem(
2318 b'ui',
2333 b'ui',
2319 b'report_untrusted',
2334 b'report_untrusted',
2320 default=True,
2335 default=True,
2321 )
2336 )
2322 coreconfigitem(
2337 coreconfigitem(
2323 b'ui',
2338 b'ui',
2324 b'rollback',
2339 b'rollback',
2325 default=True,
2340 default=True,
2326 )
2341 )
2327 coreconfigitem(
2342 coreconfigitem(
2328 b'ui',
2343 b'ui',
2329 b'signal-safe-lock',
2344 b'signal-safe-lock',
2330 default=True,
2345 default=True,
2331 )
2346 )
2332 coreconfigitem(
2347 coreconfigitem(
2333 b'ui',
2348 b'ui',
2334 b'slash',
2349 b'slash',
2335 default=False,
2350 default=False,
2336 )
2351 )
2337 coreconfigitem(
2352 coreconfigitem(
2338 b'ui',
2353 b'ui',
2339 b'ssh',
2354 b'ssh',
2340 default=b'ssh',
2355 default=b'ssh',
2341 )
2356 )
2342 coreconfigitem(
2357 coreconfigitem(
2343 b'ui',
2358 b'ui',
2344 b'ssherrorhint',
2359 b'ssherrorhint',
2345 default=None,
2360 default=None,
2346 )
2361 )
2347 coreconfigitem(
2362 coreconfigitem(
2348 b'ui',
2363 b'ui',
2349 b'statuscopies',
2364 b'statuscopies',
2350 default=False,
2365 default=False,
2351 )
2366 )
2352 coreconfigitem(
2367 coreconfigitem(
2353 b'ui',
2368 b'ui',
2354 b'strict',
2369 b'strict',
2355 default=False,
2370 default=False,
2356 )
2371 )
2357 coreconfigitem(
2372 coreconfigitem(
2358 b'ui',
2373 b'ui',
2359 b'style',
2374 b'style',
2360 default=b'',
2375 default=b'',
2361 )
2376 )
2362 coreconfigitem(
2377 coreconfigitem(
2363 b'ui',
2378 b'ui',
2364 b'supportcontact',
2379 b'supportcontact',
2365 default=None,
2380 default=None,
2366 )
2381 )
2367 coreconfigitem(
2382 coreconfigitem(
2368 b'ui',
2383 b'ui',
2369 b'textwidth',
2384 b'textwidth',
2370 default=78,
2385 default=78,
2371 )
2386 )
2372 coreconfigitem(
2387 coreconfigitem(
2373 b'ui',
2388 b'ui',
2374 b'timeout',
2389 b'timeout',
2375 default=b'600',
2390 default=b'600',
2376 )
2391 )
2377 coreconfigitem(
2392 coreconfigitem(
2378 b'ui',
2393 b'ui',
2379 b'timeout.warn',
2394 b'timeout.warn',
2380 default=0,
2395 default=0,
2381 )
2396 )
2382 coreconfigitem(
2397 coreconfigitem(
2383 b'ui',
2398 b'ui',
2384 b'timestamp-output',
2399 b'timestamp-output',
2385 default=False,
2400 default=False,
2386 )
2401 )
2387 coreconfigitem(
2402 coreconfigitem(
2388 b'ui',
2403 b'ui',
2389 b'traceback',
2404 b'traceback',
2390 default=False,
2405 default=False,
2391 )
2406 )
2392 coreconfigitem(
2407 coreconfigitem(
2393 b'ui',
2408 b'ui',
2394 b'tweakdefaults',
2409 b'tweakdefaults',
2395 default=False,
2410 default=False,
2396 )
2411 )
2397 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2412 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2398 coreconfigitem(
2413 coreconfigitem(
2399 b'ui',
2414 b'ui',
2400 b'verbose',
2415 b'verbose',
2401 default=False,
2416 default=False,
2402 )
2417 )
2403 coreconfigitem(
2418 coreconfigitem(
2404 b'verify',
2419 b'verify',
2405 b'skipflags',
2420 b'skipflags',
2406 default=None,
2421 default=None,
2407 )
2422 )
2408 coreconfigitem(
2423 coreconfigitem(
2409 b'web',
2424 b'web',
2410 b'allowbz2',
2425 b'allowbz2',
2411 default=False,
2426 default=False,
2412 )
2427 )
2413 coreconfigitem(
2428 coreconfigitem(
2414 b'web',
2429 b'web',
2415 b'allowgz',
2430 b'allowgz',
2416 default=False,
2431 default=False,
2417 )
2432 )
2418 coreconfigitem(
2433 coreconfigitem(
2419 b'web',
2434 b'web',
2420 b'allow-pull',
2435 b'allow-pull',
2421 alias=[(b'web', b'allowpull')],
2436 alias=[(b'web', b'allowpull')],
2422 default=True,
2437 default=True,
2423 )
2438 )
2424 coreconfigitem(
2439 coreconfigitem(
2425 b'web',
2440 b'web',
2426 b'allow-push',
2441 b'allow-push',
2427 alias=[(b'web', b'allow_push')],
2442 alias=[(b'web', b'allow_push')],
2428 default=list,
2443 default=list,
2429 )
2444 )
2430 coreconfigitem(
2445 coreconfigitem(
2431 b'web',
2446 b'web',
2432 b'allowzip',
2447 b'allowzip',
2433 default=False,
2448 default=False,
2434 )
2449 )
2435 coreconfigitem(
2450 coreconfigitem(
2436 b'web',
2451 b'web',
2437 b'archivesubrepos',
2452 b'archivesubrepos',
2438 default=False,
2453 default=False,
2439 )
2454 )
2440 coreconfigitem(
2455 coreconfigitem(
2441 b'web',
2456 b'web',
2442 b'cache',
2457 b'cache',
2443 default=True,
2458 default=True,
2444 )
2459 )
2445 coreconfigitem(
2460 coreconfigitem(
2446 b'web',
2461 b'web',
2447 b'comparisoncontext',
2462 b'comparisoncontext',
2448 default=5,
2463 default=5,
2449 )
2464 )
2450 coreconfigitem(
2465 coreconfigitem(
2451 b'web',
2466 b'web',
2452 b'contact',
2467 b'contact',
2453 default=None,
2468 default=None,
2454 )
2469 )
2455 coreconfigitem(
2470 coreconfigitem(
2456 b'web',
2471 b'web',
2457 b'deny_push',
2472 b'deny_push',
2458 default=list,
2473 default=list,
2459 )
2474 )
2460 coreconfigitem(
2475 coreconfigitem(
2461 b'web',
2476 b'web',
2462 b'guessmime',
2477 b'guessmime',
2463 default=False,
2478 default=False,
2464 )
2479 )
2465 coreconfigitem(
2480 coreconfigitem(
2466 b'web',
2481 b'web',
2467 b'hidden',
2482 b'hidden',
2468 default=False,
2483 default=False,
2469 )
2484 )
2470 coreconfigitem(
2485 coreconfigitem(
2471 b'web',
2486 b'web',
2472 b'labels',
2487 b'labels',
2473 default=list,
2488 default=list,
2474 )
2489 )
2475 coreconfigitem(
2490 coreconfigitem(
2476 b'web',
2491 b'web',
2477 b'logoimg',
2492 b'logoimg',
2478 default=b'hglogo.png',
2493 default=b'hglogo.png',
2479 )
2494 )
2480 coreconfigitem(
2495 coreconfigitem(
2481 b'web',
2496 b'web',
2482 b'logourl',
2497 b'logourl',
2483 default=b'https://mercurial-scm.org/',
2498 default=b'https://mercurial-scm.org/',
2484 )
2499 )
2485 coreconfigitem(
2500 coreconfigitem(
2486 b'web',
2501 b'web',
2487 b'accesslog',
2502 b'accesslog',
2488 default=b'-',
2503 default=b'-',
2489 )
2504 )
2490 coreconfigitem(
2505 coreconfigitem(
2491 b'web',
2506 b'web',
2492 b'address',
2507 b'address',
2493 default=b'',
2508 default=b'',
2494 )
2509 )
2495 coreconfigitem(
2510 coreconfigitem(
2496 b'web',
2511 b'web',
2497 b'allow-archive',
2512 b'allow-archive',
2498 alias=[(b'web', b'allow_archive')],
2513 alias=[(b'web', b'allow_archive')],
2499 default=list,
2514 default=list,
2500 )
2515 )
2501 coreconfigitem(
2516 coreconfigitem(
2502 b'web',
2517 b'web',
2503 b'allow_read',
2518 b'allow_read',
2504 default=list,
2519 default=list,
2505 )
2520 )
2506 coreconfigitem(
2521 coreconfigitem(
2507 b'web',
2522 b'web',
2508 b'baseurl',
2523 b'baseurl',
2509 default=None,
2524 default=None,
2510 )
2525 )
2511 coreconfigitem(
2526 coreconfigitem(
2512 b'web',
2527 b'web',
2513 b'cacerts',
2528 b'cacerts',
2514 default=None,
2529 default=None,
2515 )
2530 )
2516 coreconfigitem(
2531 coreconfigitem(
2517 b'web',
2532 b'web',
2518 b'certificate',
2533 b'certificate',
2519 default=None,
2534 default=None,
2520 )
2535 )
2521 coreconfigitem(
2536 coreconfigitem(
2522 b'web',
2537 b'web',
2523 b'collapse',
2538 b'collapse',
2524 default=False,
2539 default=False,
2525 )
2540 )
2526 coreconfigitem(
2541 coreconfigitem(
2527 b'web',
2542 b'web',
2528 b'csp',
2543 b'csp',
2529 default=None,
2544 default=None,
2530 )
2545 )
2531 coreconfigitem(
2546 coreconfigitem(
2532 b'web',
2547 b'web',
2533 b'deny_read',
2548 b'deny_read',
2534 default=list,
2549 default=list,
2535 )
2550 )
2536 coreconfigitem(
2551 coreconfigitem(
2537 b'web',
2552 b'web',
2538 b'descend',
2553 b'descend',
2539 default=True,
2554 default=True,
2540 )
2555 )
2541 coreconfigitem(
2556 coreconfigitem(
2542 b'web',
2557 b'web',
2543 b'description',
2558 b'description',
2544 default=b"",
2559 default=b"",
2545 )
2560 )
2546 coreconfigitem(
2561 coreconfigitem(
2547 b'web',
2562 b'web',
2548 b'encoding',
2563 b'encoding',
2549 default=lambda: encoding.encoding,
2564 default=lambda: encoding.encoding,
2550 )
2565 )
2551 coreconfigitem(
2566 coreconfigitem(
2552 b'web',
2567 b'web',
2553 b'errorlog',
2568 b'errorlog',
2554 default=b'-',
2569 default=b'-',
2555 )
2570 )
2556 coreconfigitem(
2571 coreconfigitem(
2557 b'web',
2572 b'web',
2558 b'ipv6',
2573 b'ipv6',
2559 default=False,
2574 default=False,
2560 )
2575 )
2561 coreconfigitem(
2576 coreconfigitem(
2562 b'web',
2577 b'web',
2563 b'maxchanges',
2578 b'maxchanges',
2564 default=10,
2579 default=10,
2565 )
2580 )
2566 coreconfigitem(
2581 coreconfigitem(
2567 b'web',
2582 b'web',
2568 b'maxfiles',
2583 b'maxfiles',
2569 default=10,
2584 default=10,
2570 )
2585 )
2571 coreconfigitem(
2586 coreconfigitem(
2572 b'web',
2587 b'web',
2573 b'maxshortchanges',
2588 b'maxshortchanges',
2574 default=60,
2589 default=60,
2575 )
2590 )
2576 coreconfigitem(
2591 coreconfigitem(
2577 b'web',
2592 b'web',
2578 b'motd',
2593 b'motd',
2579 default=b'',
2594 default=b'',
2580 )
2595 )
2581 coreconfigitem(
2596 coreconfigitem(
2582 b'web',
2597 b'web',
2583 b'name',
2598 b'name',
2584 default=dynamicdefault,
2599 default=dynamicdefault,
2585 )
2600 )
2586 coreconfigitem(
2601 coreconfigitem(
2587 b'web',
2602 b'web',
2588 b'port',
2603 b'port',
2589 default=8000,
2604 default=8000,
2590 )
2605 )
2591 coreconfigitem(
2606 coreconfigitem(
2592 b'web',
2607 b'web',
2593 b'prefix',
2608 b'prefix',
2594 default=b'',
2609 default=b'',
2595 )
2610 )
2596 coreconfigitem(
2611 coreconfigitem(
2597 b'web',
2612 b'web',
2598 b'push_ssl',
2613 b'push_ssl',
2599 default=True,
2614 default=True,
2600 )
2615 )
2601 coreconfigitem(
2616 coreconfigitem(
2602 b'web',
2617 b'web',
2603 b'refreshinterval',
2618 b'refreshinterval',
2604 default=20,
2619 default=20,
2605 )
2620 )
2606 coreconfigitem(
2621 coreconfigitem(
2607 b'web',
2622 b'web',
2608 b'server-header',
2623 b'server-header',
2609 default=None,
2624 default=None,
2610 )
2625 )
2611 coreconfigitem(
2626 coreconfigitem(
2612 b'web',
2627 b'web',
2613 b'static',
2628 b'static',
2614 default=None,
2629 default=None,
2615 )
2630 )
2616 coreconfigitem(
2631 coreconfigitem(
2617 b'web',
2632 b'web',
2618 b'staticurl',
2633 b'staticurl',
2619 default=None,
2634 default=None,
2620 )
2635 )
2621 coreconfigitem(
2636 coreconfigitem(
2622 b'web',
2637 b'web',
2623 b'stripes',
2638 b'stripes',
2624 default=1,
2639 default=1,
2625 )
2640 )
2626 coreconfigitem(
2641 coreconfigitem(
2627 b'web',
2642 b'web',
2628 b'style',
2643 b'style',
2629 default=b'paper',
2644 default=b'paper',
2630 )
2645 )
2631 coreconfigitem(
2646 coreconfigitem(
2632 b'web',
2647 b'web',
2633 b'templates',
2648 b'templates',
2634 default=None,
2649 default=None,
2635 )
2650 )
2636 coreconfigitem(
2651 coreconfigitem(
2637 b'web',
2652 b'web',
2638 b'view',
2653 b'view',
2639 default=b'served',
2654 default=b'served',
2640 experimental=True,
2655 experimental=True,
2641 )
2656 )
2642 coreconfigitem(
2657 coreconfigitem(
2643 b'worker',
2658 b'worker',
2644 b'backgroundclose',
2659 b'backgroundclose',
2645 default=dynamicdefault,
2660 default=dynamicdefault,
2646 )
2661 )
2647 # Windows defaults to a limit of 512 open files. A buffer of 128
2662 # Windows defaults to a limit of 512 open files. A buffer of 128
2648 # should give us enough headway, hence the 512 - 128 = 384 default below.
2663 # should give us enough headway, hence the 512 - 128 = 384 default below.
2649 coreconfigitem(
2664 coreconfigitem(
2650 b'worker',
2665 b'worker',
2651 b'backgroundclosemaxqueue',
2666 b'backgroundclosemaxqueue',
2652 default=384,
2667 default=384,
2653 )
2668 )
2654 coreconfigitem(
2669 coreconfigitem(
2655 b'worker',
2670 b'worker',
2656 b'backgroundcloseminfilecount',
2671 b'backgroundcloseminfilecount',
2657 default=2048,
2672 default=2048,
2658 )
2673 )
2659 coreconfigitem(
2674 coreconfigitem(
2660 b'worker',
2675 b'worker',
2661 b'backgroundclosethreadcount',
2676 b'backgroundclosethreadcount',
2662 default=4,
2677 default=4,
2663 )
2678 )
2664 coreconfigitem(
2679 coreconfigitem(
2665 b'worker',
2680 b'worker',
2666 b'enabled',
2681 b'enabled',
2667 default=True,
2682 default=True,
2668 )
2683 )
2669 coreconfigitem(
2684 coreconfigitem(
2670 b'worker',
2685 b'worker',
2671 b'numcpus',
2686 b'numcpus',
2672 default=None,
2687 default=None,
2673 )
2688 )
2674
2689
2675 # Rebase related configuration moved to core because other extensions are doing
2690 # Rebase related configuration moved to core because other extensions are doing
2676 # strange things. For example, shelve imports the extension to reuse some bits
2691 # strange things. For example, shelve imports the extension to reuse some bits
2677 # without formally loading it.
2692 # without formally loading it.
2678 coreconfigitem(
2693 coreconfigitem(
2679 b'commands',
2694 b'commands',
2680 b'rebase.requiredest',
2695 b'rebase.requiredest',
2681 default=False,
2696 default=False,
2682 )
2697 )
2683 coreconfigitem(
2698 coreconfigitem(
2684 b'experimental',
2699 b'experimental',
2685 b'rebaseskipobsolete',
2700 b'rebaseskipobsolete',
2686 default=True,
2701 default=True,
2687 )
2702 )
2688 coreconfigitem(
2703 coreconfigitem(
2689 b'rebase',
2704 b'rebase',
2690 b'singletransaction',
2705 b'singletransaction',
2691 default=False,
2706 default=False,
2692 )
2707 )
2693 coreconfigitem(
2708 coreconfigitem(
2694 b'rebase',
2709 b'rebase',
2695 b'experimental.inmemory',
2710 b'experimental.inmemory',
2696 default=False,
2711 default=False,
2697 )
2712 )
@@ -1,3771 +1,3781 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullrev,
22 nullrev,
23 sha1nodeconstants,
23 sha1nodeconstants,
24 short,
24 short,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 delattr,
27 delattr,
28 getattr,
28 getattr,
29 )
29 )
30 from . import (
30 from . import (
31 bookmarks,
31 bookmarks,
32 branchmap,
32 branchmap,
33 bundle2,
33 bundle2,
34 bundlecaches,
34 bundlecaches,
35 changegroup,
35 changegroup,
36 color,
36 color,
37 commit,
37 commit,
38 context,
38 context,
39 dirstate,
39 dirstate,
40 dirstateguard,
40 dirstateguard,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 namespaces,
52 namespaces,
53 narrowspec,
53 narrowspec,
54 obsolete,
54 obsolete,
55 pathutil,
55 pathutil,
56 phases,
56 phases,
57 pushkey,
57 pushkey,
58 pycompat,
58 pycompat,
59 rcutil,
59 rcutil,
60 repoview,
60 repoview,
61 requirements as requirementsmod,
61 requirements as requirementsmod,
62 revlog,
62 revlog,
63 revset,
63 revset,
64 revsetlang,
64 revsetlang,
65 scmutil,
65 scmutil,
66 sparse,
66 sparse,
67 store as storemod,
67 store as storemod,
68 subrepoutil,
68 subrepoutil,
69 tags as tagsmod,
69 tags as tagsmod,
70 transaction,
70 transaction,
71 txnutil,
71 txnutil,
72 util,
72 util,
73 vfs as vfsmod,
73 vfs as vfsmod,
74 wireprototypes,
74 wireprototypes,
75 )
75 )
76
76
77 from .interfaces import (
77 from .interfaces import (
78 repository,
78 repository,
79 util as interfaceutil,
79 util as interfaceutil,
80 )
80 )
81
81
82 from .utils import (
82 from .utils import (
83 hashutil,
83 hashutil,
84 procutil,
84 procutil,
85 stringutil,
85 stringutil,
86 urlutil,
86 urlutil,
87 )
87 )
88
88
89 from .revlogutils import (
89 from .revlogutils import (
90 concurrency_checker as revlogchecker,
90 concurrency_checker as revlogchecker,
91 constants as revlogconst,
91 constants as revlogconst,
92 sidedata as sidedatamod,
92 sidedata as sidedatamod,
93 )
93 )
94
94
95 release = lockmod.release
95 release = lockmod.release
96 urlerr = util.urlerr
96 urlerr = util.urlerr
97 urlreq = util.urlreq
97 urlreq = util.urlreq
98
98
99 # set of (path, vfs-location) tuples. vfs-location is:
99 # set of (path, vfs-location) tuples. vfs-location is:
100 # - 'plain' for vfs relative paths
100 # - 'plain' for vfs relative paths
101 # - '' for svfs relative paths
101 # - '' for svfs relative paths
102 _cachedfiles = set()
102 _cachedfiles = set()
103
103
104
104
105 class _basefilecache(scmutil.filecache):
105 class _basefilecache(scmutil.filecache):
106 """All filecache usage on repo are done for logic that should be unfiltered"""
106 """All filecache usage on repo are done for logic that should be unfiltered"""
107
107
108 def __get__(self, repo, type=None):
108 def __get__(self, repo, type=None):
109 if repo is None:
109 if repo is None:
110 return self
110 return self
111 # proxy to unfiltered __dict__ since filtered repo has no entry
111 # proxy to unfiltered __dict__ since filtered repo has no entry
112 unfi = repo.unfiltered()
112 unfi = repo.unfiltered()
113 try:
113 try:
114 return unfi.__dict__[self.sname]
114 return unfi.__dict__[self.sname]
115 except KeyError:
115 except KeyError:
116 pass
116 pass
117 return super(_basefilecache, self).__get__(unfi, type)
117 return super(_basefilecache, self).__get__(unfi, type)
118
118
119 def set(self, repo, value):
119 def set(self, repo, value):
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
120 return super(_basefilecache, self).set(repo.unfiltered(), value)
121
121
122
122
123 class repofilecache(_basefilecache):
123 class repofilecache(_basefilecache):
124 """filecache for files in .hg but outside of .hg/store"""
124 """filecache for files in .hg but outside of .hg/store"""
125
125
126 def __init__(self, *paths):
126 def __init__(self, *paths):
127 super(repofilecache, self).__init__(*paths)
127 super(repofilecache, self).__init__(*paths)
128 for path in paths:
128 for path in paths:
129 _cachedfiles.add((path, b'plain'))
129 _cachedfiles.add((path, b'plain'))
130
130
131 def join(self, obj, fname):
131 def join(self, obj, fname):
132 return obj.vfs.join(fname)
132 return obj.vfs.join(fname)
133
133
134
134
135 class storecache(_basefilecache):
135 class storecache(_basefilecache):
136 """filecache for files in the store"""
136 """filecache for files in the store"""
137
137
138 def __init__(self, *paths):
138 def __init__(self, *paths):
139 super(storecache, self).__init__(*paths)
139 super(storecache, self).__init__(*paths)
140 for path in paths:
140 for path in paths:
141 _cachedfiles.add((path, b''))
141 _cachedfiles.add((path, b''))
142
142
143 def join(self, obj, fname):
143 def join(self, obj, fname):
144 return obj.sjoin(fname)
144 return obj.sjoin(fname)
145
145
146
146
147 class mixedrepostorecache(_basefilecache):
147 class mixedrepostorecache(_basefilecache):
148 """filecache for a mix files in .hg/store and outside"""
148 """filecache for a mix files in .hg/store and outside"""
149
149
150 def __init__(self, *pathsandlocations):
150 def __init__(self, *pathsandlocations):
151 # scmutil.filecache only uses the path for passing back into our
151 # scmutil.filecache only uses the path for passing back into our
152 # join(), so we can safely pass a list of paths and locations
152 # join(), so we can safely pass a list of paths and locations
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
153 super(mixedrepostorecache, self).__init__(*pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
154 _cachedfiles.update(pathsandlocations)
155
155
156 def join(self, obj, fnameandlocation):
156 def join(self, obj, fnameandlocation):
157 fname, location = fnameandlocation
157 fname, location = fnameandlocation
158 if location == b'plain':
158 if location == b'plain':
159 return obj.vfs.join(fname)
159 return obj.vfs.join(fname)
160 else:
160 else:
161 if location != b'':
161 if location != b'':
162 raise error.ProgrammingError(
162 raise error.ProgrammingError(
163 b'unexpected location: %s' % location
163 b'unexpected location: %s' % location
164 )
164 )
165 return obj.sjoin(fname)
165 return obj.sjoin(fname)
166
166
167
167
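

# EXAMPLE (editorial sketch, not part of the original module): the filecache
# classes above are meant to be used as property decorators, with the watched
# file path(s) passed to the decorator. The repo class and property below are
# purely illustrative; a real repository wires up `vfs`/`sjoin` during
# construction.
def _example_filecache_usage(repobaseclass):
    class examplerepo(repobaseclass):
        @repofilecache(b'bookmarks')
        def _examplebookmarks(self):
            # recomputed only when the stat data of `.hg/bookmarks` changes;
            # the decorator also records (b'bookmarks', b'plain') in
            # `_cachedfiles`
            return self.vfs.tryread(b'bookmarks').splitlines()

    return examplerepo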


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
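

# EXAMPLE (editorial sketch, not part of the original module): isfilecached()
# lets callers peek at a filecache-ed property without triggering a possibly
# expensive load. `repo` is assumed to be an existing local repository;
# 'dirstate' is one such property name.
def _example_isfilecached(repo):
    obj, cached = isfilecached(repo, 'dirstate')
    if cached:
        return obj  # reuse the value already in memory
    return None  # deliberately avoid forcing a load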


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper
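

# EXAMPLE (editorial sketch, not part of the original module): logic that
# must ignore repoview filtering is declared with @unfilteredmethod; even
# when called on a filtered repo, `self` inside the method is the unfiltered
# one. The mixin below is illustrative only.
def _example_unfilteredmethod():
    class examplemixin(object):
        @unfilteredmethod
        def _example_check(self):
            return self.unfiltered() is self  # always True in here

    return examplemixin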


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
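

# EXAMPLE (editorial sketch, not part of the original module): the executor
# is obtained from a peer and used as a context manager; for the local case
# the future returned by callcommand() is already resolved. `peer` is
# assumed to be a localpeer instance (see below).
def _example_commandexecutor(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
        e.sendcommands()  # a no-op marker for the local case
    return f.result()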


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
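

# EXAMPLE (editorial sketch, not part of the original module): an extension
# registers a feature-setup function by adding it to the set above; it is
# only consulted when the registering extension is loaded for the ui in use
# (see gathersupportedrequirements() below). The requirement name is
# hypothetical.
def _example_registerfeature():
    def featuresetup(ui, supported):
        supported |= {b'exp-example-requirement'}

    featuresetupfuncs.add(featuresetup)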


def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements
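

# EXAMPLE (editorial sketch, not part of the original module): `.hg/requires`
# is a plain newline-delimited list, so the set returned by _readrequires()
# for a typical modern repository looks roughly like this (exact contents
# vary with the repository format and configuration):
def _example_requirements():
    return {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'sparserevlog',
        b'store',
    }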


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, the vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present; refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

-    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
+    if (
+        requirementsmod.REVLOGV2_REQUIREMENT in requirements
+        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
+    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )
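

# EXAMPLE (editorial sketch, not part of the original module): per the
# makelocalrepository() docstring, an extension customizing repository type
# creation wraps one of the ``REPO_INTERFACES`` factory functions (e.g. via
# mercurial.extensions.wrapfunction) and no-ops when it is not enabled for
# the repo being constructed. Everything below is illustrative.
def _example_wrappedfactory(orig, **kwargs):
    typ = orig(**kwargs)
    if __name__ not in kwargs.get('extensionmodulenames', set()):
        return typ  # this extension is not loaded for this repo: no-op
    return type(typ.__name__, (typ,), {})  # derive a customized type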


def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is the vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
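

# EXAMPLE (editorial sketch, not part of the original module): as invited by
# the loadhgrc() docstring, an extension can pull config from an extra file
# by wrapping the function with mercurial.extensions.wrapfunction. The
# `hgrc-extra` file name is hypothetical.
def _example_wrappedloadhgrc(
    orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None
):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret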


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported
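

# EXAMPLE (editorial sketch, not part of the original module): because of
# the compression-derived requirements added above, a build whose zstd
# engine is available and revlog-capable reports both spellings of the zstd
# requirement.
def _example_zstdsupport(ui):
    supported = gathersupportedrequirements(ui)
    return (
        b'exp-compression-zstd' in supported
        and b'revlog-compression-zstd' in supported
    )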


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns ``None`` when every requirement is recognized.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
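

# EXAMPLE (editorial sketch, not part of the original module): feeding an
# unknown (but well-formed) requirement into the validator raises
# error.RequirementError with the "unknown to this Mercurial" message built
# above.
def _example_unknownrequirement():
    try:
        ensurerequirementsrecognized({b'futurefeature'}, {b'revlogv1'})
    except error.RequirementError:
        return True
    return False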


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
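

# EXAMPLE (editorial sketch, not part of the original module): the store
# class is picked purely from requirements; a modern repository carrying
# `store`, `fncache` and `dotencode` gets the fncachestore.
def _example_makestore(path):
    reqs = {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
    }
    return makestore(
        reqs, path, lambda base: vfsmod.vfs(base, cacheaudited=True)
    )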


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
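

# EXAMPLE (editorial sketch, not part of the original module): for a
# repository whose requirements include the new changelogv2 requirement
# (alongside a revlogv1/v2 requirement, so that the revlog options below are
# resolved at all), the option dict resolved above carries a b'changelogv2'
# flag that storage code can inspect.
def _example_haschangelogv2(ui, requirements):
    opts = resolvestorevfsoptions(ui, requirements, set())
    return opts.get(b'changelogv2', False)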
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options

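# The compression loop above derives the engine name from the requirement
# string itself via split(b'-', 2); a quick standalone check of that parsing
# (the requirement values are examples):
for _req in (b'revlog-compression-zstd', b'exp-compression-zlib'):
    # keep everything after the second '-', i.e. the engine name
    assert _req.split(b'-', 2)[2] in (b'zstd', b'zlib')
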
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

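# A minimal standalone sketch of folding such (interface, factory) pairs into
# one derived type, in the spirit of what ``makelocalrepository()`` does with
# REPO_INTERFACES; the stub classes are made up for illustration:
class _mainstub(object):
    def kind(self):
        return b'main'


class _filestoragestub(object):
    def file(self, path):
        return b'filelog:' + path


# derive one class from the per-interface factories' results
_derived = type('derivedrepo', (_mainstub, _filestoragestub), {})
_repo = _derived()
assert _repo.kind() == b'main'
assert _repo.file(b'a.txt') == b'filelog:a.txt'
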
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

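    # Conceptually, opening a repository checks its on-disk requirements
    # against a supported set like the ones above; reduced to a standalone
    # sketch (values illustrative, not this class's actual check):
    #
    #     supported = {b'revlogv1', b'store', b'fncache'}
    #     found = {b'revlogv1', b'store', b'exp-future-format'}
    #     missing = found - supported
    #     if missing:
    #         # refuse to open: the repo needs features we do not know
    #         raise Exception(b', '.join(sorted(missing)))
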
    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

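    # The ward pattern used above, reduced to a standalone sketch: wrap the
    # audit callable and keep only a weak reference to the repository so the
    # wrapper itself does not create a reference cycle (names illustrative):
    #
    #     def make_ward(repo, origfunc):
    #         rref = weakref.ref(repo)
    #
    #         def check(path, mode=None):
    #             ret = origfunc(path, mode=mode)
    #             repo = rref()  # may be None once the repo is collected
    #             if repo is not None and mode not in (None, b'r', b'rb'):
    #                 ...  # emit the developer warnings seen above
    #             return ret
    #
    #         return check
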
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

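    # The prefix walk above goes from the longest prefix of ``subpath`` down
    # to the shortest; schematically (values illustrative):
    #
    #     parts = [b'sub', b'deep', b'x.txt']
    #     # successive prefixes tried: b'sub/deep/x.txt', b'sub/deep', b'sub'
    #
    # so a subrepository rooted at b'sub' is only found after the deeper
    # candidates have been ruled out.
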
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

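    # The naming rule above, schematically: the extra filter id is appended
    # after b'%' unless the name already carries one (values illustrative):
    #
    #     _extrafilterid = None        : b'served'          -> b'served'
    #     _extrafilterid = b'myfilter' : b'served'          -> b'served%myfilter'
    #     _extrafilterid = b'myfilter' : b'served%myfilter' -> b'served%myfilter'
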
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depends on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race; see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(
            txnutil.mayhavepending(self.root),
            concurrencychecker=revlogchecker.get_checker(self.ui, b'changelog'),
        )

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

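    # Reduced to predicates, the intersection above accepts a path only when
    # both the caller's matcher and the narrow matcher accept it; a standalone
    # sketch (illustrative):
    #
    #     narrow = lambda f: f.startswith(b'src/')
    #     user = lambda f: f.endswith(b'.py')
    #     both = lambda f: narrow(f) and user(f)
    #     assert both(b'src/app.py')
    #     assert not both(b'docs/app.py')
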
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

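    # All three spellings of the null revision resolve through this mapping
    # without touching the changelog:
    #
    #     quick[b'null'] == quick[nullrev] == quick[self.nullid]
    #                    == (nullrev, self.nullid)
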
1761 @unfilteredpropertycache
1767 @unfilteredpropertycache
1762 def _quick_access_changeid_wc(self):
1768 def _quick_access_changeid_wc(self):
1763 # also fast path access to the working copy parents
1769 # also fast path access to the working copy parents
1764 # however, only do it for filter that ensure wc is visible.
1770 # however, only do it for filter that ensure wc is visible.
1765 quick = self._quick_access_changeid_null.copy()
1771 quick = self._quick_access_changeid_null.copy()
1766 cl = self.unfiltered().changelog
1772 cl = self.unfiltered().changelog
1767 for node in self.dirstate.parents():
1773 for node in self.dirstate.parents():
1768 if node == self.nullid:
1774 if node == self.nullid:
1769 continue
1775 continue
1770 rev = cl.index.get_rev(node)
1776 rev = cl.index.get_rev(node)
1771 if rev is None:
1777 if rev is None:
1772 # unknown working copy parent case:
1778 # unknown working copy parent case:
1773 #
1779 #
1774 # skip the fast path and let higher code deal with it
1780 # skip the fast path and let higher code deal with it
1775 continue
1781 continue
1776 pair = (rev, node)
1782 pair = (rev, node)
1777 quick[rev] = pair
1783 quick[rev] = pair
1778 quick[node] = pair
1784 quick[node] = pair
1779 # also add the parents of the parents
1785 # also add the parents of the parents
1780 for r in cl.parentrevs(rev):
1786 for r in cl.parentrevs(rev):
1781 if r == nullrev:
1787 if r == nullrev:
1782 continue
1788 continue
1783 n = cl.node(r)
1789 n = cl.node(r)
1784 pair = (r, n)
1790 pair = (r, n)
1785 quick[r] = pair
1791 quick[r] = pair
1786 quick[n] = pair
1792 quick[n] = pair
1787 p1node = self.dirstate.p1()
1793 p1node = self.dirstate.p1()
1788 if p1node != self.nullid:
1794 if p1node != self.nullid:
1789 quick[b'.'] = quick[p1node]
1795 quick[b'.'] = quick[p1node]
1790 return quick
1796 return quick
1791
1797
1792 @unfilteredmethod
1798 @unfilteredmethod
1793 def _quick_access_changeid_invalidate(self):
1799 def _quick_access_changeid_invalidate(self):
1794 if '_quick_access_changeid_wc' in vars(self):
1800 if '_quick_access_changeid_wc' in vars(self):
1795 del self.__dict__['_quick_access_changeid_wc']
1801 del self.__dict__['_quick_access_changeid_wc']
1796
1802
1797 @property
1803 @property
1798 def _quick_access_changeid(self):
1804 def _quick_access_changeid(self):
1799 """an helper dictionnary for __getitem__ calls
1805 """an helper dictionnary for __getitem__ calls
1800
1806
1801 This contains a list of symbol we can recognise right away without
1807 This contains a list of symbol we can recognise right away without
1802 further processing.
1808 further processing.
1803 """
1809 """
1804 if self.filtername in repoview.filter_has_wc:
1810 if self.filtername in repoview.filter_has_wc:
1805 return self._quick_access_changeid_wc
1811 return self._quick_access_changeid_wc
1806 return self._quick_access_changeid_null
1812 return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
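
    # Usage sketch for __getitem__ (illustrative only; assumes an existing
    # `repo` instance):
    #
    #     repo[None]     # workingctx for the working directory
    #     repo[42]       # changectx for revision 42
    #     repo[b'tip']   # changectx for the tip changeset
    #     repo[b'.']     # changectx for the working directory parent
    #     repo[node]     # binary (20-byte) or hex (40-byte) node
    #     repo[0:3]      # list of changectx, filtered revisions skipped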

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
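
    # Example (illustrative): the %-formatting keeps user input out of the
    # revset grammar, e.g. b'%d' for an int, b'%s' for bytes and b'%ld' for
    # a list of ints (see ``revsetlang.formatspec`` for the full list):
    #
    #     for r in repo.revs(b'ancestors(%d) and not public()', 42):
    #         ...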

    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]
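
    # Example (illustrative; `ui` stands for any ui instance at hand):
    #
    #     for ctx in repo.set(b'draft()'):
    #         ui.write(b'%d: %s\n' % (ctx.rev(), ctx.description()))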

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
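
    # Example (illustrative; `mybase` is a hypothetical alias supplied just
    # for this call):
    #
    #     revs = repo.anyrevs(
    #         [b'mybase::.', b'tip'],
    #         user=True,
    #         localalias={b'mybase': b'min(draft())'},
    #     )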

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)
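
    # Shape of the return value (illustrative; <node> stands for a 20-byte
    # binary node):
    #
    #     tags     = {b'tip': <node>, b'v1.0': <node>, ...}
    #     tagtypes = {b'v1.0': b'global', b'wip': b'local', ...}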

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass
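
    # Example (illustrative):
    #
    #     repo.branchtip(b'default')                      # 20-byte node
    #     repo.branchtip(b'missing', ignoremissing=True)  # None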

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
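
    # Example (illustrative): the answers preserve the input order, one
    # boolean per node:
    #
    #     repo.known([known_node, unknown_node])  # -> [True, False]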

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
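
    # The patterns and commands come from the ``[encode]`` and ``[decode]``
    # hgrc sections, e.g. (example adapted from the hgrc documentation; not
    # necessarily a good idea in practice):
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip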

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
2336 # experimental config: experimental.hook-track-tags
2342 # experimental config: experimental.hook-track-tags
2337 shouldtracktags = self.ui.configbool(
2343 shouldtracktags = self.ui.configbool(
2338 b'experimental', b'hook-track-tags'
2344 b'experimental', b'hook-track-tags'
2339 )
2345 )
2340 if desc != b'strip' and shouldtracktags:
2346 if desc != b'strip' and shouldtracktags:
2341 oldheads = self.changelog.headrevs()
2347 oldheads = self.changelog.headrevs()
2342
2348
2343 def tracktags(tr2):
2349 def tracktags(tr2):
2344 repo = reporef()
2350 repo = reporef()
2345 assert repo is not None # help pytype
2351 assert repo is not None # help pytype
2346 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2352 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2347 newheads = repo.changelog.headrevs()
2353 newheads = repo.changelog.headrevs()
2348 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2354 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2349 # notes: we compare lists here.
2355 # notes: we compare lists here.
2350 # As we do it only once buiding set would not be cheaper
2356 # As we do it only once buiding set would not be cheaper
2351 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2357 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2352 if changes:
2358 if changes:
2353 tr2.hookargs[b'tag_moved'] = b'1'
2359 tr2.hookargs[b'tag_moved'] = b'1'
2354 with repo.vfs(
2360 with repo.vfs(
2355 b'changes/tags.changes', b'w', atomictemp=True
2361 b'changes/tags.changes', b'w', atomictemp=True
2356 ) as changesfile:
2362 ) as changesfile:
2357 # note: we do not register the file to the transaction
2363 # note: we do not register the file to the transaction
2358 # because we needs it to still exist on the transaction
2364 # because we needs it to still exist on the transaction
2359 # is close (for txnclose hooks)
2365 # is close (for txnclose hooks)
2360 tagsmod.writediff(changesfile, changes)
2366 tagsmod.writediff(changesfile, changes)
2361
2367
2362 def validate(tr2):
2368 def validate(tr2):
2363 """will run pre-closing hooks"""
2369 """will run pre-closing hooks"""
2364 # XXX the transaction API is a bit lacking here so we take a hacky
2370 # XXX the transaction API is a bit lacking here so we take a hacky
2365 # path for now
2371 # path for now
2366 #
2372 #
2367 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2373 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2368 # dict is copied before these run. In addition we needs the data
2374 # dict is copied before these run. In addition we needs the data
2369 # available to in memory hooks too.
2375 # available to in memory hooks too.
2370 #
2376 #
2371 # Moreover, we also need to make sure this runs before txnclose
2377 # Moreover, we also need to make sure this runs before txnclose
2372 # hooks and there is no "pending" mechanism that would execute
2378 # hooks and there is no "pending" mechanism that would execute
2373 # logic only if hooks are about to run.
2379 # logic only if hooks are about to run.
2374 #
2380 #
2375 # Fixing this limitation of the transaction is also needed to track
2381 # Fixing this limitation of the transaction is also needed to track
2376 # other families of changes (bookmarks, phases, obsolescence).
2382 # other families of changes (bookmarks, phases, obsolescence).
2377 #
2383 #
2378 # This will have to be fixed before we remove the experimental
2384 # This will have to be fixed before we remove the experimental
2379 # gating.
2385 # gating.
2380 tracktags(tr2)
2386 tracktags(tr2)
2381 repo = reporef()
2387 repo = reporef()
2382 assert repo is not None # help pytype
2388 assert repo is not None # help pytype
2383
2389
2384 singleheadopt = (b'experimental', b'single-head-per-branch')
2390 singleheadopt = (b'experimental', b'single-head-per-branch')
2385 singlehead = repo.ui.configbool(*singleheadopt)
2391 singlehead = repo.ui.configbool(*singleheadopt)
2386 if singlehead:
2392 if singlehead:
2387 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2393 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2388 accountclosed = singleheadsub.get(
2394 accountclosed = singleheadsub.get(
2389 b"account-closed-heads", False
2395 b"account-closed-heads", False
2390 )
2396 )
2391 if singleheadsub.get(b"public-changes-only", False):
2397 if singleheadsub.get(b"public-changes-only", False):
2392 filtername = b"immutable"
2398 filtername = b"immutable"
2393 else:
2399 else:
2394 filtername = b"visible"
2400 filtername = b"visible"
2395 scmutil.enforcesinglehead(
2401 scmutil.enforcesinglehead(
2396 repo, tr2, desc, accountclosed, filtername
2402 repo, tr2, desc, accountclosed, filtername
2397 )
2403 )
2398 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2404 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2399 for name, (old, new) in sorted(
2405 for name, (old, new) in sorted(
2400 tr.changes[b'bookmarks'].items()
2406 tr.changes[b'bookmarks'].items()
2401 ):
2407 ):
2402 args = tr.hookargs.copy()
2408 args = tr.hookargs.copy()
2403 args.update(bookmarks.preparehookargs(name, old, new))
2409 args.update(bookmarks.preparehookargs(name, old, new))
2404 repo.hook(
2410 repo.hook(
2405 b'pretxnclose-bookmark',
2411 b'pretxnclose-bookmark',
2406 throw=True,
2412 throw=True,
2407 **pycompat.strkwargs(args)
2413 **pycompat.strkwargs(args)
2408 )
2414 )
2409 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2415 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2410 cl = repo.unfiltered().changelog
2416 cl = repo.unfiltered().changelog
2411 for revs, (old, new) in tr.changes[b'phases']:
2417 for revs, (old, new) in tr.changes[b'phases']:
2412 for rev in revs:
2418 for rev in revs:
2413 args = tr.hookargs.copy()
2419 args = tr.hookargs.copy()
2414 node = hex(cl.node(rev))
2420 node = hex(cl.node(rev))
2415 args.update(phases.preparehookargs(node, old, new))
2421 args.update(phases.preparehookargs(node, old, new))
2416 repo.hook(
2422 repo.hook(
2417 b'pretxnclose-phase',
2423 b'pretxnclose-phase',
2418 throw=True,
2424 throw=True,
2419 **pycompat.strkwargs(args)
2425 **pycompat.strkwargs(args)
2420 )
2426 )
2421
2427
2422 repo.hook(
2428 repo.hook(
2423 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2429 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2424 )
2430 )
2425
2431
2426 def releasefn(tr, success):
2432 def releasefn(tr, success):
2427 repo = reporef()
2433 repo = reporef()
2428 if repo is None:
2434 if repo is None:
2429 # If the repo has been GC'd (and this release function is being
2435 # If the repo has been GC'd (and this release function is being
2430 # called from transaction.__del__), there's not much we can do,
2436 # called from transaction.__del__), there's not much we can do,
2431 # so just leave the unfinished transaction there and let the
2437 # so just leave the unfinished transaction there and let the
2432 # user run `hg recover`.
2438 # user run `hg recover`.
2433 return
2439 return
2434 if success:
2440 if success:
2435 # this should be explicitly invoked here, because
2441 # this should be explicitly invoked here, because
2436 # in-memory changes aren't written out at closing
2442 # in-memory changes aren't written out at closing
2437 # transaction, if tr.addfilegenerator (via
2443 # transaction, if tr.addfilegenerator (via
2438 # dirstate.write or so) isn't invoked while
2444 # dirstate.write or so) isn't invoked while
2439 # transaction running
2445 # transaction running
2440 repo.dirstate.write(None)
2446 repo.dirstate.write(None)
2441 else:
2447 else:
2442 # discard all changes (including ones already written
2448 # discard all changes (including ones already written
2443 # out) in this transaction
2449 # out) in this transaction
2444 narrowspec.restorebackup(self, b'journal.narrowspec')
2450 narrowspec.restorebackup(self, b'journal.narrowspec')
2445 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2451 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2446 repo.dirstate.restorebackup(None, b'journal.dirstate')
2452 repo.dirstate.restorebackup(None, b'journal.dirstate')
2447
2453
2448 repo.invalidate(clearfilecache=True)
2454 repo.invalidate(clearfilecache=True)
2449
2455
2450 tr = transaction.transaction(
2456 tr = transaction.transaction(
2451 rp,
2457 rp,
2452 self.svfs,
2458 self.svfs,
2453 vfsmap,
2459 vfsmap,
2454 b"journal",
2460 b"journal",
2455 b"undo",
2461 b"undo",
2456 aftertrans(renames),
2462 aftertrans(renames),
2457 self.store.createmode,
2463 self.store.createmode,
2458 validator=validate,
2464 validator=validate,
2459 releasefn=releasefn,
2465 releasefn=releasefn,
2460 checkambigfiles=_cachedfiles,
2466 checkambigfiles=_cachedfiles,
2461 name=desc,
2467 name=desc,
2462 )
2468 )
2463 tr.changes[b'origrepolen'] = len(self)
2469 tr.changes[b'origrepolen'] = len(self)
2464 tr.changes[b'obsmarkers'] = set()
2470 tr.changes[b'obsmarkers'] = set()
2465 tr.changes[b'phases'] = []
2471 tr.changes[b'phases'] = []
2466 tr.changes[b'bookmarks'] = {}
2472 tr.changes[b'bookmarks'] = {}
2467
2473
2468 tr.hookargs[b'txnid'] = txnid
2474 tr.hookargs[b'txnid'] = txnid
2469 tr.hookargs[b'txnname'] = desc
2475 tr.hookargs[b'txnname'] = desc
2470 tr.hookargs[b'changes'] = tr.changes
2476 tr.hookargs[b'changes'] = tr.changes
2471 # note: writing the fncache only during finalize mean that the file is
2477 # note: writing the fncache only during finalize mean that the file is
2472 # outdated when running hooks. As fncache is used for streaming clone,
2478 # outdated when running hooks. As fncache is used for streaming clone,
2473 # this is not expected to break anything that happen during the hooks.
2479 # this is not expected to break anything that happen during the hooks.
2474 tr.addfinalize(b'flush-fncache', self.store.write)
2480 tr.addfinalize(b'flush-fncache', self.store.write)
2475
2481
2476 def txnclosehook(tr2):
2482 def txnclosehook(tr2):
2477 """To be run if transaction is successful, will schedule a hook run"""
2483 """To be run if transaction is successful, will schedule a hook run"""
2478 # Don't reference tr2 in hook() so we don't hold a reference.
2484 # Don't reference tr2 in hook() so we don't hold a reference.
2479 # This reduces memory consumption when there are multiple
2485 # This reduces memory consumption when there are multiple
2480 # transactions per lock. This can likely go away if issue5045
2486 # transactions per lock. This can likely go away if issue5045
2481 # fixes the function accumulation.
2487 # fixes the function accumulation.
2482 hookargs = tr2.hookargs
2488 hookargs = tr2.hookargs
2483
2489
2484 def hookfunc(unused_success):
2490 def hookfunc(unused_success):
2485 repo = reporef()
2491 repo = reporef()
2486 assert repo is not None # help pytype
2492 assert repo is not None # help pytype
2487
2493
2488 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2494 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2489 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2495 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2490 for name, (old, new) in bmchanges:
2496 for name, (old, new) in bmchanges:
2491 args = tr.hookargs.copy()
2497 args = tr.hookargs.copy()
2492 args.update(bookmarks.preparehookargs(name, old, new))
2498 args.update(bookmarks.preparehookargs(name, old, new))
2493 repo.hook(
2499 repo.hook(
2494 b'txnclose-bookmark',
2500 b'txnclose-bookmark',
2495 throw=False,
2501 throw=False,
2496 **pycompat.strkwargs(args)
2502 **pycompat.strkwargs(args)
2497 )
2503 )
2498
2504
2499 if hook.hashook(repo.ui, b'txnclose-phase'):
2505 if hook.hashook(repo.ui, b'txnclose-phase'):
2500 cl = repo.unfiltered().changelog
2506 cl = repo.unfiltered().changelog
2501 phasemv = sorted(
2507 phasemv = sorted(
2502 tr.changes[b'phases'], key=lambda r: r[0][0]
2508 tr.changes[b'phases'], key=lambda r: r[0][0]
2503 )
2509 )
2504 for revs, (old, new) in phasemv:
2510 for revs, (old, new) in phasemv:
2505 for rev in revs:
2511 for rev in revs:
2506 args = tr.hookargs.copy()
2512 args = tr.hookargs.copy()
2507 node = hex(cl.node(rev))
2513 node = hex(cl.node(rev))
2508 args.update(phases.preparehookargs(node, old, new))
2514 args.update(phases.preparehookargs(node, old, new))
2509 repo.hook(
2515 repo.hook(
2510 b'txnclose-phase',
2516 b'txnclose-phase',
2511 throw=False,
2517 throw=False,
2512 **pycompat.strkwargs(args)
2518 **pycompat.strkwargs(args)
2513 )
2519 )
2514
2520
2515 repo.hook(
2521 repo.hook(
2516 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2522 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2517 )
2523 )
2518
2524
2519 repo = reporef()
2525 repo = reporef()
2520 assert repo is not None # help pytype
2526 assert repo is not None # help pytype
2521 repo._afterlock(hookfunc)
2527 repo._afterlock(hookfunc)
2522
2528
2523 tr.addfinalize(b'txnclose-hook', txnclosehook)
2529 tr.addfinalize(b'txnclose-hook', txnclosehook)
2524 # Include a leading "-" to make it happen before the transaction summary
2530 # Include a leading "-" to make it happen before the transaction summary
2525 # reports registered via scmutil.registersummarycallback() whose names
2531 # reports registered via scmutil.registersummarycallback() whose names
2526 # are 00-txnreport etc. That way, the caches will be warm when the
2532 # are 00-txnreport etc. That way, the caches will be warm when the
2527 # callbacks run.
2533 # callbacks run.
2528 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2534 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2529
2535
2530 def txnaborthook(tr2):
2536 def txnaborthook(tr2):
2531 """To be run if transaction is aborted"""
2537 """To be run if transaction is aborted"""
2532 repo = reporef()
2538 repo = reporef()
2533 assert repo is not None # help pytype
2539 assert repo is not None # help pytype
2534 repo.hook(
2540 repo.hook(
2535 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2541 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2536 )
2542 )
2537
2543
2538 tr.addabort(b'txnabort-hook', txnaborthook)
2544 tr.addabort(b'txnabort-hook', txnaborthook)
2539 # avoid eager cache invalidation. in-memory data should be identical
2545 # avoid eager cache invalidation. in-memory data should be identical
2540 # to stored data if transaction has no error.
2546 # to stored data if transaction has no error.
2541 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2547 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2542 self._transref = weakref.ref(tr)
2548 self._transref = weakref.ref(tr)
2543 scmutil.registersummarycallback(self, tr, desc)
2549 scmutil.registersummarycallback(self, tr, desc)
2544 return tr
2550 return tr
2545
2551
2546 def _journalfiles(self):
2552 def _journalfiles(self):
2547 return (
2553 return (
2548 (self.svfs, b'journal'),
2554 (self.svfs, b'journal'),
2549 (self.svfs, b'journal.narrowspec'),
2555 (self.svfs, b'journal.narrowspec'),
2550 (self.vfs, b'journal.narrowspec.dirstate'),
2556 (self.vfs, b'journal.narrowspec.dirstate'),
2551 (self.vfs, b'journal.dirstate'),
2557 (self.vfs, b'journal.dirstate'),
2552 (self.vfs, b'journal.branch'),
2558 (self.vfs, b'journal.branch'),
2553 (self.vfs, b'journal.desc'),
2559 (self.vfs, b'journal.desc'),
2554 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2560 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2555 (self.svfs, b'journal.phaseroots'),
2561 (self.svfs, b'journal.phaseroots'),
2556 )
2562 )
2557
2563
2558 def undofiles(self):
2564 def undofiles(self):
2559 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2565 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2560
2566
2561 @unfilteredmethod
2567 @unfilteredmethod
2562 def _writejournal(self, desc):
2568 def _writejournal(self, desc):
2563 self.dirstate.savebackup(None, b'journal.dirstate')
2569 self.dirstate.savebackup(None, b'journal.dirstate')
2564 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2570 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2565 narrowspec.savebackup(self, b'journal.narrowspec')
2571 narrowspec.savebackup(self, b'journal.narrowspec')
2566 self.vfs.write(
2572 self.vfs.write(
2567 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2573 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2568 )
2574 )
2569 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2575 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2570 bookmarksvfs = bookmarks.bookmarksvfs(self)
2576 bookmarksvfs = bookmarks.bookmarksvfs(self)
2571 bookmarksvfs.write(
2577 bookmarksvfs.write(
2572 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2578 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2573 )
2579 )
2574 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2580 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2575
2581
2576 def recover(self):
2582 def recover(self):
2577 with self.lock():
2583 with self.lock():
2578 if self.svfs.exists(b"journal"):
2584 if self.svfs.exists(b"journal"):
2579 self.ui.status(_(b"rolling back interrupted transaction\n"))
2585 self.ui.status(_(b"rolling back interrupted transaction\n"))
2580 vfsmap = {
2586 vfsmap = {
2581 b'': self.svfs,
2587 b'': self.svfs,
2582 b'plain': self.vfs,
2588 b'plain': self.vfs,
2583 }
2589 }
2584 transaction.rollback(
2590 transaction.rollback(
2585 self.svfs,
2591 self.svfs,
2586 vfsmap,
2592 vfsmap,
2587 b"journal",
2593 b"journal",
2588 self.ui.warn,
2594 self.ui.warn,
2589 checkambigfiles=_cachedfiles,
2595 checkambigfiles=_cachedfiles,
2590 )
2596 )
2591 self.invalidate()
2597 self.invalidate()
2592 return True
2598 return True
2593 else:
2599 else:
2594 self.ui.warn(_(b"no interrupted transaction available\n"))
2600 self.ui.warn(_(b"no interrupted transaction available\n"))
2595 return False
2601 return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
2611
2617
2612 @unfilteredmethod # Until we get smarter cache management
2618 @unfilteredmethod # Until we get smarter cache management
2613 def _rollback(self, dryrun, force, dsguard):
2619 def _rollback(self, dryrun, force, dsguard):
2614 ui = self.ui
2620 ui = self.ui
2615 try:
2621 try:
2616 args = self.vfs.read(b'undo.desc').splitlines()
2622 args = self.vfs.read(b'undo.desc').splitlines()
2617 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2623 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2618 if len(args) >= 3:
2624 if len(args) >= 3:
2619 detail = args[2]
2625 detail = args[2]
2620 oldtip = oldlen - 1
2626 oldtip = oldlen - 1
2621
2627
2622 if detail and ui.verbose:
2628 if detail and ui.verbose:
2623 msg = _(
2629 msg = _(
2624 b'repository tip rolled back to revision %d'
2630 b'repository tip rolled back to revision %d'
2625 b' (undo %s: %s)\n'
2631 b' (undo %s: %s)\n'
2626 ) % (oldtip, desc, detail)
2632 ) % (oldtip, desc, detail)
2627 else:
2633 else:
2628 msg = _(
2634 msg = _(
2629 b'repository tip rolled back to revision %d (undo %s)\n'
2635 b'repository tip rolled back to revision %d (undo %s)\n'
2630 ) % (oldtip, desc)
2636 ) % (oldtip, desc)
2631 except IOError:
2637 except IOError:
2632 msg = _(b'rolling back unknown transaction\n')
2638 msg = _(b'rolling back unknown transaction\n')
2633 desc = None
2639 desc = None
2634
2640
2635 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2641 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2636 raise error.Abort(
2642 raise error.Abort(
2637 _(
2643 _(
2638 b'rollback of last commit while not checked out '
2644 b'rollback of last commit while not checked out '
2639 b'may lose data'
2645 b'may lose data'
2640 ),
2646 ),
2641 hint=_(b'use -f to force'),
2647 hint=_(b'use -f to force'),
2642 )
2648 )
2643
2649
2644 ui.status(msg)
2650 ui.status(msg)
2645 if dryrun:
2651 if dryrun:
2646 return 0
2652 return 0
2647
2653
2648 parents = self.dirstate.parents()
2654 parents = self.dirstate.parents()
2649 self.destroying()
2655 self.destroying()
2650 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2656 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2651 transaction.rollback(
2657 transaction.rollback(
2652 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2658 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2653 )
2659 )
2654 bookmarksvfs = bookmarks.bookmarksvfs(self)
2660 bookmarksvfs = bookmarks.bookmarksvfs(self)
2655 if bookmarksvfs.exists(b'undo.bookmarks'):
2661 if bookmarksvfs.exists(b'undo.bookmarks'):
2656 bookmarksvfs.rename(
2662 bookmarksvfs.rename(
2657 b'undo.bookmarks', b'bookmarks', checkambig=True
2663 b'undo.bookmarks', b'bookmarks', checkambig=True
2658 )
2664 )
2659 if self.svfs.exists(b'undo.phaseroots'):
2665 if self.svfs.exists(b'undo.phaseroots'):
2660 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2666 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2661 self.invalidate()
2667 self.invalidate()
2662
2668
2663 has_node = self.changelog.index.has_node
2669 has_node = self.changelog.index.has_node
2664 parentgone = any(not has_node(p) for p in parents)
2670 parentgone = any(not has_node(p) for p in parents)
2665 if parentgone:
2671 if parentgone:
2666 # prevent the dirstateguard from overwriting the already restored dirstate
2672 # prevent the dirstateguard from overwriting the already restored dirstate
2667 dsguard.close()
2673 dsguard.close()
2668
2674
2669 narrowspec.restorebackup(self, b'undo.narrowspec')
2675 narrowspec.restorebackup(self, b'undo.narrowspec')
2670 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2676 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2671 self.dirstate.restorebackup(None, b'undo.dirstate')
2677 self.dirstate.restorebackup(None, b'undo.dirstate')
2672 try:
2678 try:
2673 branch = self.vfs.read(b'undo.branch')
2679 branch = self.vfs.read(b'undo.branch')
2674 self.dirstate.setbranch(encoding.tolocal(branch))
2680 self.dirstate.setbranch(encoding.tolocal(branch))
2675 except IOError:
2681 except IOError:
2676 ui.warn(
2682 ui.warn(
2677 _(
2683 _(
2678 b'named branch could not be reset: '
2684 b'named branch could not be reset: '
2679 b'current branch is still \'%s\'\n'
2685 b'current branch is still \'%s\'\n'
2680 )
2686 )
2681 % self.dirstate.branch()
2687 % self.dirstate.branch()
2682 )
2688 )
2683
2689
2684 parents = tuple([p.rev() for p in self[None].parents()])
2690 parents = tuple([p.rev() for p in self[None].parents()])
2685 if len(parents) > 1:
2691 if len(parents) > 1:
2686 ui.status(
2692 ui.status(
2687 _(
2693 _(
2688 b'working directory now based on '
2694 b'working directory now based on '
2689 b'revisions %d and %d\n'
2695 b'revisions %d and %d\n'
2690 )
2696 )
2691 % parents
2697 % parents
2692 )
2698 )
2693 else:
2699 else:
2694 ui.status(
2700 ui.status(
2695 _(b'working directory now based on revision %d\n') % parents
2701 _(b'working directory now based on revision %d\n') % parents
2696 )
2702 )
2697 mergestatemod.mergestate.clean(self)
2703 mergestatemod.mergestate.clean(self)
2698
2704
2699 # TODO: if we know which new heads may result from this rollback, pass
2705 # TODO: if we know which new heads may result from this rollback, pass
2700 # them to destroy(), which will prevent the branchhead cache from being
2706 # them to destroy(), which will prevent the branchhead cache from being
2701 # invalidated.
2707 # invalidated.
2702 self.destroyed()
2708 self.destroyed()
2703 return 0
2709 return 0
2704
2710
2705 def _buildcacheupdater(self, newtransaction):
2711 def _buildcacheupdater(self, newtransaction):
2706 """called during transaction to build the callback updating cache
2712 """called during transaction to build the callback updating cache
2707
2713
2708 Lives on the repository to help extensions that might want to augment
2714 Lives on the repository to help extensions that might want to augment
2709 this logic. For this purpose, the created transaction is passed to the
2715 this logic. For this purpose, the created transaction is passed to the
2710 method.
2716 method.
2711 """
2717 """
2712 # we must avoid a cyclic reference between the repo and the transaction.
2718 # we must avoid a cyclic reference between the repo and the transaction.
2713 reporef = weakref.ref(self)
2719 reporef = weakref.ref(self)
2714
2720
2715 def updater(tr):
2721 def updater(tr):
2716 repo = reporef()
2722 repo = reporef()
2717 assert repo is not None # help pytype
2723 assert repo is not None # help pytype
2718 repo.updatecaches(tr)
2724 repo.updatecaches(tr)
2719
2725
2720 return updater
2726 return updater
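
# A minimal, self-contained sketch of the weakref pattern used above
# (hypothetical names, not Mercurial API): the closure captures only a
# weak reference to its owner, so a transaction holding the callback
# cannot keep the repository object alive.
import weakref

class _Owner(object):
    def make_updater(self):
        ref = weakref.ref(self)  # no strong reference captured

        def updater(tr):
            owner = ref()  # None once the owner has been collected
            if owner is not None:
                owner.on_update(tr)

        return updater

    def on_update(self, tr):
        pass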
2721
2727
2722 @unfilteredmethod
2728 @unfilteredmethod
2723 def updatecaches(self, tr=None, full=False):
2729 def updatecaches(self, tr=None, full=False):
2724 """warm appropriate caches
2730 """warm appropriate caches
2725
2731
2726 If this function is called after a transaction has closed, the transaction
2732 If this function is called after a transaction has closed, the transaction
2727 will be available in the 'tr' argument. This can be used to selectively
2733 will be available in the 'tr' argument. This can be used to selectively
2728 update caches relevant to the changes in that transaction.
2734 update caches relevant to the changes in that transaction.
2729
2735
2730 If 'full' is set, make sure all caches the function knows about have
2736 If 'full' is set, make sure all caches the function knows about have
2731 up-to-date data, even the ones usually loaded more lazily.
2737 up-to-date data, even the ones usually loaded more lazily.
2732
2738
2733 The `full` argument can take a special "post-clone" value. In this case
2739 The `full` argument can take a special "post-clone" value. In this case
2734 the cache warming is done after a clone, and some of the slower caches may
2740 the cache warming is done after a clone, and some of the slower caches may
2735 be skipped, namely the `.fnodetags` one. This argument is 5.8-specific,
2741 be skipped, namely the `.fnodetags` one. This argument is 5.8-specific,
2736 as we plan a cleaner way to deal with this in 5.9.
2742 as we plan a cleaner way to deal with this in 5.9.
2737 """
2743 """
2738 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2744 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2739 # During strip, many caches are invalid but
2745 # During strip, many caches are invalid but
2740 # later call to `destroyed` will refresh them.
2746 # later call to `destroyed` will refresh them.
2741 return
2747 return
2742
2748
2743 if tr is None or tr.changes[b'origrepolen'] < len(self):
2749 if tr is None or tr.changes[b'origrepolen'] < len(self):
2744 # accessing the 'served' branchmap should refresh all the others,
2750 # accessing the 'served' branchmap should refresh all the others,
2745 self.ui.debug(b'updating the branch cache\n')
2751 self.ui.debug(b'updating the branch cache\n')
2746 self.filtered(b'served').branchmap()
2752 self.filtered(b'served').branchmap()
2747 self.filtered(b'served.hidden').branchmap()
2753 self.filtered(b'served.hidden').branchmap()
2748
2754
2749 if full:
2755 if full:
2750 unfi = self.unfiltered()
2756 unfi = self.unfiltered()
2751
2757
2752 self.changelog.update_caches(transaction=tr)
2758 self.changelog.update_caches(transaction=tr)
2753 self.manifestlog.update_caches(transaction=tr)
2759 self.manifestlog.update_caches(transaction=tr)
2754
2760
2755 rbc = unfi.revbranchcache()
2761 rbc = unfi.revbranchcache()
2756 for r in unfi.changelog:
2762 for r in unfi.changelog:
2757 rbc.branchinfo(r)
2763 rbc.branchinfo(r)
2758 rbc.write()
2764 rbc.write()
2759
2765
2760 # ensure the working copy parents are in the manifestfulltextcache
2766 # ensure the working copy parents are in the manifestfulltextcache
2761 for ctx in self[b'.'].parents():
2767 for ctx in self[b'.'].parents():
2762 ctx.manifest() # accessing the manifest is enough
2768 ctx.manifest() # accessing the manifest is enough
2763
2769
2764 if full != b"post-clone":
2770 if full != b"post-clone":
2765 # accessing fnode cache warms the cache
2771 # accessing fnode cache warms the cache
2766 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2772 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2767 # accessing tags warms the cache
2773 # accessing tags warms the cache
2768 self.tags()
2774 self.tags()
2769 self.filtered(b'served').tags()
2775 self.filtered(b'served').tags()
2770
2776
2771 # The `full` arg is documented as updating even the lazily-loaded
2777 # The `full` arg is documented as updating even the lazily-loaded
2772 # caches immediately, so we're forcing a write to cause these caches
2778 # caches immediately, so we're forcing a write to cause these caches
2773 # to be warmed up even if they haven't explicitly been requested
2779 # to be warmed up even if they haven't explicitly been requested
2774 # yet (if they've never been used by hg, they won't ever have been
2780 # yet (if they've never been used by hg, they won't ever have been
2775 # written, even if they're a subset of another kind of cache that
2781 # written, even if they're a subset of another kind of cache that
2776 # *has* been used).
2782 # *has* been used).
2777 for filt in repoview.filtertable.keys():
2783 for filt in repoview.filtertable.keys():
2778 filtered = self.filtered(filt)
2784 filtered = self.filtered(filt)
2779 filtered.branchmap().write(filtered)
2785 filtered.branchmap().write(filtered)
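
# An editorial sketch of how callers may drive the method above (assuming
# `repo` is a local repository; the values follow the docstring):
#
#   repo.updatecaches(tr)                  # incremental, after a transaction
#   repo.updatecaches(full=True)           # also warm lazily-loaded caches
#   repo.updatecaches(full=b"post-clone")  # 5.8-specific: skip `.fnodetags`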
2780
2786
2781 def invalidatecaches(self):
2787 def invalidatecaches(self):
2782
2788
2783 if '_tagscache' in vars(self):
2789 if '_tagscache' in vars(self):
2784 # can't use delattr on proxy
2790 # can't use delattr on proxy
2785 del self.__dict__['_tagscache']
2791 del self.__dict__['_tagscache']
2786
2792
2787 self._branchcaches.clear()
2793 self._branchcaches.clear()
2788 self.invalidatevolatilesets()
2794 self.invalidatevolatilesets()
2789 self._sparsesignaturecache.clear()
2795 self._sparsesignaturecache.clear()
2790
2796
2791 def invalidatevolatilesets(self):
2797 def invalidatevolatilesets(self):
2792 self.filteredrevcache.clear()
2798 self.filteredrevcache.clear()
2793 obsolete.clearobscaches(self)
2799 obsolete.clearobscaches(self)
2794 self._quick_access_changeid_invalidate()
2800 self._quick_access_changeid_invalidate()
2795
2801
2796 def invalidatedirstate(self):
2802 def invalidatedirstate(self):
2797 """Invalidates the dirstate, causing the next call to dirstate
2803 """Invalidates the dirstate, causing the next call to dirstate
2798 to check if it was modified since the last time it was read,
2804 to check if it was modified since the last time it was read,
2799 rereading it if it has.
2805 rereading it if it has.
2800
2806
2801 This differs from dirstate.invalidate() in that it doesn't always
2807 This differs from dirstate.invalidate() in that it doesn't always
2802 reread the dirstate. Use dirstate.invalidate() if you want to
2808 reread the dirstate. Use dirstate.invalidate() if you want to
2803 explicitly read the dirstate again (i.e. restoring it to a previous
2809 explicitly read the dirstate again (i.e. restoring it to a previous
2804 known good state)."""
2810 known good state)."""
2805 if hasunfilteredcache(self, 'dirstate'):
2811 if hasunfilteredcache(self, 'dirstate'):
2806 for k in self.dirstate._filecache:
2812 for k in self.dirstate._filecache:
2807 try:
2813 try:
2808 delattr(self.dirstate, k)
2814 delattr(self.dirstate, k)
2809 except AttributeError:
2815 except AttributeError:
2810 pass
2816 pass
2811 delattr(self.unfiltered(), 'dirstate')
2817 delattr(self.unfiltered(), 'dirstate')
2812
2818
2813 def invalidate(self, clearfilecache=False):
2819 def invalidate(self, clearfilecache=False):
2814 """Invalidates both store and non-store parts other than dirstate
2820 """Invalidates both store and non-store parts other than dirstate
2815
2821
2816 If a transaction is running, invalidation of store is omitted,
2822 If a transaction is running, invalidation of store is omitted,
2817 because discarding in-memory changes might cause inconsistency
2823 because discarding in-memory changes might cause inconsistency
2818 (e.g. incomplete fncache causes unintentional failure, but
2824 (e.g. incomplete fncache causes unintentional failure, but
2819 redundant one doesn't).
2825 redundant one doesn't).
2820 """
2826 """
2821 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2827 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2822 for k in list(self._filecache.keys()):
2828 for k in list(self._filecache.keys()):
2823 # dirstate is invalidated separately in invalidatedirstate()
2829 # dirstate is invalidated separately in invalidatedirstate()
2824 if k == b'dirstate':
2830 if k == b'dirstate':
2825 continue
2831 continue
2826 if (
2832 if (
2827 k == b'changelog'
2833 k == b'changelog'
2828 and self.currenttransaction()
2834 and self.currenttransaction()
2829 and self.changelog._delayed
2835 and self.changelog._delayed
2830 ):
2836 ):
2831 # The changelog object may store unwritten revisions. We don't
2837 # The changelog object may store unwritten revisions. We don't
2832 # want to lose them.
2838 # want to lose them.
2833 # TODO: Solve the problem instead of working around it.
2839 # TODO: Solve the problem instead of working around it.
2834 continue
2840 continue
2835
2841
2836 if clearfilecache:
2842 if clearfilecache:
2837 del self._filecache[k]
2843 del self._filecache[k]
2838 try:
2844 try:
2839 delattr(unfiltered, k)
2845 delattr(unfiltered, k)
2840 except AttributeError:
2846 except AttributeError:
2841 pass
2847 pass
2842 self.invalidatecaches()
2848 self.invalidatecaches()
2843 if not self.currenttransaction():
2849 if not self.currenttransaction():
2844 # TODO: Changing contents of store outside transaction
2850 # TODO: Changing contents of store outside transaction
2845 # causes inconsistency. We should make in-memory store
2851 # causes inconsistency. We should make in-memory store
2846 # changes detectable, and abort if changed.
2852 # changes detectable, and abort if changed.
2847 self.store.invalidatecaches()
2853 self.store.invalidatecaches()
2848
2854
2849 def invalidateall(self):
2855 def invalidateall(self):
2850 """Fully invalidates both store and non-store parts, causing the
2856 """Fully invalidates both store and non-store parts, causing the
2851 subsequent operation to reread any outside changes."""
2857 subsequent operation to reread any outside changes."""
2852 # extensions should hook this to invalidate their caches
2858 # extensions should hook this to invalidate their caches
2853 self.invalidate()
2859 self.invalidate()
2854 self.invalidatedirstate()
2860 self.invalidatedirstate()
2855
2861
2856 @unfilteredmethod
2862 @unfilteredmethod
2857 def _refreshfilecachestats(self, tr):
2863 def _refreshfilecachestats(self, tr):
2858 """Reload stats of cached files so that they are flagged as valid"""
2864 """Reload stats of cached files so that they are flagged as valid"""
2859 for k, ce in self._filecache.items():
2865 for k, ce in self._filecache.items():
2860 k = pycompat.sysstr(k)
2866 k = pycompat.sysstr(k)
2861 if k == 'dirstate' or k not in self.__dict__:
2867 if k == 'dirstate' or k not in self.__dict__:
2862 continue
2868 continue
2863 ce.refresh()
2869 ce.refresh()
2864
2870
2865 def _lock(
2871 def _lock(
2866 self,
2872 self,
2867 vfs,
2873 vfs,
2868 lockname,
2874 lockname,
2869 wait,
2875 wait,
2870 releasefn,
2876 releasefn,
2871 acquirefn,
2877 acquirefn,
2872 desc,
2878 desc,
2873 ):
2879 ):
2874 timeout = 0
2880 timeout = 0
2875 warntimeout = 0
2881 warntimeout = 0
2876 if wait:
2882 if wait:
2877 timeout = self.ui.configint(b"ui", b"timeout")
2883 timeout = self.ui.configint(b"ui", b"timeout")
2878 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2884 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2879 # internal config: ui.signal-safe-lock
2885 # internal config: ui.signal-safe-lock
2880 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2886 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2881
2887
2882 l = lockmod.trylock(
2888 l = lockmod.trylock(
2883 self.ui,
2889 self.ui,
2884 vfs,
2890 vfs,
2885 lockname,
2891 lockname,
2886 timeout,
2892 timeout,
2887 warntimeout,
2893 warntimeout,
2888 releasefn=releasefn,
2894 releasefn=releasefn,
2889 acquirefn=acquirefn,
2895 acquirefn=acquirefn,
2890 desc=desc,
2896 desc=desc,
2891 signalsafe=signalsafe,
2897 signalsafe=signalsafe,
2892 )
2898 )
2893 return l
2899 return l
2894
2900
2895 def _afterlock(self, callback):
2901 def _afterlock(self, callback):
2896 """add a callback to be run when the repository is fully unlocked
2902 """add a callback to be run when the repository is fully unlocked
2897
2903
2898 The callback will be executed when the outermost lock is released
2904 The callback will be executed when the outermost lock is released
2899 (with wlock being higher level than 'lock')."""
2905 (with wlock being higher level than 'lock')."""
2900 for ref in (self._wlockref, self._lockref):
2906 for ref in (self._wlockref, self._lockref):
2901 l = ref and ref()
2907 l = ref and ref()
2902 if l and l.held:
2908 if l and l.held:
2903 l.postrelease.append(callback)
2909 l.postrelease.append(callback)
2904 break
2910 break
2905 else: # no lock has been found.
2911 else: # no lock has been found.
2906 callback(True)
2912 callback(True)
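
# Sketch (hypothetical extension code, `repo` assumed): registering a
# callback that runs once the repository is fully unlocked.
#
#   def notify(success):
#       if success:
#           repo.ui.status(b'all locks released\n')
#
#   repo._afterlock(notify)
#
# As the code above shows, if no lock is currently held the callback is
# invoked immediately with True.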
2907
2913
2908 def lock(self, wait=True):
2914 def lock(self, wait=True):
2909 """Lock the repository store (.hg/store) and return a weak reference
2915 """Lock the repository store (.hg/store) and return a weak reference
2910 to the lock. Use this before modifying the store (e.g. committing or
2916 to the lock. Use this before modifying the store (e.g. committing or
2911 stripping). If you are opening a transaction, get a lock as well.
2917 stripping). If you are opening a transaction, get a lock as well.
2912
2918
2913 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2919 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2914 'wlock' first to avoid a dead-lock hazard."""
2920 'wlock' first to avoid a dead-lock hazard."""
2915 l = self._currentlock(self._lockref)
2921 l = self._currentlock(self._lockref)
2916 if l is not None:
2922 if l is not None:
2917 l.lock()
2923 l.lock()
2918 return l
2924 return l
2919
2925
2920 l = self._lock(
2926 l = self._lock(
2921 vfs=self.svfs,
2927 vfs=self.svfs,
2922 lockname=b"lock",
2928 lockname=b"lock",
2923 wait=wait,
2929 wait=wait,
2924 releasefn=None,
2930 releasefn=None,
2925 acquirefn=self.invalidate,
2931 acquirefn=self.invalidate,
2926 desc=_(b'repository %s') % self.origroot,
2932 desc=_(b'repository %s') % self.origroot,
2927 )
2933 )
2928 self._lockref = weakref.ref(l)
2934 self._lockref = weakref.ref(l)
2929 return l
2935 return l
2930
2936
2931 def wlock(self, wait=True):
2937 def wlock(self, wait=True):
2932 """Lock the non-store parts of the repository (everything under
2938 """Lock the non-store parts of the repository (everything under
2933 .hg except .hg/store) and return a weak reference to the lock.
2939 .hg except .hg/store) and return a weak reference to the lock.
2934
2940
2935 Use this before modifying files in .hg.
2941 Use this before modifying files in .hg.
2936
2942
2937 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2943 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2938 'wlock' first to avoid a dead-lock hazard."""
2944 'wlock' first to avoid a dead-lock hazard."""
2939 l = self._wlockref() if self._wlockref else None
2945 l = self._wlockref() if self._wlockref else None
2940 if l is not None and l.held:
2946 if l is not None and l.held:
2941 l.lock()
2947 l.lock()
2942 return l
2948 return l
2943
2949
2944 # We do not need to check for non-waiting lock acquisition. Such
2950 # We do not need to check for non-waiting lock acquisition. Such
2945 # acquisition would not cause dead-lock as they would just fail.
2951 # acquisition would not cause dead-lock as they would just fail.
2946 if wait and (
2952 if wait and (
2947 self.ui.configbool(b'devel', b'all-warnings')
2953 self.ui.configbool(b'devel', b'all-warnings')
2948 or self.ui.configbool(b'devel', b'check-locks')
2954 or self.ui.configbool(b'devel', b'check-locks')
2949 ):
2955 ):
2950 if self._currentlock(self._lockref) is not None:
2956 if self._currentlock(self._lockref) is not None:
2951 self.ui.develwarn(b'"wlock" acquired after "lock"')
2957 self.ui.develwarn(b'"wlock" acquired after "lock"')
2952
2958
2953 def unlock():
2959 def unlock():
2954 if self.dirstate.pendingparentchange():
2960 if self.dirstate.pendingparentchange():
2955 self.dirstate.invalidate()
2961 self.dirstate.invalidate()
2956 else:
2962 else:
2957 self.dirstate.write(None)
2963 self.dirstate.write(None)
2958
2964
2959 self._filecache[b'dirstate'].refresh()
2965 self._filecache[b'dirstate'].refresh()
2960
2966
2961 l = self._lock(
2967 l = self._lock(
2962 self.vfs,
2968 self.vfs,
2963 b"wlock",
2969 b"wlock",
2964 wait,
2970 wait,
2965 unlock,
2971 unlock,
2966 self.invalidatedirstate,
2972 self.invalidatedirstate,
2967 _(b'working directory of %s') % self.origroot,
2973 _(b'working directory of %s') % self.origroot,
2968 )
2974 )
2969 self._wlockref = weakref.ref(l)
2975 self._wlockref = weakref.ref(l)
2970 return l
2976 return l
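
# Sketch of the documented acquisition order, wlock before lock, to avoid
# the dead-lock hazard both docstrings warn about (this is the same
# pattern commit() uses below):
#
#   with repo.wlock(), repo.lock():
#       ...  # mutate the working copy and the store together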
2971
2977
2972 def _currentlock(self, lockref):
2978 def _currentlock(self, lockref):
2973 """Returns the lock if it's held, or None if it's not."""
2979 """Returns the lock if it's held, or None if it's not."""
2974 if lockref is None:
2980 if lockref is None:
2975 return None
2981 return None
2976 l = lockref()
2982 l = lockref()
2977 if l is None or not l.held:
2983 if l is None or not l.held:
2978 return None
2984 return None
2979 return l
2985 return l
2980
2986
2981 def currentwlock(self):
2987 def currentwlock(self):
2982 """Returns the wlock if it's held, or None if it's not."""
2988 """Returns the wlock if it's held, or None if it's not."""
2983 return self._currentlock(self._wlockref)
2989 return self._currentlock(self._wlockref)
2984
2990
2985 def checkcommitpatterns(self, wctx, match, status, fail):
2991 def checkcommitpatterns(self, wctx, match, status, fail):
2986 """check for commit arguments that aren't committable"""
2992 """check for commit arguments that aren't committable"""
2987 if match.isexact() or match.prefix():
2993 if match.isexact() or match.prefix():
2988 matched = set(status.modified + status.added + status.removed)
2994 matched = set(status.modified + status.added + status.removed)
2989
2995
2990 for f in match.files():
2996 for f in match.files():
2991 f = self.dirstate.normalize(f)
2997 f = self.dirstate.normalize(f)
2992 if f == b'.' or f in matched or f in wctx.substate:
2998 if f == b'.' or f in matched or f in wctx.substate:
2993 continue
2999 continue
2994 if f in status.deleted:
3000 if f in status.deleted:
2995 fail(f, _(b'file not found!'))
3001 fail(f, _(b'file not found!'))
2996 # Is it a directory that exists or used to exist?
3002 # Is it a directory that exists or used to exist?
2997 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3003 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2998 d = f + b'/'
3004 d = f + b'/'
2999 for mf in matched:
3005 for mf in matched:
3000 if mf.startswith(d):
3006 if mf.startswith(d):
3001 break
3007 break
3002 else:
3008 else:
3003 fail(f, _(b"no match under directory!"))
3009 fail(f, _(b"no match under directory!"))
3004 elif f not in self.dirstate:
3010 elif f not in self.dirstate:
3005 fail(f, _(b"file not tracked!"))
3011 fail(f, _(b"file not tracked!"))
3006
3012
3007 @unfilteredmethod
3013 @unfilteredmethod
3008 def commit(
3014 def commit(
3009 self,
3015 self,
3010 text=b"",
3016 text=b"",
3011 user=None,
3017 user=None,
3012 date=None,
3018 date=None,
3013 match=None,
3019 match=None,
3014 force=False,
3020 force=False,
3015 editor=None,
3021 editor=None,
3016 extra=None,
3022 extra=None,
3017 ):
3023 ):
3018 """Add a new revision to current repository.
3024 """Add a new revision to current repository.
3019
3025
3020 Revision information is gathered from the working directory,
3026 Revision information is gathered from the working directory,
3021 match can be used to filter the committed files. If editor is
3027 match can be used to filter the committed files. If editor is
3022 supplied, it is called to get a commit message.
3028 supplied, it is called to get a commit message.
3023 """
3029 """
3024 if extra is None:
3030 if extra is None:
3025 extra = {}
3031 extra = {}
3026
3032
3027 def fail(f, msg):
3033 def fail(f, msg):
3028 raise error.InputError(b'%s: %s' % (f, msg))
3034 raise error.InputError(b'%s: %s' % (f, msg))
3029
3035
3030 if not match:
3036 if not match:
3031 match = matchmod.always()
3037 match = matchmod.always()
3032
3038
3033 if not force:
3039 if not force:
3034 match.bad = fail
3040 match.bad = fail
3035
3041
3036 # lock() for recent changelog (see issue4368)
3042 # lock() for recent changelog (see issue4368)
3037 with self.wlock(), self.lock():
3043 with self.wlock(), self.lock():
3038 wctx = self[None]
3044 wctx = self[None]
3039 merge = len(wctx.parents()) > 1
3045 merge = len(wctx.parents()) > 1
3040
3046
3041 if not force and merge and not match.always():
3047 if not force and merge and not match.always():
3042 raise error.Abort(
3048 raise error.Abort(
3043 _(
3049 _(
3044 b'cannot partially commit a merge '
3050 b'cannot partially commit a merge '
3045 b'(do not specify files or patterns)'
3051 b'(do not specify files or patterns)'
3046 )
3052 )
3047 )
3053 )
3048
3054
3049 status = self.status(match=match, clean=force)
3055 status = self.status(match=match, clean=force)
3050 if force:
3056 if force:
3051 status.modified.extend(
3057 status.modified.extend(
3052 status.clean
3058 status.clean
3053 ) # mq may commit clean files
3059 ) # mq may commit clean files
3054
3060
3055 # check subrepos
3061 # check subrepos
3056 subs, commitsubs, newstate = subrepoutil.precommit(
3062 subs, commitsubs, newstate = subrepoutil.precommit(
3057 self.ui, wctx, status, match, force=force
3063 self.ui, wctx, status, match, force=force
3058 )
3064 )
3059
3065
3060 # make sure all explicit patterns are matched
3066 # make sure all explicit patterns are matched
3061 if not force:
3067 if not force:
3062 self.checkcommitpatterns(wctx, match, status, fail)
3068 self.checkcommitpatterns(wctx, match, status, fail)
3063
3069
3064 cctx = context.workingcommitctx(
3070 cctx = context.workingcommitctx(
3065 self, status, text, user, date, extra
3071 self, status, text, user, date, extra
3066 )
3072 )
3067
3073
3068 ms = mergestatemod.mergestate.read(self)
3074 ms = mergestatemod.mergestate.read(self)
3069 mergeutil.checkunresolved(ms)
3075 mergeutil.checkunresolved(ms)
3070
3076
3071 # internal config: ui.allowemptycommit
3077 # internal config: ui.allowemptycommit
3072 if cctx.isempty() and not self.ui.configbool(
3078 if cctx.isempty() and not self.ui.configbool(
3073 b'ui', b'allowemptycommit'
3079 b'ui', b'allowemptycommit'
3074 ):
3080 ):
3075 self.ui.debug(b'nothing to commit, clearing merge state\n')
3081 self.ui.debug(b'nothing to commit, clearing merge state\n')
3076 ms.reset()
3082 ms.reset()
3077 return None
3083 return None
3078
3084
3079 if merge and cctx.deleted():
3085 if merge and cctx.deleted():
3080 raise error.Abort(_(b"cannot commit merge with missing files"))
3086 raise error.Abort(_(b"cannot commit merge with missing files"))
3081
3087
3082 if editor:
3088 if editor:
3083 cctx._text = editor(self, cctx, subs)
3089 cctx._text = editor(self, cctx, subs)
3084 edited = text != cctx._text
3090 edited = text != cctx._text
3085
3091
3086 # Save commit message in case this transaction gets rolled back
3092 # Save commit message in case this transaction gets rolled back
3087 # (e.g. by a pretxncommit hook). Leave the content alone on
3093 # (e.g. by a pretxncommit hook). Leave the content alone on
3088 # the assumption that the user will use the same editor again.
3094 # the assumption that the user will use the same editor again.
3089 msgfn = self.savecommitmessage(cctx._text)
3095 msgfn = self.savecommitmessage(cctx._text)
3090
3096
3091 # commit subs and write new state
3097 # commit subs and write new state
3092 if subs:
3098 if subs:
3093 uipathfn = scmutil.getuipathfn(self)
3099 uipathfn = scmutil.getuipathfn(self)
3094 for s in sorted(commitsubs):
3100 for s in sorted(commitsubs):
3095 sub = wctx.sub(s)
3101 sub = wctx.sub(s)
3096 self.ui.status(
3102 self.ui.status(
3097 _(b'committing subrepository %s\n')
3103 _(b'committing subrepository %s\n')
3098 % uipathfn(subrepoutil.subrelpath(sub))
3104 % uipathfn(subrepoutil.subrelpath(sub))
3099 )
3105 )
3100 sr = sub.commit(cctx._text, user, date)
3106 sr = sub.commit(cctx._text, user, date)
3101 newstate[s] = (newstate[s][0], sr)
3107 newstate[s] = (newstate[s][0], sr)
3102 subrepoutil.writestate(self, newstate)
3108 subrepoutil.writestate(self, newstate)
3103
3109
3104 p1, p2 = self.dirstate.parents()
3110 p1, p2 = self.dirstate.parents()
3105 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3111 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3106 try:
3112 try:
3107 self.hook(
3113 self.hook(
3108 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3114 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3109 )
3115 )
3110 with self.transaction(b'commit'):
3116 with self.transaction(b'commit'):
3111 ret = self.commitctx(cctx, True)
3117 ret = self.commitctx(cctx, True)
3112 # update bookmarks, dirstate and mergestate
3118 # update bookmarks, dirstate and mergestate
3113 bookmarks.update(self, [p1, p2], ret)
3119 bookmarks.update(self, [p1, p2], ret)
3114 cctx.markcommitted(ret)
3120 cctx.markcommitted(ret)
3115 ms.reset()
3121 ms.reset()
3116 except: # re-raises
3122 except: # re-raises
3117 if edited:
3123 if edited:
3118 self.ui.write(
3124 self.ui.write(
3119 _(b'note: commit message saved in %s\n') % msgfn
3125 _(b'note: commit message saved in %s\n') % msgfn
3120 )
3126 )
3121 self.ui.write(
3127 self.ui.write(
3122 _(
3128 _(
3123 b"note: use 'hg commit --logfile "
3129 b"note: use 'hg commit --logfile "
3124 b".hg/last-message.txt --edit' to reuse it\n"
3130 b".hg/last-message.txt --edit' to reuse it\n"
3125 )
3131 )
3126 )
3132 )
3127 raise
3133 raise
3128
3134
3129 def commithook(unused_success):
3135 def commithook(unused_success):
3130 # hack for commands that use a temporary commit (e.g. histedit):
3136 # hack for commands that use a temporary commit (e.g. histedit):
3131 # the temporary commit may have been stripped before the hook runs
3137 # the temporary commit may have been stripped before the hook runs
3132 if self.changelog.hasnode(ret):
3138 if self.changelog.hasnode(ret):
3133 self.hook(
3139 self.hook(
3134 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3140 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3135 )
3141 )
3136
3142
3137 self._afterlock(commithook)
3143 self._afterlock(commithook)
3138 return ret
3144 return ret
3139
3145
3140 @unfilteredmethod
3146 @unfilteredmethod
3141 def commitctx(self, ctx, error=False, origctx=None):
3147 def commitctx(self, ctx, error=False, origctx=None):
3142 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3148 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3143
3149
3144 @unfilteredmethod
3150 @unfilteredmethod
3145 def destroying(self):
3151 def destroying(self):
3146 """Inform the repository that nodes are about to be destroyed.
3152 """Inform the repository that nodes are about to be destroyed.
3147 Intended for use by strip and rollback, so there's a common
3153 Intended for use by strip and rollback, so there's a common
3148 place for anything that has to be done before destroying history.
3154 place for anything that has to be done before destroying history.
3149
3155
3150 This is mostly useful for saving state that is in memory and waiting
3156 This is mostly useful for saving state that is in memory and waiting
3151 to be flushed when the current lock is released. Because a call to
3157 to be flushed when the current lock is released. Because a call to
3152 destroyed is imminent, the repo will be invalidated causing those
3158 destroyed is imminent, the repo will be invalidated causing those
3153 changes to stay in memory (waiting for the next unlock), or vanish
3159 changes to stay in memory (waiting for the next unlock), or vanish
3154 completely.
3160 completely.
3155 """
3161 """
3156 # When using the same lock to commit and strip, the phasecache is left
3162 # When using the same lock to commit and strip, the phasecache is left
3157 # dirty after committing. Then when we strip, the repo is invalidated,
3163 # dirty after committing. Then when we strip, the repo is invalidated,
3158 # causing those changes to disappear.
3164 # causing those changes to disappear.
3159 if '_phasecache' in vars(self):
3165 if '_phasecache' in vars(self):
3160 self._phasecache.write()
3166 self._phasecache.write()
3161
3167
3162 @unfilteredmethod
3168 @unfilteredmethod
3163 def destroyed(self):
3169 def destroyed(self):
3164 """Inform the repository that nodes have been destroyed.
3170 """Inform the repository that nodes have been destroyed.
3165 Intended for use by strip and rollback, so there's a common
3171 Intended for use by strip and rollback, so there's a common
3166 place for anything that has to be done after destroying history.
3172 place for anything that has to be done after destroying history.
3167 """
3173 """
3168 # When one tries to:
3174 # When one tries to:
3169 # 1) destroy nodes thus calling this method (e.g. strip)
3175 # 1) destroy nodes thus calling this method (e.g. strip)
3170 # 2) use phasecache somewhere (e.g. commit)
3176 # 2) use phasecache somewhere (e.g. commit)
3171 #
3177 #
3172 # then 2) will fail because the phasecache contains nodes that were
3178 # then 2) will fail because the phasecache contains nodes that were
3173 # removed. We can either remove phasecache from the filecache,
3179 # removed. We can either remove phasecache from the filecache,
3174 # causing it to reload next time it is accessed, or simply filter
3180 # causing it to reload next time it is accessed, or simply filter
3175 # the removed nodes now and write the updated cache.
3181 # the removed nodes now and write the updated cache.
3176 self._phasecache.filterunknown(self)
3182 self._phasecache.filterunknown(self)
3177 self._phasecache.write()
3183 self._phasecache.write()
3178
3184
3179 # refresh all repository caches
3185 # refresh all repository caches
3180 self.updatecaches()
3186 self.updatecaches()
3181
3187
3182 # Ensure the persistent tag cache is updated. Doing it now
3188 # Ensure the persistent tag cache is updated. Doing it now
3183 # means that the tag cache only has to worry about destroyed
3189 # means that the tag cache only has to worry about destroyed
3184 # heads immediately after a strip/rollback. That in turn
3190 # heads immediately after a strip/rollback. That in turn
3185 # guarantees that "cachetip == currenttip" (comparing both rev
3191 # guarantees that "cachetip == currenttip" (comparing both rev
3186 # and node) always means no nodes have been added or destroyed.
3192 # and node) always means no nodes have been added or destroyed.
3187
3193
3188 # XXX this is suboptimal when qrefresh'ing: we strip the current
3194 # XXX this is suboptimal when qrefresh'ing: we strip the current
3189 # head, refresh the tag cache, then immediately add a new head.
3195 # head, refresh the tag cache, then immediately add a new head.
3190 # But I think doing it this way is necessary for the "instant
3196 # But I think doing it this way is necessary for the "instant
3191 # tag cache retrieval" case to work.
3197 # tag cache retrieval" case to work.
3192 self.invalidate()
3198 self.invalidate()
3193
3199
3194 def status(
3200 def status(
3195 self,
3201 self,
3196 node1=b'.',
3202 node1=b'.',
3197 node2=None,
3203 node2=None,
3198 match=None,
3204 match=None,
3199 ignored=False,
3205 ignored=False,
3200 clean=False,
3206 clean=False,
3201 unknown=False,
3207 unknown=False,
3202 listsubrepos=False,
3208 listsubrepos=False,
3203 ):
3209 ):
3204 '''a convenience method that calls node1.status(node2)'''
3210 '''a convenience method that calls node1.status(node2)'''
3205 return self[node1].status(
3211 return self[node1].status(
3206 node2, match, ignored, clean, unknown, listsubrepos
3212 node2, match, ignored, clean, unknown, listsubrepos
3207 )
3213 )
3208
3214
3209 def addpostdsstatus(self, ps):
3215 def addpostdsstatus(self, ps):
3210 """Add a callback to run within the wlock, at the point at which status
3216 """Add a callback to run within the wlock, at the point at which status
3211 fixups happen.
3217 fixups happen.
3212
3218
3213 On status completion, callback(wctx, status) will be called with the
3219 On status completion, callback(wctx, status) will be called with the
3214 wlock held, unless the dirstate has changed from underneath or the wlock
3220 wlock held, unless the dirstate has changed from underneath or the wlock
3215 couldn't be grabbed.
3221 couldn't be grabbed.
3216
3222
3217 Callbacks should not capture and use a cached copy of the dirstate --
3223 Callbacks should not capture and use a cached copy of the dirstate --
3218 it might change in the meanwhile. Instead, they should access the
3224 it might change in the meanwhile. Instead, they should access the
3219 dirstate via wctx.repo().dirstate.
3225 dirstate via wctx.repo().dirstate.
3220
3226
3221 This list is emptied out after each status run -- extensions should
3227 This list is emptied out after each status run -- extensions should
3222 make sure they add to this list each time dirstate.status is called.
3228 make sure they add to this list each time dirstate.status is called.
3223 Extensions should also make sure they don't call this for statuses
3229 Extensions should also make sure they don't call this for statuses
3224 that don't involve the dirstate.
3230 that don't involve the dirstate.
3225 """
3231 """
3226
3232
3227 # The list is located here for uniqueness reasons -- it is actually
3233 # The list is located here for uniqueness reasons -- it is actually
3228 # managed by the workingctx, but that isn't unique per-repo.
3234 # managed by the workingctx, but that isn't unique per-repo.
3229 self._postdsstatus.append(ps)
3235 self._postdsstatus.append(ps)
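
# Sketch of a post-dirstate-status callback registration (hypothetical
# extension code; the signature follows the docstring above):
#
#   def fixup(wctx, status):
#       # access fresh state via wctx.repo().dirstate, never a cached copy
#       pass
#
#   repo.addpostdsstatus(fixup)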
3230
3236
3231 def postdsstatus(self):
3237 def postdsstatus(self):
3232 """Used by workingctx to get the list of post-dirstate-status hooks."""
3238 """Used by workingctx to get the list of post-dirstate-status hooks."""
3233 return self._postdsstatus
3239 return self._postdsstatus
3234
3240
3235 def clearpostdsstatus(self):
3241 def clearpostdsstatus(self):
3236 """Used by workingctx to clear post-dirstate-status hooks."""
3242 """Used by workingctx to clear post-dirstate-status hooks."""
3237 del self._postdsstatus[:]
3243 del self._postdsstatus[:]
3238
3244
3239 def heads(self, start=None):
3245 def heads(self, start=None):
3240 if start is None:
3246 if start is None:
3241 cl = self.changelog
3247 cl = self.changelog
3242 headrevs = reversed(cl.headrevs())
3248 headrevs = reversed(cl.headrevs())
3243 return [cl.node(rev) for rev in headrevs]
3249 return [cl.node(rev) for rev in headrevs]
3244
3250
3245 heads = self.changelog.heads(start)
3251 heads = self.changelog.heads(start)
3246 # sort the output in rev descending order
3252 # sort the output in rev descending order
3247 return sorted(heads, key=self.changelog.rev, reverse=True)
3253 return sorted(heads, key=self.changelog.rev, reverse=True)
3248
3254
3249 def branchheads(self, branch=None, start=None, closed=False):
3255 def branchheads(self, branch=None, start=None, closed=False):
3250 """return a (possibly filtered) list of heads for the given branch
3256 """return a (possibly filtered) list of heads for the given branch
3251
3257
3252 Heads are returned in topological order, from newest to oldest.
3258 Heads are returned in topological order, from newest to oldest.
3253 If branch is None, use the dirstate branch.
3259 If branch is None, use the dirstate branch.
3254 If start is not None, return only heads reachable from start.
3260 If start is not None, return only heads reachable from start.
3255 If closed is True, return heads that are marked as closed as well.
3261 If closed is True, return heads that are marked as closed as well.
3256 """
3262 """
3257 if branch is None:
3263 if branch is None:
3258 branch = self[None].branch()
3264 branch = self[None].branch()
3259 branches = self.branchmap()
3265 branches = self.branchmap()
3260 if not branches.hasbranch(branch):
3266 if not branches.hasbranch(branch):
3261 return []
3267 return []
3262 # the cache returns heads ordered lowest to highest
3268 # the cache returns heads ordered lowest to highest
3263 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3269 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3264 if start is not None:
3270 if start is not None:
3265 # filter out the heads that cannot be reached from startrev
3271 # filter out the heads that cannot be reached from startrev
3266 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3272 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3267 bheads = [h for h in bheads if h in fbheads]
3273 bheads = [h for h in bheads if h in fbheads]
3268 return bheads
3274 return bheads
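
# Sketch (assuming `repo`): newest-first heads of the default branch,
# including closed heads:
#
#   for node in repo.branchheads(b'default', closed=True):
#       repo.ui.status(b'%s\n' % hex(node))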
3269
3275
3270 def branches(self, nodes):
3276 def branches(self, nodes):
3271 if not nodes:
3277 if not nodes:
3272 nodes = [self.changelog.tip()]
3278 nodes = [self.changelog.tip()]
3273 b = []
3279 b = []
3274 for n in nodes:
3280 for n in nodes:
3275 t = n
3281 t = n
3276 while True:
3282 while True:
3277 p = self.changelog.parents(n)
3283 p = self.changelog.parents(n)
3278 if p[1] != self.nullid or p[0] == self.nullid:
3284 if p[1] != self.nullid or p[0] == self.nullid:
3279 b.append((t, n, p[0], p[1]))
3285 b.append((t, n, p[0], p[1]))
3280 break
3286 break
3281 n = p[0]
3287 n = p[0]
3282 return b
3288 return b
3283
3289
3284 def between(self, pairs):
3290 def between(self, pairs):
3285 r = []
3291 r = []
3286
3292
3287 for top, bottom in pairs:
3293 for top, bottom in pairs:
3288 n, l, i = top, [], 0
3294 n, l, i = top, [], 0
3289 f = 1
3295 f = 1
3290
3296
3291 while n != bottom and n != self.nullid:
3297 while n != bottom and n != self.nullid:
3292 p = self.changelog.parents(n)[0]
3298 p = self.changelog.parents(n)[0]
3293 if i == f:
3299 if i == f:
3294 l.append(n)
3300 l.append(n)
3295 f = f * 2
3301 f = f * 2
3296 n = p
3302 n = p
3297 i += 1
3303 i += 1
3298
3304
3299 r.append(l)
3305 r.append(l)
3300
3306
3301 return r
3307 return r
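
# A self-contained sketch (hypothetical names) of the sampling scheme
# implemented above: walk the first-parent chain from `top` towards
# `bottom`, keeping nodes at exponentially growing distances 1, 2, 4, 8...
def _sample_between(parent_of, top, bottom, null=None):
    sampled, n, i, f = [], top, 0, 1
    while n != bottom and n != null:
        if i == f:
            sampled.append(n)
            f *= 2
        n = parent_of(n)
        i += 1
    return sampled

# e.g. on a linear chain 9 -> 8 -> ... -> 0:
#   _sample_between(lambda n: n - 1, 9, 0) == [8, 7, 5, 1]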
3302
3308
3303 def checkpush(self, pushop):
3309 def checkpush(self, pushop):
3304 """Extensions can override this function if additional checks have
3310 """Extensions can override this function if additional checks have
3305 to be performed before pushing, or call it if they override push
3311 to be performed before pushing, or call it if they override push
3306 command.
3312 command.
3307 """
3313 """
3308
3314
3309 @unfilteredpropertycache
3315 @unfilteredpropertycache
3310 def prepushoutgoinghooks(self):
3316 def prepushoutgoinghooks(self):
3311 """Return util.hooks consists of a pushop with repo, remote, outgoing
3317 """Return util.hooks consists of a pushop with repo, remote, outgoing
3312 methods, which are called before pushing changesets.
3318 methods, which are called before pushing changesets.
3313 """
3319 """
3314 return util.hooks()
3320 return util.hooks()
3315
3321
3316 def pushkey(self, namespace, key, old, new):
3322 def pushkey(self, namespace, key, old, new):
3317 try:
3323 try:
3318 tr = self.currenttransaction()
3324 tr = self.currenttransaction()
3319 hookargs = {}
3325 hookargs = {}
3320 if tr is not None:
3326 if tr is not None:
3321 hookargs.update(tr.hookargs)
3327 hookargs.update(tr.hookargs)
3322 hookargs = pycompat.strkwargs(hookargs)
3328 hookargs = pycompat.strkwargs(hookargs)
3323 hookargs['namespace'] = namespace
3329 hookargs['namespace'] = namespace
3324 hookargs['key'] = key
3330 hookargs['key'] = key
3325 hookargs['old'] = old
3331 hookargs['old'] = old
3326 hookargs['new'] = new
3332 hookargs['new'] = new
3327 self.hook(b'prepushkey', throw=True, **hookargs)
3333 self.hook(b'prepushkey', throw=True, **hookargs)
3328 except error.HookAbort as exc:
3334 except error.HookAbort as exc:
3329 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3335 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3330 if exc.hint:
3336 if exc.hint:
3331 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3337 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3332 return False
3338 return False
3333 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3339 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3334 ret = pushkey.push(self, namespace, key, old, new)
3340 ret = pushkey.push(self, namespace, key, old, new)
3335
3341
3336 def runhook(unused_success):
3342 def runhook(unused_success):
3337 self.hook(
3343 self.hook(
3338 b'pushkey',
3344 b'pushkey',
3339 namespace=namespace,
3345 namespace=namespace,
3340 key=key,
3346 key=key,
3341 old=old,
3347 old=old,
3342 new=new,
3348 new=new,
3343 ret=ret,
3349 ret=ret,
3344 )
3350 )
3345
3351
3346 self._afterlock(runhook)
3352 self._afterlock(runhook)
3347 return ret
3353 return ret
3348
3354
3349 def listkeys(self, namespace):
3355 def listkeys(self, namespace):
3350 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3356 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3351 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3357 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3352 values = pushkey.list(self, namespace)
3358 values = pushkey.list(self, namespace)
3353 self.hook(b'listkeys', namespace=namespace, values=values)
3359 self.hook(b'listkeys', namespace=namespace, values=values)
3354 return values
3360 return values
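
# Sketch of the pushkey protocol as exposed by the two methods above,
# using the well-known 'bookmarks' namespace (values are hex nodes and
# b'' denotes an absent key; `repo` and `newnode` are assumed):
#
#   marks = repo.listkeys(b'bookmarks')
#   old = marks.get(b'mybook', b'')
#   ok = repo.pushkey(b'bookmarks', b'mybook', old, newnode)
#
# pushkey() returns a false value when a prepushkey hook aborts the update.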
3355
3361
3356 def debugwireargs(self, one, two, three=None, four=None, five=None):
3362 def debugwireargs(self, one, two, three=None, four=None, five=None):
3357 '''used to test argument passing over the wire'''
3363 '''used to test argument passing over the wire'''
3358 return b"%s %s %s %s %s" % (
3364 return b"%s %s %s %s %s" % (
3359 one,
3365 one,
3360 two,
3366 two,
3361 pycompat.bytestr(three),
3367 pycompat.bytestr(three),
3362 pycompat.bytestr(four),
3368 pycompat.bytestr(four),
3363 pycompat.bytestr(five),
3369 pycompat.bytestr(five),
3364 )
3370 )
3365
3371
3366 def savecommitmessage(self, text):
3372 def savecommitmessage(self, text):
3367 fp = self.vfs(b'last-message.txt', b'wb')
3373 fp = self.vfs(b'last-message.txt', b'wb')
3368 try:
3374 try:
3369 fp.write(text)
3375 fp.write(text)
3370 finally:
3376 finally:
3371 fp.close()
3377 fp.close()
3372 return self.pathto(fp.name[len(self.root) + 1 :])
3378 return self.pathto(fp.name[len(self.root) + 1 :])
3373
3379
3374 def register_wanted_sidedata(self, category):
3380 def register_wanted_sidedata(self, category):
3375 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3381 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3376 # Only revlogv2 repos can want sidedata.
3382 # Only revlogv2 repos can want sidedata.
3377 return
3383 return
3378 self._wanted_sidedata.add(pycompat.bytestr(category))
3384 self._wanted_sidedata.add(pycompat.bytestr(category))
3379
3385
3380 def register_sidedata_computer(
3386 def register_sidedata_computer(
3381 self, kind, category, keys, computer, flags, replace=False
3387 self, kind, category, keys, computer, flags, replace=False
3382 ):
3388 ):
3383 if kind not in revlogconst.ALL_KINDS:
3389 if kind not in revlogconst.ALL_KINDS:
3384 msg = _(b"unexpected revlog kind '%s'.")
3390 msg = _(b"unexpected revlog kind '%s'.")
3385 raise error.ProgrammingError(msg % kind)
3391 raise error.ProgrammingError(msg % kind)
3386 category = pycompat.bytestr(category)
3392 category = pycompat.bytestr(category)
3387 already_registered = category in self._sidedata_computers.get(kind, [])
3393 already_registered = category in self._sidedata_computers.get(kind, [])
3388 if already_registered and not replace:
3394 if already_registered and not replace:
3389 msg = _(
3395 msg = _(
3390 b"cannot register a sidedata computer twice for category '%s'."
3396 b"cannot register a sidedata computer twice for category '%s'."
3391 )
3397 )
3392 raise error.ProgrammingError(msg % category)
3398 raise error.ProgrammingError(msg % category)
3393 if replace and not already_registered:
3399 if replace and not already_registered:
3394 msg = _(
3400 msg = _(
3395 b"cannot replace a sidedata computer that isn't registered "
3401 b"cannot replace a sidedata computer that isn't registered "
3396 b"for category '%s'."
3402 b"for category '%s'."
3397 )
3403 )
3398 raise error.ProgrammingError(msg % category)
3404 raise error.ProgrammingError(msg % category)
3399 self._sidedata_computers.setdefault(kind, {})
3405 self._sidedata_computers.setdefault(kind, {})
3400 self._sidedata_computers[kind][category] = (keys, computer, flags)
3406 self._sidedata_computers[kind][category] = (keys, computer, flags)
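
# Sketch of registering a sidedata computer (the category, keys and
# computer body are hypothetical; the call matches the signature above,
# and the computer's exact contract is an assumption, see the sidedata
# helpers for the authoritative one):
#
#   def computer(repo, store, rev, old_sidedata):
#       ...  # return the updated sidedata for `rev`
#
#   repo.register_sidedata_computer(
#       revlogconst.KIND_CHANGELOG,
#       b'exp-my-category',
#       (b'exp-my-category',),
#       computer,
#       0,  # no extra revision flags
#   )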
3401
3407
3402
3408
3403 # used to avoid circular references so destructors work
3409 # used to avoid circular references so destructors work
3404 def aftertrans(files):
3410 def aftertrans(files):
3405 renamefiles = [tuple(t) for t in files]
3411 renamefiles = [tuple(t) for t in files]
3406
3412
3407 def a():
3413 def a():
3408 for vfs, src, dest in renamefiles:
3414 for vfs, src, dest in renamefiles:
3409 # if src and dest refer to the same file, vfs.rename is a no-op,
3415 # if src and dest refer to the same file, vfs.rename is a no-op,
3410 # leaving both src and dest on disk. Delete dest to make sure
3416 # leaving both src and dest on disk. Delete dest to make sure
3411 # the rename couldn't be such a no-op.
3417 # the rename couldn't be such a no-op.
3412 vfs.tryunlink(dest)
3418 vfs.tryunlink(dest)
3413 try:
3419 try:
3414 vfs.rename(src, dest)
3420 vfs.rename(src, dest)
3415 except OSError: # journal file does not yet exist
3421 except OSError: # journal file does not yet exist
3416 pass
3422 pass
3417
3423
3418 return a
3424 return a
3419
3425
3420
3426
3421 def undoname(fn):
3427 def undoname(fn):
3422 base, name = os.path.split(fn)
3428 base, name = os.path.split(fn)
3423 assert name.startswith(b'journal')
3429 assert name.startswith(b'journal')
3424 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3430 return os.path.join(base, name.replace(b'journal', b'undo', 1))
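
# For example (sketch): undoname(b'/repo/.hg/store/journal') returns
# b'/repo/.hg/store/undo', and undoname(b'journal.dirstate') returns
# b'undo.dirstate'.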
3425
3431
3426
3432
3427 def instance(ui, path, create, intents=None, createopts=None):
3433 def instance(ui, path, create, intents=None, createopts=None):
3428 localpath = urlutil.urllocalpath(path)
3434 localpath = urlutil.urllocalpath(path)
3429 if create:
3435 if create:
3430 createrepository(ui, localpath, createopts=createopts)
3436 createrepository(ui, localpath, createopts=createopts)
3431
3437
3432 return makelocalrepository(ui, localpath, intents=intents)
3438 return makelocalrepository(ui, localpath, intents=intents)
3433
3439
3434
3440
3435 def islocal(path):
3441 def islocal(path):
3436 return True
3442 return True
3437
3443
3438
3444
3439 def defaultcreateopts(ui, createopts=None):
3445 def defaultcreateopts(ui, createopts=None):
3440 """Populate the default creation options for a repository.
3446 """Populate the default creation options for a repository.
3441
3447
3442 A dictionary of explicitly requested creation options can be passed
3448 A dictionary of explicitly requested creation options can be passed
3443 in. Missing keys will be populated.
3449 in. Missing keys will be populated.
3444 """
3450 """
3445 createopts = dict(createopts or {})
3451 createopts = dict(createopts or {})
3446
3452
3447 if b'backend' not in createopts:
3453 if b'backend' not in createopts:
3448 # experimental config: storage.new-repo-backend
3454 # experimental config: storage.new-repo-backend
3449 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3455 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3450
3456
3451 return createopts
3457 return createopts
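
# Sketch (assuming a `ui` object): fill in defaults, then derive the
# requirements for a new repository; newreporequirements() below insists
# that the b'backend' key is present:
#
#   opts = defaultcreateopts(ui, createopts={b'narrowfiles': False})
#   reqs = newreporequirements(ui, opts)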
3452
3458
3453
3459
3454 def newreporequirements(ui, createopts):
3460 def newreporequirements(ui, createopts):
3455 """Determine the set of requirements for a new local repository.
3461 """Determine the set of requirements for a new local repository.
3456
3462
3457 Extensions can wrap this function to specify custom requirements for
3463 Extensions can wrap this function to specify custom requirements for
3458 new repositories.
3464 new repositories.
3459 """
3465 """
3460 # If the repo is being created from a shared repository, we copy
3466 # If the repo is being created from a shared repository, we copy
3461 # its requirements.
3467 # its requirements.
3462 if b'sharedrepo' in createopts:
3468 if b'sharedrepo' in createopts:
3463 requirements = set(createopts[b'sharedrepo'].requirements)
3469 requirements = set(createopts[b'sharedrepo'].requirements)
3464 if createopts.get(b'sharedrelative'):
3470 if createopts.get(b'sharedrelative'):
3465 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3471 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3466 else:
3472 else:
3467 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3473 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3468
3474
3469 return requirements
3475 return requirements
3470
3476
3471 if b'backend' not in createopts:
3477 if b'backend' not in createopts:
3472 raise error.ProgrammingError(
3478 raise error.ProgrammingError(
3473 b'backend key not present in createopts; '
3479 b'backend key not present in createopts; '
3474 b'was defaultcreateopts() called?'
3480 b'was defaultcreateopts() called?'
3475 )
3481 )
3476
3482
3477 if createopts[b'backend'] != b'revlogv1':
3483 if createopts[b'backend'] != b'revlogv1':
3478 raise error.Abort(
3484 raise error.Abort(
3479 _(
3485 _(
3480 b'unable to determine repository requirements for '
3486 b'unable to determine repository requirements for '
3481 b'storage backend: %s'
3487 b'storage backend: %s'
3482 )
3488 )
3483 % createopts[b'backend']
3489 % createopts[b'backend']
3484 )
3490 )
3485
3491
3486 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3492 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3487 if ui.configbool(b'format', b'usestore'):
3493 if ui.configbool(b'format', b'usestore'):
3488 requirements.add(requirementsmod.STORE_REQUIREMENT)
3494 requirements.add(requirementsmod.STORE_REQUIREMENT)
3489 if ui.configbool(b'format', b'usefncache'):
3495 if ui.configbool(b'format', b'usefncache'):
3490 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3496 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3491 if ui.configbool(b'format', b'dotencode'):
3497 if ui.configbool(b'format', b'dotencode'):
3492 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3498 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3493
3499
3494 compengines = ui.configlist(b'format', b'revlog-compression')
3500 compengines = ui.configlist(b'format', b'revlog-compression')
3495 for compengine in compengines:
3501 for compengine in compengines:
3496 if compengine in util.compengines:
3502 if compengine in util.compengines:
3497 engine = util.compengines[compengine]
3503 engine = util.compengines[compengine]
3498 if engine.available() and engine.revlogheader():
3504 if engine.available() and engine.revlogheader():
3499 break
3505 break
3500 else:
3506 else:
3501 raise error.Abort(
3507 raise error.Abort(
3502 _(
3508 _(
3503 b'compression engines %s defined by '
3509 b'compression engines %s defined by '
3504 b'format.revlog-compression not available'
3510 b'format.revlog-compression not available'
3505 )
3511 )
3506 % b', '.join(b'"%s"' % e for e in compengines),
3512 % b', '.join(b'"%s"' % e for e in compengines),
3507 hint=_(
3513 hint=_(
3508 b'run "hg debuginstall" to list available '
3514 b'run "hg debuginstall" to list available '
3509 b'compression engines'
3515 b'compression engines'
3510 ),
3516 ),
3511 )
3517 )
3512
3518
3513 # zlib is the historical default and doesn't need an explicit requirement.
3519 # zlib is the historical default and doesn't need an explicit requirement.
3514 if compengine == b'zstd':
3520 if compengine == b'zstd':
3515 requirements.add(b'revlog-compression-zstd')
3521 requirements.add(b'revlog-compression-zstd')
3516 elif compengine != b'zlib':
3522 elif compengine != b'zlib':
3517 requirements.add(b'exp-compression-%s' % compengine)
3523 requirements.add(b'exp-compression-%s' % compengine)
3518
3524
3519 if scmutil.gdinitconfig(ui):
3525 if scmutil.gdinitconfig(ui):
3520 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3526 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3521 if ui.configbool(b'format', b'sparse-revlog'):
3527 if ui.configbool(b'format', b'sparse-revlog'):
3522 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3528 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3523
3529
3524 # experimental config: format.exp-use-copies-side-data-changeset
3530 # experimental config: format.exp-use-copies-side-data-changeset
3525 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3531 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3526 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3532 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3527 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3533 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3528 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3534 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3529 if ui.configbool(b'experimental', b'treemanifest'):
3535 if ui.configbool(b'experimental', b'treemanifest'):
3530 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3536 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3531
3537
3538 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3539 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3540 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3541
3532 revlogv2 = ui.config(b'experimental', b'revlogv2')
3542 revlogv2 = ui.config(b'experimental', b'revlogv2')
3533 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3543 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3534 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3544 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3535 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3545 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3536 # experimental config: format.internal-phase
3546 # experimental config: format.internal-phase
3537 if ui.configbool(b'format', b'internal-phase'):
3547 if ui.configbool(b'format', b'internal-phase'):
3538 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3548 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3539
3549
3540 if createopts.get(b'narrowfiles'):
3550 if createopts.get(b'narrowfiles'):
3541 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3551 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3542
3552
3543 if createopts.get(b'lfs'):
3553 if createopts.get(b'lfs'):
3544 requirements.add(b'lfs')
3554 requirements.add(b'lfs')
3545
3555
3546 if ui.configbool(b'format', b'bookmarks-in-store'):
3556 if ui.configbool(b'format', b'bookmarks-in-store'):
3547 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3557 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3548
3558
3549 if ui.configbool(b'format', b'use-persistent-nodemap'):
3559 if ui.configbool(b'format', b'use-persistent-nodemap'):
3550 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3560 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3551
3561
3552 # if share-safe is enabled, let's create the new repository with the new
3562 # if share-safe is enabled, let's create the new repository with the new
3553 # requirement
3563 # requirement
3554 if ui.configbool(b'format', b'use-share-safe'):
3564 if ui.configbool(b'format', b'use-share-safe'):
3555 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3565 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3556
3566
3557 return requirements
3567 return requirements
3558
3568
3559
3569
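The `format.exp-use-changelog-v2` hunk above is the core of this change: only the exact sentinel value turns the new requirement on, so a casual `true`/`yes` cannot enable an unstable format by accident. A minimal sketch of that gating, where `wants_changelog_v2` is a hypothetical helper name:

    UNSTABLE = b'enable-unstable-format-and-corrupt-my-data'

    def wants_changelog_v2(ui):
        # ui.config() returns the raw configured string; anything but the
        # exact sentinel leaves the experimental requirement disabled.
        return ui.config(b'format', b'exp-use-changelog-v2') == UNSTABLE
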
3560 def checkrequirementscompat(ui, requirements):
3570 def checkrequirementscompat(ui, requirements):
3561 """Checks compatibility of repository requirements enabled and disabled.
3571 """Checks compatibility of repository requirements enabled and disabled.
3562
3572
3563 Returns a set of requirements which need to be dropped because dependent
3573 Returns a set of requirements which need to be dropped because dependent
3564 requirements are not enabled. Also warns users about it."""
3574 requirements are not enabled. Also warns users about it."""
3565
3575
3566 dropped = set()
3576 dropped = set()
3567
3577
3568 if requirementsmod.STORE_REQUIREMENT not in requirements:
3578 if requirementsmod.STORE_REQUIREMENT not in requirements:
3569 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3579 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3570 ui.warn(
3580 ui.warn(
3571 _(
3581 _(
3572 b'ignoring enabled \'format.bookmarks-in-store\' config '
3582 b'ignoring enabled \'format.bookmarks-in-store\' config '
3573 b'because it is incompatible with disabled '
3583 b'because it is incompatible with disabled '
3574 b'\'format.usestore\' config\n'
3584 b'\'format.usestore\' config\n'
3575 )
3585 )
3576 )
3586 )
3577 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3587 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3578
3588
3579 if (
3589 if (
3580 requirementsmod.SHARED_REQUIREMENT in requirements
3590 requirementsmod.SHARED_REQUIREMENT in requirements
3581 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3591 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3582 ):
3592 ):
3583 raise error.Abort(
3593 raise error.Abort(
3584 _(
3594 _(
3585 b"cannot create shared repository as source was created"
3595 b"cannot create shared repository as source was created"
3586 b" with 'format.usestore' config disabled"
3596 b" with 'format.usestore' config disabled"
3587 )
3597 )
3588 )
3598 )
3589
3599
3590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3600 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3591 ui.warn(
3601 ui.warn(
3592 _(
3602 _(
3593 b"ignoring enabled 'format.use-share-safe' config because "
3603 b"ignoring enabled 'format.use-share-safe' config because "
3594 b"it is incompatible with disabled 'format.usestore'"
3604 b"it is incompatible with disabled 'format.usestore'"
3595 b" config\n"
3605 b" config\n"
3596 )
3606 )
3597 )
3607 )
3598 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3608 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3599
3609
3600 return dropped
3610 return dropped
3601
3611
3602
3612
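As a usage sketch, a caller is expected to subtract the returned set from its computed requirements, which is exactly what createrepository() does further down:

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)
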
3603 def filterknowncreateopts(ui, createopts):
3613 def filterknowncreateopts(ui, createopts):
3604 """Filters a dict of repo creation options against options that are known.
3614 """Filters a dict of repo creation options against options that are known.
3605
3615
3606 Receives a dict of repo creation options and returns a dict of those
3616 Receives a dict of repo creation options and returns a dict of those
3607 options that we don't know how to handle.
3617 options that we don't know how to handle.
3608
3618
3609 This function is called as part of repository creation. If the
3619 This function is called as part of repository creation. If the
3610 returned dict contains any items, repository creation will not
3620 returned dict contains any items, repository creation will not
3611 be allowed, as it means there was a request to create a repository
3621 be allowed, as it means there was a request to create a repository
3612 with options not recognized by loaded code.
3622 with options not recognized by loaded code.
3613
3623
3614 Extensions can wrap this function to filter out creation options
3624 Extensions can wrap this function to filter out creation options
3615 they know how to handle.
3625 they know how to handle.
3616 """
3626 """
3617 known = {
3627 known = {
3618 b'backend',
3628 b'backend',
3619 b'lfs',
3629 b'lfs',
3620 b'narrowfiles',
3630 b'narrowfiles',
3621 b'sharedrepo',
3631 b'sharedrepo',
3622 b'sharedrelative',
3632 b'sharedrelative',
3623 b'shareditems',
3633 b'shareditems',
3624 b'shallowfilestore',
3634 b'shallowfilestore',
3625 }
3635 }
3626
3636
3627 return {k: v for k, v in createopts.items() if k not in known}
3637 return {k: v for k, v in createopts.items() if k not in known}
3628
3638
3629
3639
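Per the docstring, an extension that understands an extra creation option should wrap this function and remove its own key from the returned dict. A hedged sketch, with b'myext-opt' as a hypothetical option name:

    from mercurial import extensions, localrepo

    def _filtered(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop(b'myext-opt', None)  # claim the option we can handle
        return unknown

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filtered)
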
3630 def createrepository(ui, path, createopts=None):
3640 def createrepository(ui, path, createopts=None):
3631 """Create a new repository in a vfs.
3641 """Create a new repository in a vfs.
3632
3642
3633 ``path`` path to the new repo's working directory.
3643 ``path`` path to the new repo's working directory.
3634 ``createopts`` options for the new repository.
3644 ``createopts`` options for the new repository.
3635
3645
3636 The following keys for ``createopts`` are recognized:
3646 The following keys for ``createopts`` are recognized:
3637
3647
3638 backend
3648 backend
3639 The storage backend to use.
3649 The storage backend to use.
3640 lfs
3650 lfs
3641 Repository will be created with ``lfs`` requirement. The lfs extension
3651 Repository will be created with ``lfs`` requirement. The lfs extension
3642 will automatically be loaded when the repository is accessed.
3652 will automatically be loaded when the repository is accessed.
3643 narrowfiles
3653 narrowfiles
3644 Set up repository to support narrow file storage.
3654 Set up repository to support narrow file storage.
3645 sharedrepo
3655 sharedrepo
3646 Repository object from which storage should be shared.
3656 Repository object from which storage should be shared.
3647 sharedrelative
3657 sharedrelative
3648 Boolean indicating if the path to the shared repo should be
3658 Boolean indicating if the path to the shared repo should be
3649 stored as relative. By default, the pointer to the "parent" repo
3659 stored as relative. By default, the pointer to the "parent" repo
3650 is stored as an absolute path.
3660 is stored as an absolute path.
3651 shareditems
3661 shareditems
3652 Set of items to share to the new repository (in addition to storage).
3662 Set of items to share to the new repository (in addition to storage).
3653 shallowfilestore
3663 shallowfilestore
3654 Indicates that storage for files should be shallow (not all ancestor
3664 Indicates that storage for files should be shallow (not all ancestor
3655 revisions are known).
3665 revisions are known).
3656 """
3666 """
3657 createopts = defaultcreateopts(ui, createopts=createopts)
3667 createopts = defaultcreateopts(ui, createopts=createopts)
3658
3668
3659 unknownopts = filterknowncreateopts(ui, createopts)
3669 unknownopts = filterknowncreateopts(ui, createopts)
3660
3670
3661 if not isinstance(unknownopts, dict):
3671 if not isinstance(unknownopts, dict):
3662 raise error.ProgrammingError(
3672 raise error.ProgrammingError(
3663 b'filterknowncreateopts() did not return a dict'
3673 b'filterknowncreateopts() did not return a dict'
3664 )
3674 )
3665
3675
3666 if unknownopts:
3676 if unknownopts:
3667 raise error.Abort(
3677 raise error.Abort(
3668 _(
3678 _(
3669 b'unable to create repository because of unknown '
3679 b'unable to create repository because of unknown '
3670 b'creation option: %s'
3680 b'creation option: %s'
3671 )
3681 )
3672 % b', '.join(sorted(unknownopts)),
3682 % b', '.join(sorted(unknownopts)),
3673 hint=_(b'is a required extension not loaded?'),
3683 hint=_(b'is a required extension not loaded?'),
3674 )
3684 )
3675
3685
3676 requirements = newreporequirements(ui, createopts=createopts)
3686 requirements = newreporequirements(ui, createopts=createopts)
3677 requirements -= checkrequirementscompat(ui, requirements)
3687 requirements -= checkrequirementscompat(ui, requirements)
3678
3688
3679 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3689 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3680
3690
3681 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3691 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3682 if hgvfs.exists():
3692 if hgvfs.exists():
3683 raise error.RepoError(_(b'repository %s already exists') % path)
3693 raise error.RepoError(_(b'repository %s already exists') % path)
3684
3694
3685 if b'sharedrepo' in createopts:
3695 if b'sharedrepo' in createopts:
3686 sharedpath = createopts[b'sharedrepo'].sharedpath
3696 sharedpath = createopts[b'sharedrepo'].sharedpath
3687
3697
3688 if createopts.get(b'sharedrelative'):
3698 if createopts.get(b'sharedrelative'):
3689 try:
3699 try:
3690 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3700 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3691 sharedpath = util.pconvert(sharedpath)
3701 sharedpath = util.pconvert(sharedpath)
3692 except (IOError, ValueError) as e:
3702 except (IOError, ValueError) as e:
3693 # ValueError is raised on Windows if the drive letters differ
3703 # ValueError is raised on Windows if the drive letters differ
3694 # on each path.
3704 # on each path.
3695 raise error.Abort(
3705 raise error.Abort(
3696 _(b'cannot calculate relative path'),
3706 _(b'cannot calculate relative path'),
3697 hint=stringutil.forcebytestr(e),
3707 hint=stringutil.forcebytestr(e),
3698 )
3708 )
3699
3709
3700 if not wdirvfs.exists():
3710 if not wdirvfs.exists():
3701 wdirvfs.makedirs()
3711 wdirvfs.makedirs()
3702
3712
3703 hgvfs.makedir(notindexed=True)
3713 hgvfs.makedir(notindexed=True)
3704 if b'sharedrepo' not in createopts:
3714 if b'sharedrepo' not in createopts:
3705 hgvfs.mkdir(b'cache')
3715 hgvfs.mkdir(b'cache')
3706 hgvfs.mkdir(b'wcache')
3716 hgvfs.mkdir(b'wcache')
3707
3717
3708 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3718 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3709 if has_store and b'sharedrepo' not in createopts:
3719 if has_store and b'sharedrepo' not in createopts:
3710 hgvfs.mkdir(b'store')
3720 hgvfs.mkdir(b'store')
3711
3721
3712 # We create an invalid changelog outside the store so very old
3722 # We create an invalid changelog outside the store so very old
3713 # Mercurial versions (which didn't know about the requirements
3723 # Mercurial versions (which didn't know about the requirements
3714 # file) encounter an error on reading the changelog. This
3724 # file) encounter an error on reading the changelog. This
3715 # effectively locks out old clients and prevents them from
3725 # effectively locks out old clients and prevents them from
3716 # mucking with a repo in an unknown format.
3726 # mucking with a repo in an unknown format.
3717 #
3727 #
3718 # The revlog header has version 65535, which won't be recognized by
3728 # The revlog header has version 65535, which won't be recognized by
3719 # such old clients.
3729 # such old clients.
3720 hgvfs.append(
3730 hgvfs.append(
3721 b'00changelog.i',
3731 b'00changelog.i',
3722 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3732 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3723 b'layout',
3733 b'layout',
3724 )
3734 )
3725
3735
3726 # Filter the requirements into working copy and store ones
3736 # Filter the requirements into working copy and store ones
3727 wcreq, storereq = scmutil.filterrequirements(requirements)
3737 wcreq, storereq = scmutil.filterrequirements(requirements)
3728 # write working copy ones
3738 # write working copy ones
3729 scmutil.writerequires(hgvfs, wcreq)
3739 scmutil.writerequires(hgvfs, wcreq)
3730 # If there are store requirements and the current repository
3740 # If there are store requirements and the current repository
3731 # is not a shared one, write stored requirements
3741 # is not a shared one, write stored requirements
3732 # For a new shared repository, we don't need to write the store
3742 # For a new shared repository, we don't need to write the store
3733 # requirements as they are already present in the source's store requires
3743 # requirements as they are already present in the source's store requires
3734 if storereq and b'sharedrepo' not in createopts:
3744 if storereq and b'sharedrepo' not in createopts:
3735 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3745 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3736 scmutil.writerequires(storevfs, storereq)
3746 scmutil.writerequires(storevfs, storereq)
3737
3747
3738 # Write out file telling readers where to find the shared store.
3748 # Write out file telling readers where to find the shared store.
3739 if b'sharedrepo' in createopts:
3749 if b'sharedrepo' in createopts:
3740 hgvfs.write(b'sharedpath', sharedpath)
3750 hgvfs.write(b'sharedpath', sharedpath)
3741
3751
3742 if createopts.get(b'shareditems'):
3752 if createopts.get(b'shareditems'):
3743 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3753 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3744 hgvfs.write(b'shared', shared)
3754 hgvfs.write(b'shared', shared)
3745
3755
3746
3756
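A minimal usage sketch of createrepository(); the path and the lfs opt-in are illustrative only:

    from mercurial import hg, localrepo, ui as uimod

    myui = uimod.ui.load()
    localrepo.createrepository(myui, b'/tmp/newrepo', createopts={b'lfs': True})
    repo = hg.repository(myui, b'/tmp/newrepo')  # lfs loads on access
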
3747 def poisonrepository(repo):
3757 def poisonrepository(repo):
3748 """Poison a repository instance so it can no longer be used."""
3758 """Poison a repository instance so it can no longer be used."""
3749 # Perform any cleanup on the instance.
3759 # Perform any cleanup on the instance.
3750 repo.close()
3760 repo.close()
3751
3761
3752 # Our strategy is to replace the type of the object with one that
3762 # Our strategy is to replace the type of the object with one that
3753 # has all attribute lookups result in error.
3763 # has all attribute lookups result in error.
3754 #
3764 #
3755 # But we have to allow the close() method because some constructors
3765 # But we have to allow the close() method because some constructors
3756 # of repos call close() on repo references.
3766 # of repos call close() on repo references.
3757 class poisonedrepository(object):
3767 class poisonedrepository(object):
3758 def __getattribute__(self, item):
3768 def __getattribute__(self, item):
3759 if item == 'close':
3769 if item == 'close':
3760 return object.__getattribute__(self, item)
3770 return object.__getattribute__(self, item)
3761
3771
3762 raise error.ProgrammingError(
3772 raise error.ProgrammingError(
3763 b'repo instances should not be used after unshare'
3773 b'repo instances should not be used after unshare'
3764 )
3774 )
3765
3775
3766 def close(self):
3776 def close(self):
3767 pass
3777 pass
3768
3778
3769 # We may have a repoview, which intercepts __setattr__. So be sure
3779 # We may have a repoview, which intercepts __setattr__. So be sure
3770 # we operate at the lowest level possible.
3780 # we operate at the lowest level possible.
3771 object.__setattr__(repo, '__class__', poisonedrepository)
3781 object.__setattr__(repo, '__class__', poisonedrepository)
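The intended post-condition, sketched: after poisoning, close() remains callable and every other attribute access raises:

    poisonrepository(repo)
    repo.close()     # still allowed (and harmless)
    repo.changelog   # raises error.ProgrammingError
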
@@ -1,78 +1,82 b''
1 # requirements.py - objects and functions related to repository requirements
1 # requirements.py - objects and functions related to repository requirements
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 GENERALDELTA_REQUIREMENT = b'generaldelta'
10 GENERALDELTA_REQUIREMENT = b'generaldelta'
11 DOTENCODE_REQUIREMENT = b'dotencode'
11 DOTENCODE_REQUIREMENT = b'dotencode'
12 STORE_REQUIREMENT = b'store'
12 STORE_REQUIREMENT = b'store'
13 FNCACHE_REQUIREMENT = b'fncache'
13 FNCACHE_REQUIREMENT = b'fncache'
14
14
15 # When narrowing is finalized and no longer subject to format changes,
15 # When narrowing is finalized and no longer subject to format changes,
16 # we should move this to just "narrow" or similar.
16 # we should move this to just "narrow" or similar.
17 NARROW_REQUIREMENT = b'narrowhg-experimental'
17 NARROW_REQUIREMENT = b'narrowhg-experimental'
18
18
19 # Enables sparse working directory usage
19 # Enables sparse working directory usage
20 SPARSE_REQUIREMENT = b'exp-sparse'
20 SPARSE_REQUIREMENT = b'exp-sparse'
21
21
22 # Enables the internal phase which is used to hide changesets instead
22 # Enables the internal phase which is used to hide changesets instead
23 # of stripping them
23 # of stripping them
24 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
24 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
25
25
26 # Stores manifest in Tree structure
26 # Stores manifest in Tree structure
27 TREEMANIFEST_REQUIREMENT = b'treemanifest'
27 TREEMANIFEST_REQUIREMENT = b'treemanifest'
28
28
29 REVLOGV1_REQUIREMENT = b'revlogv1'
29 REVLOGV1_REQUIREMENT = b'revlogv1'
30
30
31 # Increment the sub-version when the revlog v2 format changes to lock out old
31 # Increment the sub-version when the changelog v2 format changes to lock out old
32 # clients.
32 # clients.
33 CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
34
35 # Increment the sub-version when the revlog v2 format changes to lock out old
36 # clients.
33 REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
37 REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
34
38
35 # A repository with the sparserevlog feature will have delta chains that
39 # A repository with the sparserevlog feature will have delta chains that
36 # can spread over a larger span. Sparse reading cuts these large spans into
40 # can spread over a larger span. Sparse reading cuts these large spans into
37 # pieces, so that each piece isn't too big.
41 # pieces, so that each piece isn't too big.
38 # Without the sparserevlog capability, reading from the repository could use
42 # Without the sparserevlog capability, reading from the repository could use
39 # huge amounts of memory, because the whole span would be read at once,
43 # huge amounts of memory, because the whole span would be read at once,
40 # including all the intermediate revisions that aren't pertinent for the chain.
44 # including all the intermediate revisions that aren't pertinent for the chain.
41 # This is why once a repository has enabled sparse-read, it becomes required.
45 # This is why once a repository has enabled sparse-read, it becomes required.
42 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
46 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
43
47
44 # A repository with the copies-sidedata-changeset requirement will store
48 # A repository with the copies-sidedata-changeset requirement will store
45 # copies related information in changeset's sidedata.
49 # copies related information in changeset's sidedata.
46 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
50 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
47
51
48 # The repository uses a persistent nodemap for the changelog and the manifest.
52 # The repository uses a persistent nodemap for the changelog and the manifest.
49 NODEMAP_REQUIREMENT = b'persistent-nodemap'
53 NODEMAP_REQUIREMENT = b'persistent-nodemap'
50
54
51 # Denotes that the current repository is a share
55 # Denotes that the current repository is a share
52 SHARED_REQUIREMENT = b'shared'
56 SHARED_REQUIREMENT = b'shared'
53
57
54 # Denotes that current repository is a share and the shared source path is
58 # Denotes that current repository is a share and the shared source path is
55 # relative to the current repository root path
59 # relative to the current repository root path
56 RELATIVE_SHARED_REQUIREMENT = b'relshared'
60 RELATIVE_SHARED_REQUIREMENT = b'relshared'
57
61
58 # A repository with share implemented safely. The repository has different
62 # A repository with share implemented safely. The repository has different
59 # store and working copy requirements i.e. both `.hg/requires` and
63 # store and working copy requirements i.e. both `.hg/requires` and
60 # `.hg/store/requires` are present.
64 # `.hg/store/requires` are present.
61 SHARESAFE_REQUIREMENT = b'share-safe'
65 SHARESAFE_REQUIREMENT = b'share-safe'
62
66
63 # List of requirements which are working directory specific
67 # List of requirements which are working directory specific
64 # These requirements cannot be shared between repositories if they
68 # These requirements cannot be shared between repositories if they
65 # share the same store
69 # share the same store
66 # * sparse is a working directory specific functionality and hence working
70 # * sparse is a working directory specific functionality and hence working
67 # directory specific requirement
71 # directory specific requirement
68 # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
72 # * SHARED_REQUIREMENT and RELATIVE_SHARED_REQUIREMENT are requirements which
69 # represent that the current working copy/repository shares the store of
73 # represent that the current working copy/repository shares the store of
70 # another repo. Hence both of them should be stored in the working copy
74 # another repo. Hence both of them should be stored in the working copy
71 # * SHARESAFE_REQUIREMENT needs to be stored in the working dir to mark that
75 # * SHARESAFE_REQUIREMENT needs to be stored in the working dir to mark that
72 # the rest of the requirements are stored in the store's requires
76 # the rest of the requirements are stored in the store's requires
73 WORKING_DIR_REQUIREMENTS = {
77 WORKING_DIR_REQUIREMENTS = {
74 SPARSE_REQUIREMENT,
78 SPARSE_REQUIREMENT,
75 SHARED_REQUIREMENT,
79 SHARED_REQUIREMENT,
76 RELATIVE_SHARED_REQUIREMENT,
80 RELATIVE_SHARED_REQUIREMENT,
77 SHARESAFE_REQUIREMENT,
81 SHARESAFE_REQUIREMENT,
78 }
82 }
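A sketch of the split this set enables; in the real code, scmutil.filterrequirements (seen in the localrepo.py hunk above) performs this partition, and `split_requirements` here is a hypothetical stand-in:

    def split_requirements(requirements):
        # working-copy requirements go to .hg/requires; with share-safe,
        # the rest lives in .hg/store/requires
        wc = requirements & WORKING_DIR_REQUIREMENTS
        store = requirements - WORKING_DIR_REQUIREMENTS
        return wc, store
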
@@ -1,3439 +1,3442 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import contextlib
18 import contextlib
19 import errno
19 import errno
20 import io
20 import io
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullrev,
29 nullrev,
30 sha1nodeconstants,
30 sha1nodeconstants,
31 short,
31 short,
32 wdirrev,
32 wdirrev,
33 )
33 )
34 from .i18n import _
34 from .i18n import _
35 from .pycompat import getattr
35 from .pycompat import getattr
36 from .revlogutils.constants import (
36 from .revlogutils.constants import (
37 ALL_KINDS,
37 ALL_KINDS,
38 COMP_MODE_DEFAULT,
38 COMP_MODE_DEFAULT,
39 COMP_MODE_INLINE,
39 COMP_MODE_INLINE,
40 COMP_MODE_PLAIN,
40 COMP_MODE_PLAIN,
41 FEATURES_BY_VERSION,
41 FEATURES_BY_VERSION,
42 FLAG_GENERALDELTA,
42 FLAG_GENERALDELTA,
43 FLAG_INLINE_DATA,
43 FLAG_INLINE_DATA,
44 INDEX_HEADER,
44 INDEX_HEADER,
45 KIND_CHANGELOG,
45 REVLOGV0,
46 REVLOGV0,
46 REVLOGV1,
47 REVLOGV1,
47 REVLOGV1_FLAGS,
48 REVLOGV1_FLAGS,
48 REVLOGV2,
49 REVLOGV2,
49 REVLOGV2_FLAGS,
50 REVLOGV2_FLAGS,
50 REVLOG_DEFAULT_FLAGS,
51 REVLOG_DEFAULT_FLAGS,
51 REVLOG_DEFAULT_FORMAT,
52 REVLOG_DEFAULT_FORMAT,
52 REVLOG_DEFAULT_VERSION,
53 REVLOG_DEFAULT_VERSION,
53 SUPPORTED_FLAGS,
54 SUPPORTED_FLAGS,
54 )
55 )
55 from .revlogutils.flagutil import (
56 from .revlogutils.flagutil import (
56 REVIDX_DEFAULT_FLAGS,
57 REVIDX_DEFAULT_FLAGS,
57 REVIDX_ELLIPSIS,
58 REVIDX_ELLIPSIS,
58 REVIDX_EXTSTORED,
59 REVIDX_EXTSTORED,
59 REVIDX_FLAGS_ORDER,
60 REVIDX_FLAGS_ORDER,
60 REVIDX_HASCOPIESINFO,
61 REVIDX_HASCOPIESINFO,
61 REVIDX_ISCENSORED,
62 REVIDX_ISCENSORED,
62 REVIDX_RAWTEXT_CHANGING_FLAGS,
63 REVIDX_RAWTEXT_CHANGING_FLAGS,
63 )
64 )
64 from .thirdparty import attr
65 from .thirdparty import attr
65 from . import (
66 from . import (
66 ancestor,
67 ancestor,
67 dagop,
68 dagop,
68 error,
69 error,
69 mdiff,
70 mdiff,
70 policy,
71 policy,
71 pycompat,
72 pycompat,
72 templatefilters,
73 templatefilters,
73 util,
74 util,
74 )
75 )
75 from .interfaces import (
76 from .interfaces import (
76 repository,
77 repository,
77 util as interfaceutil,
78 util as interfaceutil,
78 )
79 )
79 from .revlogutils import (
80 from .revlogutils import (
80 deltas as deltautil,
81 deltas as deltautil,
81 docket as docketutil,
82 docket as docketutil,
82 flagutil,
83 flagutil,
83 nodemap as nodemaputil,
84 nodemap as nodemaputil,
84 revlogv0,
85 revlogv0,
85 sidedata as sidedatautil,
86 sidedata as sidedatautil,
86 )
87 )
87 from .utils import (
88 from .utils import (
88 storageutil,
89 storageutil,
89 stringutil,
90 stringutil,
90 )
91 )
91
92
92 # blanked usage of all the names to prevent pyflakes constraints
93 # blanked usage of all the names to prevent pyflakes constraints
93 # We need these names available in the module for extensions.
94 # We need these names available in the module for extensions.
94
95
95 REVLOGV0
96 REVLOGV0
96 REVLOGV1
97 REVLOGV1
97 REVLOGV2
98 REVLOGV2
98 FLAG_INLINE_DATA
99 FLAG_INLINE_DATA
99 FLAG_GENERALDELTA
100 FLAG_GENERALDELTA
100 REVLOG_DEFAULT_FLAGS
101 REVLOG_DEFAULT_FLAGS
101 REVLOG_DEFAULT_FORMAT
102 REVLOG_DEFAULT_FORMAT
102 REVLOG_DEFAULT_VERSION
103 REVLOG_DEFAULT_VERSION
103 REVLOGV1_FLAGS
104 REVLOGV1_FLAGS
104 REVLOGV2_FLAGS
105 REVLOGV2_FLAGS
105 REVIDX_ISCENSORED
106 REVIDX_ISCENSORED
106 REVIDX_ELLIPSIS
107 REVIDX_ELLIPSIS
107 REVIDX_HASCOPIESINFO
108 REVIDX_HASCOPIESINFO
108 REVIDX_EXTSTORED
109 REVIDX_EXTSTORED
109 REVIDX_DEFAULT_FLAGS
110 REVIDX_DEFAULT_FLAGS
110 REVIDX_FLAGS_ORDER
111 REVIDX_FLAGS_ORDER
111 REVIDX_RAWTEXT_CHANGING_FLAGS
112 REVIDX_RAWTEXT_CHANGING_FLAGS
112
113
113 parsers = policy.importmod('parsers')
114 parsers = policy.importmod('parsers')
114 rustancestor = policy.importrust('ancestor')
115 rustancestor = policy.importrust('ancestor')
115 rustdagop = policy.importrust('dagop')
116 rustdagop = policy.importrust('dagop')
116 rustrevlog = policy.importrust('revlog')
117 rustrevlog = policy.importrust('revlog')
117
118
118 # Aliased for performance.
119 # Aliased for performance.
119 _zlibdecompress = zlib.decompress
120 _zlibdecompress = zlib.decompress
120
121
121 # max size of revlog with inline data
122 # max size of revlog with inline data
122 _maxinline = 131072
123 _maxinline = 131072
123 _chunksize = 1048576
124 _chunksize = 1048576
124
125
125 # Flag processors for REVIDX_ELLIPSIS.
126 # Flag processors for REVIDX_ELLIPSIS.
126 def ellipsisreadprocessor(rl, text):
127 def ellipsisreadprocessor(rl, text):
127 return text, False
128 return text, False
128
129
129
130
130 def ellipsiswriteprocessor(rl, text):
131 def ellipsiswriteprocessor(rl, text):
131 return text, False
132 return text, False
132
133
133
134
134 def ellipsisrawprocessor(rl, text):
135 def ellipsisrawprocessor(rl, text):
135 return False
136 return False
136
137
137
138
138 ellipsisprocessor = (
139 ellipsisprocessor = (
139 ellipsisreadprocessor,
140 ellipsisreadprocessor,
140 ellipsiswriteprocessor,
141 ellipsiswriteprocessor,
141 ellipsisrawprocessor,
142 ellipsisrawprocessor,
142 )
143 )
143
144
144
145
145 def offset_type(offset, type):
146 def offset_type(offset, type):
146 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
147 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
147 raise ValueError(b'unknown revlog index flags')
148 raise ValueError(b'unknown revlog index flags')
148 return int(int(offset) << 16 | type)
149 return int(int(offset) << 16 | type)
149
150
150
151
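The packing convention matches the index-entry notes in the revlog docstring below: the offset lives in the high bits, the 16 flag bits in the low bits. A round-trip sketch:

    packed = offset_type(4096, 0)  # offset 4096, no REVIDX_* flags
    offset = packed >> 16          # -> 4096
    flags = packed & 0xFFFF        # -> 0
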
151 def _verify_revision(rl, skipflags, state, node):
152 def _verify_revision(rl, skipflags, state, node):
152 """Verify the integrity of the given revlog ``node`` while providing a hook
153 """Verify the integrity of the given revlog ``node`` while providing a hook
153 point for extensions to influence the operation."""
154 point for extensions to influence the operation."""
154 if skipflags:
155 if skipflags:
155 state[b'skipread'].add(node)
156 state[b'skipread'].add(node)
156 else:
157 else:
157 # Side-effect: read content and verify hash.
158 # Side-effect: read content and verify hash.
158 rl.revision(node)
159 rl.revision(node)
159
160
160
161
161 # True if a fast implementation for persistent-nodemap is available
162 # True if a fast implementation for persistent-nodemap is available
162 #
163 #
163 # We also consider we have a "fast" implementation in "pure" python because
164 # We also consider we have a "fast" implementation in "pure" python because
164 # people using pure don't really have performance considerations (and a
165 # people using pure don't really have performance considerations (and a
165 # wheelbarrow of other slowness sources)
166 # wheelbarrow of other slowness sources)
166 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
167 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
167 parsers, 'BaseIndexObject'
168 parsers, 'BaseIndexObject'
168 )
169 )
169
170
170
171
171 @attr.s(slots=True, frozen=True)
172 @attr.s(slots=True, frozen=True)
172 class _revisioninfo(object):
173 class _revisioninfo(object):
173 """Information about a revision that allows building its fulltext
174 """Information about a revision that allows building its fulltext
174 node: expected hash of the revision
175 node: expected hash of the revision
175 p1, p2: parent revs of the revision
176 p1, p2: parent revs of the revision
176 btext: built text cache consisting of a one-element list
177 btext: built text cache consisting of a one-element list
177 cachedelta: (baserev, uncompressed_delta) or None
178 cachedelta: (baserev, uncompressed_delta) or None
178 flags: flags associated to the revision storage
179 flags: flags associated to the revision storage
179
180
180 One of btext[0] or cachedelta must be set.
181 One of btext[0] or cachedelta must be set.
181 """
182 """
182
183
183 node = attr.ib()
184 node = attr.ib()
184 p1 = attr.ib()
185 p1 = attr.ib()
185 p2 = attr.ib()
186 p2 = attr.ib()
186 btext = attr.ib()
187 btext = attr.ib()
187 textlen = attr.ib()
188 textlen = attr.ib()
188 cachedelta = attr.ib()
189 cachedelta = attr.ib()
189 flags = attr.ib()
190 flags = attr.ib()
190
191
191
192
192 @interfaceutil.implementer(repository.irevisiondelta)
193 @interfaceutil.implementer(repository.irevisiondelta)
193 @attr.s(slots=True)
194 @attr.s(slots=True)
194 class revlogrevisiondelta(object):
195 class revlogrevisiondelta(object):
195 node = attr.ib()
196 node = attr.ib()
196 p1node = attr.ib()
197 p1node = attr.ib()
197 p2node = attr.ib()
198 p2node = attr.ib()
198 basenode = attr.ib()
199 basenode = attr.ib()
199 flags = attr.ib()
200 flags = attr.ib()
200 baserevisionsize = attr.ib()
201 baserevisionsize = attr.ib()
201 revision = attr.ib()
202 revision = attr.ib()
202 delta = attr.ib()
203 delta = attr.ib()
203 sidedata = attr.ib()
204 sidedata = attr.ib()
204 protocol_flags = attr.ib()
205 protocol_flags = attr.ib()
205 linknode = attr.ib(default=None)
206 linknode = attr.ib(default=None)
206
207
207
208
208 @interfaceutil.implementer(repository.iverifyproblem)
209 @interfaceutil.implementer(repository.iverifyproblem)
209 @attr.s(frozen=True)
210 @attr.s(frozen=True)
210 class revlogproblem(object):
211 class revlogproblem(object):
211 warning = attr.ib(default=None)
212 warning = attr.ib(default=None)
212 error = attr.ib(default=None)
213 error = attr.ib(default=None)
213 node = attr.ib(default=None)
214 node = attr.ib(default=None)
214
215
215
216
216 def parse_index_v1(data, inline):
217 def parse_index_v1(data, inline):
217 # call the C implementation to parse the index data
218 # call the C implementation to parse the index data
218 index, cache = parsers.parse_index2(data, inline)
219 index, cache = parsers.parse_index2(data, inline)
219 return index, cache
220 return index, cache
220
221
221
222
222 def parse_index_v2(data, inline):
223 def parse_index_v2(data, inline):
223 # call the C implementation to parse the index data
224 # call the C implementation to parse the index data
224 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
225 index, cache = parsers.parse_index2(data, inline, revlogv2=True)
225 return index, cache
226 return index, cache
226
227
227
228
228 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
229 if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
229
230
230 def parse_index_v1_nodemap(data, inline):
231 def parse_index_v1_nodemap(data, inline):
231 index, cache = parsers.parse_index_devel_nodemap(data, inline)
232 index, cache = parsers.parse_index_devel_nodemap(data, inline)
232 return index, cache
233 return index, cache
233
234
234
235
235 else:
236 else:
236 parse_index_v1_nodemap = None
237 parse_index_v1_nodemap = None
237
238
238
239
239 def parse_index_v1_mixed(data, inline):
240 def parse_index_v1_mixed(data, inline):
240 index, cache = parse_index_v1(data, inline)
241 index, cache = parse_index_v1(data, inline)
241 return rustrevlog.MixedIndex(index), cache
242 return rustrevlog.MixedIndex(index), cache
242
243
243
244
244 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
245 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
245 # signed integer)
246 # signed integer)
246 _maxentrysize = 0x7FFFFFFF
247 _maxentrysize = 0x7FFFFFFF
247
248
248
249
249 class revlog(object):
250 class revlog(object):
250 """
251 """
251 the underlying revision storage object
252 the underlying revision storage object
252
253
253 A revlog consists of two parts, an index and the revision data.
254 A revlog consists of two parts, an index and the revision data.
254
255
255 The index is a file with a fixed record size containing
256 The index is a file with a fixed record size containing
256 information on each revision, including its nodeid (hash), the
257 information on each revision, including its nodeid (hash), the
257 nodeids of its parents, the position and offset of its data within
258 nodeids of its parents, the position and offset of its data within
258 the data file, and the revision it's based on. Finally, each entry
259 the data file, and the revision it's based on. Finally, each entry
259 contains a linkrev entry that can serve as a pointer to external
260 contains a linkrev entry that can serve as a pointer to external
260 data.
261 data.
261
262
262 The revision data itself is a linear collection of data chunks.
263 The revision data itself is a linear collection of data chunks.
263 Each chunk represents a revision and is usually represented as a
264 Each chunk represents a revision and is usually represented as a
264 delta against the previous chunk. To bound lookup time, runs of
265 delta against the previous chunk. To bound lookup time, runs of
265 deltas are limited to about 2 times the length of the original
266 deltas are limited to about 2 times the length of the original
266 version data. This makes retrieval of a version proportional to
267 version data. This makes retrieval of a version proportional to
267 its size, or O(1) relative to the number of revisions.
268 its size, or O(1) relative to the number of revisions.
268
269
269 Both pieces of the revlog are written to in an append-only
270 Both pieces of the revlog are written to in an append-only
270 fashion, which means we never need to rewrite a file to insert or
271 fashion, which means we never need to rewrite a file to insert or
271 remove data, and can use some simple techniques to avoid the need
272 remove data, and can use some simple techniques to avoid the need
272 for locking while reading.
273 for locking while reading.
273
274
274 If checkambig, indexfile is opened with checkambig=True at
275 If checkambig, indexfile is opened with checkambig=True at
275 writing, to avoid file stat ambiguity.
276 writing, to avoid file stat ambiguity.
276
277
277 If mmaplargeindex is True, and an mmapindexthreshold is set, the
278 If mmaplargeindex is True, and an mmapindexthreshold is set, the
278 index will be mmapped rather than read if it is larger than the
279 index will be mmapped rather than read if it is larger than the
279 configured threshold.
280 configured threshold.
280
281
281 If censorable is True, the revlog can have censored revisions.
282 If censorable is True, the revlog can have censored revisions.
282
283
283 If `upperboundcomp` is not None, this is the expected maximal gain from
284 If `upperboundcomp` is not None, this is the expected maximal gain from
284 compression for the data content.
285 compression for the data content.
285
286
286 `concurrencychecker` is an optional function that receives 3 arguments: a
287 `concurrencychecker` is an optional function that receives 3 arguments: a
287 file handle, a filename, and an expected position. It should check whether
288 file handle, a filename, and an expected position. It should check whether
288 the current position in the file handle is valid, and log/warn/fail (by
289 the current position in the file handle is valid, and log/warn/fail (by
289 raising).
290 raising).
290
291
291
292
292 Internal details
293 Internal details
293 ----------------
294 ----------------
294
295
295 A large part of the revlog logic deals with revisions' "index entries", tuple
296 A large part of the revlog logic deals with revisions' "index entries", tuple
296 objects that contain the same "items" whatever the revlog version.
297 objects that contain the same "items" whatever the revlog version.
297 Different versions will have different ways of storing these items (sometimes
298 Different versions will have different ways of storing these items (sometimes
298 not having them at all), but the tuple will always be the same. New fields
299 not having them at all), but the tuple will always be the same. New fields
299 are usually added at the end to avoid breaking existing code that relies
300 are usually added at the end to avoid breaking existing code that relies
300 on the existing order. The fields are defined as follows:
301 on the existing order. The fields are defined as follows:
301
302
302 [0] offset:
303 [0] offset:
303 The byte index of the start of revision data chunk.
304 The byte index of the start of revision data chunk.
304 That value is shifted up by 16 bits. Use "offset = field >> 16" to
305 That value is shifted up by 16 bits. Use "offset = field >> 16" to
305 retrieve it.
306 retrieve it.
306
307
307 flags:
308 flags:
308 A flag field that carries special information or changes the behavior
309 A flag field that carries special information or changes the behavior
309 of the revision. (see `REVIDX_*` constants for details)
310 of the revision. (see `REVIDX_*` constants for details)
310 The flag field only occupies the first 16 bits of this field,
311 The flag field only occupies the first 16 bits of this field,
311 use "flags = field & 0xFFFF" to retrieve the value.
312 use "flags = field & 0xFFFF" to retrieve the value.
312
313
313 [1] compressed length:
314 [1] compressed length:
314 The size, in bytes, of the chunk on disk
315 The size, in bytes, of the chunk on disk
315
316
316 [2] uncompressed length:
317 [2] uncompressed length:
317 The size, in bytes, of the full revision once reconstructed.
318 The size, in bytes, of the full revision once reconstructed.
318
319
319 [3] base rev:
320 [3] base rev:
320 Either the base of the revision delta chain (without general
321 Either the base of the revision delta chain (without general
321 delta), or the base of the delta (stored in the data chunk)
322 delta), or the base of the delta (stored in the data chunk)
322 with general delta.
323 with general delta.
323
324
324 [4] link rev:
325 [4] link rev:
325 Changelog revision number of the changeset introducing this
326 Changelog revision number of the changeset introducing this
326 revision.
327 revision.
327
328
328 [5] parent 1 rev:
329 [5] parent 1 rev:
329 Revision number of the first parent
330 Revision number of the first parent
330
331
331 [6] parent 2 rev:
332 [6] parent 2 rev:
332 Revision number of the second parent
333 Revision number of the second parent
333
334
334 [7] node id:
335 [7] node id:
335 The node id of the current revision
336 The node id of the current revision
336
337
337 [8] sidedata offset:
338 [8] sidedata offset:
338 The byte index of the start of the revision's side-data chunk.
339 The byte index of the start of the revision's side-data chunk.
339
340
340 [9] sidedata chunk length:
341 [9] sidedata chunk length:
341 The size, in bytes, of the revision's side-data chunk.
342 The size, in bytes, of the revision's side-data chunk.
342
343
343 [10] data compression mode:
344 [10] data compression mode:
344 two bits that detail the way the data chunk is compressed on disk.
345 two bits that detail the way the data chunk is compressed on disk.
345 (see "COMP_MODE_*" constants for details). For revlog version 0 and
346 (see "COMP_MODE_*" constants for details). For revlog version 0 and
346 1 this will always be COMP_MODE_INLINE.
347 1 this will always be COMP_MODE_INLINE.
347
348
348 [11] side-data compression mode:
349 [11] side-data compression mode:
349 two bits that detail the way the sidedata chunk is compressed on disk.
350 two bits that detail the way the sidedata chunk is compressed on disk.
350 (see "COMP_MODE_*" constants for details)
351 (see "COMP_MODE_*" constants for details)
351 """
352 """
352
353
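Given one index entry tuple `e`, the field list above decodes as follows (a reading aid, not a new API):

    offset = e[0] >> 16    # [0] high bits: start of the data chunk
    flags = e[0] & 0xFFFF  # [0] low 16 bits: REVIDX_* flags
    comp_len = e[1]        # [1] compressed length on disk
    full_len = e[2]        # [2] uncompressed full-text length
    base_rev = e[3]        # [3] delta base (chain base without generaldelta)
    link_rev = e[4]        # [4] changelog revision introducing this one
    p1, p2 = e[5], e[6]    # [5], [6] parent revisions
    node = e[7]            # [7] node id of this revision
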
353 _flagserrorclass = error.RevlogError
354 _flagserrorclass = error.RevlogError
354
355
355 def __init__(
356 def __init__(
356 self,
357 self,
357 opener,
358 opener,
358 target,
359 target,
359 radix,
360 radix,
360 postfix=None, # only exists for `tmpcensored` now
361 postfix=None, # only exists for `tmpcensored` now
361 checkambig=False,
362 checkambig=False,
362 mmaplargeindex=False,
363 mmaplargeindex=False,
363 censorable=False,
364 censorable=False,
364 upperboundcomp=None,
365 upperboundcomp=None,
365 persistentnodemap=False,
366 persistentnodemap=False,
366 concurrencychecker=None,
367 concurrencychecker=None,
367 trypending=False,
368 trypending=False,
368 ):
369 ):
369 """
370 """
370 create a revlog object
371 create a revlog object
371
372
372 opener is a function that abstracts the file opening operation
373 opener is a function that abstracts the file opening operation
373 and can be used to implement COW semantics or the like.
374 and can be used to implement COW semantics or the like.
374
375
375 `target`: a (KIND, ID) tuple that identifies the content stored in
376 `target`: a (KIND, ID) tuple that identifies the content stored in
376 this revlog. It helps the rest of the code to understand what the revlog
377 this revlog. It helps the rest of the code to understand what the revlog
377 is about without having to resort to heuristics and index filename
378 is about without having to resort to heuristics and index filename
378 analysis. Note that this must be reliably set by normal code, but
379 analysis. Note that this must be reliably set by normal code, but
379 test, debug, or performance measurement code might not set this to an
380 test, debug, or performance measurement code might not set this to an
380 accurate value.
381 accurate value.
381 """
382 """
382 self.upperboundcomp = upperboundcomp
383 self.upperboundcomp = upperboundcomp
383
384
384 self.radix = radix
385 self.radix = radix
385
386
386 self._docket_file = None
387 self._docket_file = None
387 self._indexfile = None
388 self._indexfile = None
388 self._datafile = None
389 self._datafile = None
389 self._nodemap_file = None
390 self._nodemap_file = None
390 self.postfix = postfix
391 self.postfix = postfix
391 self._trypending = trypending
392 self._trypending = trypending
392 self.opener = opener
393 self.opener = opener
393 if persistentnodemap:
394 if persistentnodemap:
394 self._nodemap_file = nodemaputil.get_nodemap_file(self)
395 self._nodemap_file = nodemaputil.get_nodemap_file(self)
395
396
396 assert target[0] in ALL_KINDS
397 assert target[0] in ALL_KINDS
397 assert len(target) == 2
398 assert len(target) == 2
398 self.target = target
399 self.target = target
399 # When True, indexfile is opened with checkambig=True at writing, to
400 # When True, indexfile is opened with checkambig=True at writing, to
400 # avoid file stat ambiguity.
401 # avoid file stat ambiguity.
401 self._checkambig = checkambig
402 self._checkambig = checkambig
402 self._mmaplargeindex = mmaplargeindex
403 self._mmaplargeindex = mmaplargeindex
403 self._censorable = censorable
404 self._censorable = censorable
404 # 3-tuple of (node, rev, text) for a raw revision.
405 # 3-tuple of (node, rev, text) for a raw revision.
405 self._revisioncache = None
406 self._revisioncache = None
406 # Maps rev to chain base rev.
407 # Maps rev to chain base rev.
407 self._chainbasecache = util.lrucachedict(100)
408 self._chainbasecache = util.lrucachedict(100)
408 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
409 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
409 self._chunkcache = (0, b'')
410 self._chunkcache = (0, b'')
410 # How much data to read and cache into the raw revlog data cache.
411 # How much data to read and cache into the raw revlog data cache.
411 self._chunkcachesize = 65536
412 self._chunkcachesize = 65536
412 self._maxchainlen = None
413 self._maxchainlen = None
413 self._deltabothparents = True
414 self._deltabothparents = True
414 self.index = None
415 self.index = None
415 self._docket = None
416 self._docket = None
416 self._nodemap_docket = None
417 self._nodemap_docket = None
417 # Mapping of partial identifiers to full nodes.
418 # Mapping of partial identifiers to full nodes.
418 self._pcache = {}
419 self._pcache = {}
419 # Mapping of revision integer to full node.
420 # Mapping of revision integer to full node.
420 self._compengine = b'zlib'
421 self._compengine = b'zlib'
421 self._compengineopts = {}
422 self._compengineopts = {}
422 self._maxdeltachainspan = -1
423 self._maxdeltachainspan = -1
423 self._withsparseread = False
424 self._withsparseread = False
424 self._sparserevlog = False
425 self._sparserevlog = False
425 self.hassidedata = False
426 self.hassidedata = False
426 self._srdensitythreshold = 0.50
427 self._srdensitythreshold = 0.50
427 self._srmingapsize = 262144
428 self._srmingapsize = 262144
428
429
429 # Make copy of flag processors so each revlog instance can support
430 # Make copy of flag processors so each revlog instance can support
430 # custom flags.
431 # custom flags.
431 self._flagprocessors = dict(flagutil.flagprocessors)
432 self._flagprocessors = dict(flagutil.flagprocessors)
432
433
433 # 2-tuple of file handles being used for active writing.
434 # 2-tuple of file handles being used for active writing.
434 self._writinghandles = None
435 self._writinghandles = None
435 # prevent nesting of addgroup
436 # prevent nesting of addgroup
436 self._adding_group = None
437 self._adding_group = None
437
438
438 self._loadindex()
439 self._loadindex()
439
440
440 self._concurrencychecker = concurrencychecker
441 self._concurrencychecker = concurrencychecker
441
442
442 def _init_opts(self):
443 def _init_opts(self):
443 """process options (from above/config) to setup associated default revlog mode
444 """process options (from above/config) to setup associated default revlog mode
444
445
445 These values might be affected when actually reading on disk information.
446 These values might be affected when actually reading on disk information.
446
447
447 The relevant values are returned for use in _loadindex().
448 The relevant values are returned for use in _loadindex().
448
449
449 * newversionflags:
450 * newversionflags:
450 version header to use if we need to create a new revlog
451 version header to use if we need to create a new revlog
451
452
452 * mmapindexthreshold:
453 * mmapindexthreshold:
453 minimal index size at which to start using mmap
454 minimal index size at which to start using mmap
454
455
455 * force_nodemap:
456 * force_nodemap:
456 force the usage of a "development" version of the nodemap code
457 force the usage of a "development" version of the nodemap code
457 """
458 """
458 mmapindexthreshold = None
459 mmapindexthreshold = None
459 opts = self.opener.options
460 opts = self.opener.options
460
461
461 if b'revlogv2' in opts:
462 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
463 new_header = REVLOGV2
464 elif b'revlogv2' in opts:
462 new_header = REVLOGV2
465 new_header = REVLOGV2
463 elif b'revlogv1' in opts:
466 elif b'revlogv1' in opts:
464 new_header = REVLOGV1 | FLAG_INLINE_DATA
467 new_header = REVLOGV1 | FLAG_INLINE_DATA
465 if b'generaldelta' in opts:
468 if b'generaldelta' in opts:
466 new_header |= FLAG_GENERALDELTA
469 new_header |= FLAG_GENERALDELTA
467 elif b'revlogv0' in self.opener.options:
470 elif b'revlogv0' in self.opener.options:
468 new_header = REVLOGV0
471 new_header = REVLOGV0
469 else:
472 else:
470 new_header = REVLOG_DEFAULT_VERSION
473 new_header = REVLOG_DEFAULT_VERSION
471
474
472 if b'chunkcachesize' in opts:
475 if b'chunkcachesize' in opts:
473 self._chunkcachesize = opts[b'chunkcachesize']
476 self._chunkcachesize = opts[b'chunkcachesize']
474 if b'maxchainlen' in opts:
477 if b'maxchainlen' in opts:
475 self._maxchainlen = opts[b'maxchainlen']
478 self._maxchainlen = opts[b'maxchainlen']
476 if b'deltabothparents' in opts:
479 if b'deltabothparents' in opts:
477 self._deltabothparents = opts[b'deltabothparents']
480 self._deltabothparents = opts[b'deltabothparents']
478 self._lazydelta = bool(opts.get(b'lazydelta', True))
481 self._lazydelta = bool(opts.get(b'lazydelta', True))
479 self._lazydeltabase = False
482 self._lazydeltabase = False
480 if self._lazydelta:
483 if self._lazydelta:
481 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
484 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
482 if b'compengine' in opts:
485 if b'compengine' in opts:
483 self._compengine = opts[b'compengine']
486 self._compengine = opts[b'compengine']
484 if b'zlib.level' in opts:
487 if b'zlib.level' in opts:
485 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
488 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
486 if b'zstd.level' in opts:
489 if b'zstd.level' in opts:
487 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
490 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
488 if b'maxdeltachainspan' in opts:
491 if b'maxdeltachainspan' in opts:
489 self._maxdeltachainspan = opts[b'maxdeltachainspan']
492 self._maxdeltachainspan = opts[b'maxdeltachainspan']
490 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
493 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
491 mmapindexthreshold = opts[b'mmapindexthreshold']
494 mmapindexthreshold = opts[b'mmapindexthreshold']
492 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
495 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
493 withsparseread = bool(opts.get(b'with-sparse-read', False))
496 withsparseread = bool(opts.get(b'with-sparse-read', False))
494 # sparse-revlog forces sparse-read
497 # sparse-revlog forces sparse-read
495 self._withsparseread = self._sparserevlog or withsparseread
498 self._withsparseread = self._sparserevlog or withsparseread
496 if b'sparse-read-density-threshold' in opts:
499 if b'sparse-read-density-threshold' in opts:
497 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
500 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
498 if b'sparse-read-min-gap-size' in opts:
501 if b'sparse-read-min-gap-size' in opts:
499 self._srmingapsize = opts[b'sparse-read-min-gap-size']
502 self._srmingapsize = opts[b'sparse-read-min-gap-size']
500 if opts.get(b'enableellipsis'):
503 if opts.get(b'enableellipsis'):
501 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
504 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
502
505
503 # revlog v0 doesn't have flag processors
506 # revlog v0 doesn't have flag processors
504 for flag, processor in pycompat.iteritems(
507 for flag, processor in pycompat.iteritems(
505 opts.get(b'flagprocessors', {})
508 opts.get(b'flagprocessors', {})
506 ):
509 ):
507 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
510 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
508
511
509 if self._chunkcachesize <= 0:
512 if self._chunkcachesize <= 0:
510 raise error.RevlogError(
513 raise error.RevlogError(
511 _(b'revlog chunk cache size %r is not greater than 0')
514 _(b'revlog chunk cache size %r is not greater than 0')
512 % self._chunkcachesize
515 % self._chunkcachesize
513 )
516 )
514 elif self._chunkcachesize & (self._chunkcachesize - 1):
517 elif self._chunkcachesize & (self._chunkcachesize - 1):
515 raise error.RevlogError(
518 raise error.RevlogError(
516 _(b'revlog chunk cache size %r is not a power of 2')
519 _(b'revlog chunk cache size %r is not a power of 2')
517 % self._chunkcachesize
520 % self._chunkcachesize
518 )
521 )
519 force_nodemap = opts.get(b'devel-force-nodemap', False)
522 force_nodemap = opts.get(b'devel-force-nodemap', False)
520 return new_header, mmapindexthreshold, force_nodemap
523 return new_header, mmapindexthreshold, force_nodemap
521
524
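Note the precedence the new branch introduces: for a changelog, `changelogv2` wins over `revlogv2`, and both branches currently map to the same REVLOGV2 header. A condensed sketch of the selection order (`pick_header` and `is_changelog` are hypothetical names):

    def pick_header(opts, is_changelog):
        if b'changelogv2' in opts and is_changelog:
            return REVLOGV2  # changelog opted into the v2 family
        elif b'revlogv2' in opts:
            return REVLOGV2
        elif b'revlogv1' in opts:
            header = REVLOGV1 | FLAG_INLINE_DATA
            if b'generaldelta' in opts:
                header |= FLAG_GENERALDELTA
            return header
        elif b'revlogv0' in opts:
            return REVLOGV0
        return REVLOG_DEFAULT_VERSION
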
    def _get_data(self, filepath, mmap_threshold, size=None):
        """return the content of a file, read with or without mmap

        If the file is missing, return the empty string."""
        try:
            with self.opener(filepath) as fp:
                if mmap_threshold is not None:
                    file_size = self.opener.fstat(fp).st_size
                    if file_size >= mmap_threshold:
                        if size is not None:
                            # avoid a potential mmap crash
                            size = min(file_size, size)
                        # TODO: should .close() to release resources without
                        # relying on Python GC
                        if size is None:
                            return util.buffer(util.mmapread(fp))
                        else:
                            return util.buffer(util.mmapread(fp, size))
                if size is None:
                    return fp.read()
                else:
                    return fp.read(size)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b''

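    # The index "entry point" is resolved in three steps: an explicit
    # postfix wins, then a pending file (``.i.a``) left by an open
    # transaction when ``_trypending`` is set, and finally the plain
    # ``.i`` file.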
    def _loadindex(self):

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        if self.postfix is not None:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
        elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
            entry_point = b'%s.i.a' % self.radix
        else:
            entry_point = b'%s.i' % self.radix

        entry_data = b''
        self._initempty = True
        entry_data = self._get_data(entry_point, mmapindexthreshold)
        if len(entry_data) > 0:
            header = INDEX_HEADER.unpack(entry_data[:4])[0]
            self._initempty = False
        else:
            header = new_header

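        # The first four bytes of the index double as a version header:
        # the low 16 bits carry the format version, the high 16 bits
        # carry feature flags.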
        self._format_flags = header & ~0xFFFF
        self._format_version = header & 0xFFFF

        supported_flags = SUPPORTED_FLAGS.get(self._format_version)
        if supported_flags is None:
            msg = _(b'unknown version (%d) in revlog %s')
            msg %= (self._format_version, self.display_id)
            raise error.RevlogError(msg)
        elif self._format_flags & ~supported_flags:
            msg = _(b'unknown flags (%#04x) in version %d revlog %s')
            display_flag = self._format_flags >> 16
            msg %= (display_flag, self._format_version, self.display_id)
            raise error.RevlogError(msg)

        features = FEATURES_BY_VERSION[self._format_version]
        self._inline = features[b'inline'](self._format_flags)
        self._generaldelta = features[b'generaldelta'](self._format_flags)
        self.hassidedata = features[b'sidedata']

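        # Docket-based formats store only a small "docket" at the entry
        # point; it records the real index file name and how much of it
        # holds valid data. Older formats use the entry point as the
        # index itself.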
        if not features[b'docket']:
            self._indexfile = entry_point
            index_data = entry_data
        else:
            self._docket_file = entry_point
            if self._initempty:
                self._docket = docketutil.default_docket(self, header)
            else:
                self._docket = docketutil.parse_docket(
                    self, entry_data, use_pending=self._trypending
                )
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self.postfix is None:
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

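        # Pick the index parser: v0 and v2 have dedicated parsers, the
        # devel knob forces the nodemap-capable parser, and the Rust
        # variant is used when it is available and selected above. Plain
        # v1 parsing is the default.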
        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, _chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and util.safehasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index, self._chunkcache = d
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def revlog_kind(self):
        return self.target[0]

    @util.propertycache
    def display_id(self):
        """The public-facing "ID" of the revlog that we use in messages"""
        # Maybe we should build a user-facing representation of
        # revlog.target instead of using `self.radix`
        return self.radix

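    # Each compressed chunk starts with a one-byte header (e.g. b'x' for
    # zlib, b'u' for stored/uncompressed data) that selects the
    # compression engine; decompressors are created lazily and cached
    # per header below.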
    def _get_decompressor(self, t):
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(
                    _(b'unknown compression type %s') % binascii.hexlify(t)
                )
        return compressor

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    @util.propertycache
    def _decompressor(self):
        """the default decompressor"""
        if self._docket is None:
            return None
        t = self._docket.default_compression_header
        c = self._get_decompressor(t)
        return c.decompress

    def _indexfp(self):
        """file object for the revlog's index file"""
        return self.opener(self._indexfile, mode=b"r")

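    # The write helpers below position the handle at the end of the
    # valid data: plain EOF when there is no docket, or
    # ``docket.index_end`` otherwise, since a docket may track less data
    # than is physically on disk.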
    def __index_write_fp(self):
        # You should not use this directly; use `_writing` instead.
        try:
            f = self.opener(
                self._indexfile, mode=b"r+", checkambig=self._checkambig
            )
            if self._docket is None:
                f.seek(0, os.SEEK_END)
            else:
                f.seek(self._docket.index_end, os.SEEK_SET)
            return f
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return self.opener(
                self._indexfile, mode=b"w+", checkambig=self._checkambig
            )

    def __index_new_fp(self):
        # You should not use this unless you are upgrading from an inline
        # revlog.
        return self.opener(
            self._indexfile,
            mode=b"w",
            checkambig=self._checkambig,
            atomictemp=True,
        )

    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        return self.opener(self._datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable for reading data"""
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp

    def tiprev(self):
        return len(self.index) - 1

    def tip(self):
        return self.node(self.tiprev())

    def __contains__(self, rev):
        return 0 <= rev < len(self)

    def __len__(self):
        return len(self.index)

    def __iter__(self):
        return iter(pycompat.xrange(len(self)))

    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    @property
    def nodemap(self):
        msg = (
            b"revlog.nodemap is deprecated, "
            b"use revlog.index.[has_node|rev|get_rev]"
        )
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    @property
    def _nodecache(self):
        msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self.index.nodemap

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (e.g. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and
        # two clients could have the same revlog node with different flags
        # (i.e. different rawtext contents) and the delta could be
        # incompatible.
        if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
            self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
        ):
            return False
        return True

    def update_caches(self, transaction):
        if self._nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, b'')
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The Python code is responsible for validating the docket, so we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def sidedata_length(self, rev):
        if not self.hassidedata:
            return 0
        return self.index[rev][9]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

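    # A revision's delta chain base is found by following the "base"
    # field (index entry 3) until it points at itself; final results are
    # memoized in ``_chainbasecache``.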
    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise
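        # If p1 is null but p2 is not, swap them so that a non-null
        # parent always comes first.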
        if entry[5] == nullrev:
            return entry[6], entry[5]
        else:
            return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        # inline node() to avoid function call overhead
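        # Editor's note: entries 5 and 6 hold parent *revisions*, so the
        # comparison with the null *node* below appears to never match;
        # unlike parentrevs(), the null-first swap seems inert here.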
        if d[5] == self.nullid:
            return i[d[6]][7], i[d[5]][7]
        else:
            return i[d[5]][7], i[d[6]][7]

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is; real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant. (We seeded the descendants list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in pycompat.iteritems(heads) if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered revs so nobody is a head at start
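        # One extra slot: writes for nullrev (-1) land in the last cell,
        # which is never reported as a head.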
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [self.nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
1455 if a == nullrev:
1458 if a == nullrev:
1456 return True
1459 return True
1457 elif a == b:
1460 elif a == b:
1458 return True
1461 return True
1459 elif a > b:
1462 elif a > b:
1460 return False
1463 return False
1461 return bool(self.reachableroots(a, [b], [a], includepath=False))
1464 return bool(self.reachableroots(a, [b], [a], includepath=False))
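
    # The ``a > b`` fast path above relies on a revlog invariant: parents are
    # always stored before their children, so an ancestor's revision number
    # is strictly smaller than any descendant's. With a chain 0 <- 1 <- 2,
    # for example, isancestorrev(2, 1) returns False without ever calling
    # reachableroots.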

    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return self.nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.display_id, _(b'ambiguous identifier')
                )
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[: l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass
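
    # Prefix resolution above proceeds in three stages: the C radix tree
    # (fast, but unaware of hidden revisions), then the prefix cache
    # ``self._pcache``, then a linear scan of the index filtered through
    # ``self.hasnode()``. A prefix made only of 'f' characters is always
    # treated with suspicion because it may abbreviate the all-'f' hex id
    # of the working directory pseudo-node (wdir).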

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.display_id, _(b'no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
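
    # Example of the wdir interaction handled above: for a node whose hex
    # form starts with b'ff', a one- or two-character prefix would also match
    # the all-'f' wdir pseudo-node, so ``disambiguate()`` keeps growing the
    # prefix until it contains a character other than 'f'.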

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, its seek position will be
        changed and the original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (
            (offset + length + cachesize) & ~(cachesize - 1)
        ) - realoffset
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _(
                        b'partial read of revlog %s; expected %d bytes from '
                        b'offset %d, got %d'
                    )
                    % (
                        self._indexfile if self._inline else self._datafile,
                        length,
                        offset,
                        len(d) - startoffset,
                    )
                )

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _(
                    b'partial read of revlog %s; expected %d bytes from offset '
                    b'%d, got %d'
                )
                % (
                    self._indexfile if self._inline else self._datafile,
                    length,
                    offset,
                    len(d),
                )
            )

        return d
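
    # The bit masking above rounds the read window to cache-size boundaries.
    # For instance, with a 64KiB window (cachesize = 65536), a request for
    # 100 bytes at offset 70000 becomes:
    #   realoffset = 70000 & ~65535                    = 65536
    #   reallength = ((70100 + 65536) & ~65535) - 65536 = 65536
    # i.e. one aligned 64KiB read that can also serve neighboring requests
    # from the cache.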

    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d  # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self.index.entry_size
            end += (endrev + 1) * self.index.entry_size
        length = end - start

        return start, self._getsegment(start, length, df=df)
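
    # For inline revlogs there is no separate data file: index entries and
    # revision data are interleaved in the .i file. The adjustment above
    # skips over the (rev + 1) index entries preceding each revision's data;
    # with revlogv1's 64-byte entries, for example, the data of rev 0 starts
    # 64 bytes past its computed offset.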

    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        compression_mode = self.index[rev][10]
        data = self._getsegmentforrevs(rev, rev, df=df)[1]
        if compression_mode == COMP_MODE_PLAIN:
            return data
        elif compression_mode == COMP_MODE_DEFAULT:
            return self._decompressor(data)
        elif compression_mode == COMP_MODE_INLINE:
            return self.decompress(data)
        else:
            msg = 'unknown compression mode %d'
            msg %= compression_mode
            raise error.RevlogError(msg)

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self.index.entry_size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            # self._decompressor might be None, but will not be used in that case
            def_decomp = self._decompressor
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                comp_mode = self.index[rev][10]
                c = buffer(data, chunkstart - offset, chunklength)
                if comp_mode == COMP_MODE_PLAIN:
                    ladd(c)
                elif comp_mode == COMP_MODE_INLINE:
                    ladd(decomp(c))
                elif comp_mode == COMP_MODE_DEFAULT:
                    ladd(def_decomp(c))
                else:
                    msg = 'unknown compression mode %d'
                    msg %= comp_mode
                    raise error.RevlogError(msg)

        return l
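
    # With sparse reads enabled, ``deltautil.slicechunk`` splits the
    # requested revisions into groups whose on-disk spans are dense enough
    # to each be fetched with a single contiguous read. Without it, the
    # whole span from the first to the last requested revision is read in
    # one go, possibly including unrelated revisions stored in between.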

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, b'')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, b'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1
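
    # ``issnapshot`` above encodes the sparse-revlog chain structure: a
    # revision stored as-is (base == rev) or delta'ed against nullrev is a
    # full snapshot; a delta against one of its parents is a plain delta;
    # and a delta against any other base is an intermediate snapshot exactly
    # when that base is itself a snapshot. ``snapshotdepth`` then counts the
    # snapshots that precede ``rev`` in its delta chain.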

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = b'_processflag(...) use the specialized variant'
        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == b'read':
            return flagutil.processflagsread(self, text, flags)
        else:  # write operation
            return flagutil.processflagswrite(self, text, flags)

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if raw:
            msg = (
                b'revlog.revision(..., raw=True) is deprecated, '
                b'use revlog.rawdata(...)'
            )
            util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        return self._revisiondata(nodeorrev, _df, raw=raw)[0]

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        return self._revisiondata(nodeorrev, _df)[1]

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == self.nullid:
            return b"", {}

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if self.hassidedata:
            if rev is None:
                rev = self.rev(node)
            sidedata = self._sidedata(rev)
        else:
            sidedata = {}

        if raw and validated:
            # if we don't want to process the raw text and the raw
            # text is cached, we can exit early.
            return rawtext, sidedata
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, sidedata

        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)
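
    # Reconstruction sketch for ``_rawtext``: if rev 7 is stored as a delta
    # chain 2 <- 5 <- 7, ``_deltachain`` returns ([2, 5, 7], stopped) and
    # chunk 2 is the base text while the chunks for 5 and 7 are binary
    # patches that ``mdiff.patches`` applies in order. When the cached
    # revision happens to sit on the chain (``stopped`` is True), it is used
    # as the base instead, so only the trailing deltas are read from disk.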

    def _sidedata(self, rev):
        """Return the sidedata for a given revision number."""
        index_entry = self.index[rev]
        sidedata_offset = index_entry[8]
        sidedata_size = index_entry[9]

        if self._inline:
            sidedata_offset += self.index.entry_size * (1 + rev)
        if sidedata_size == 0:
            return {}

        comp_segment = self._getsegment(sidedata_offset, sidedata_size)
        comp = self.index[rev][11]
        if comp == COMP_MODE_PLAIN:
            segment = comp_segment
        elif comp == COMP_MODE_DEFAULT:
            segment = self._decompressor(comp_segment)
        elif comp == COMP_MODE_INLINE:
            segment = self.decompress(comp_segment)
        else:
            msg = 'unknown compression mode %d'
            msg %= comp
            raise error.RevlogError(msg)

        sidedata = sidedatautil.deserialize_sidedata(segment)
        return sidedata

    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data of a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)[0]

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.display_id, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.display_id, node, text)
            raise

    def _enforceinlinesize(self, tr):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            return

        troffset = tr.findoffset(self._indexfile)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )
        trindex = 0
        tr.add(self._datafile, 0)

        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        new_dfh = self._datafp(b'w+')
        new_dfh.truncate(0)  # drop any potentially existing data
        try:
            with self._indexfp() as read_ifh:
                for r in self:
                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
                    if troffset <= self.start(r):
                        trindex = r
                new_dfh.flush()

            with self.__index_new_fp() as fp:
                self._format_flags &= ~FLAG_INLINE_DATA
                self._inline = False
                for i in self:
                    e = self.index.entry_binary(i)
                    if i == 0 and self._docket is None:
                        header = self._format_flags | self._format_version
                        header = self.index.pack_header(header)
                        e = header + e
                    fp.write(e)
                if self._docket is not None:
                    self._docket.index_end = fp.tell()
                # the temp file replaces the real index when we exit the
                # context manager

            tr.replace(self._indexfile, trindex * self.index.entry_size)
            nodemaputil.setup_persistent_nodemap(tr, self)
            self._chunkclear()

            if existing_handles:
                # switched from inline to conventional; reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh)
                new_dfh = None
        finally:
            if new_dfh is not None:
                new_dfh.close()

    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    @contextlib.contextmanager
    def _writing(self, transaction):
        if self._trypending:
            msg = b'try to write in a `trypending` revlog: %s'
            msg %= self.display_id
            raise error.ProgrammingError(msg)
        if self._writinghandles is not None:
            yield
        else:
            r = len(self)
            dsize = 0
            if r:
                dsize = self.end(r - 1)
            dfh = None
            if not self._inline:
                try:
                    dfh = self._datafp(b"r+")
                    if self._docket is None:
                        dfh.seek(0, os.SEEK_END)
                    else:
                        dfh.seek(self._docket.data_end, os.SEEK_SET)
                except IOError as inst:
                    if inst.errno != errno.ENOENT:
                        raise
                    dfh = self._datafp(b"w+")
                transaction.add(self._datafile, dsize)
            try:
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self._inline:
                    transaction.add(self._indexfile, dsize + isize)
                else:
                    transaction.add(self._indexfile, isize)
                try:
                    self._writinghandles = (ifh, dfh)
                    try:
                        yield
                        if self._docket is not None:
                            self._write_docket(transaction)
                    finally:
                        self._writinghandles = None
                finally:
                    ifh.close()
            finally:
                if dfh is not None:
                    dfh.close()

    def _write_docket(self, transaction):
        """write the current docket on disk

        Exists as a method to help the changelog implement its transaction
        logic.

        We could also imagine using the same transaction logic for all
        revlogs, since dockets are cheap."""
        self._docket.write(transaction)

    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
        computed by default as hash(text, p1, p2), however subclasses might
        use a different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
        multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.display_id
            )

        if sidedata is None:
            sidedata = {}
        elif sidedata and not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog that doesn't support them")
            )

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.display_id, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        rev = self.index.get_rev(node)
        if rev is not None:
            return rev

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
            sidedata=sidedata,
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        with self._writing(transaction):
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                deltacomputer=deltacomputer,
                sidedata=sidedata,
            )

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data
2376
2379
2377 def decompress(self, data):
2380 def decompress(self, data):
2378 """Decompress a revlog chunk.
2381 """Decompress a revlog chunk.
2379
2382
2380 The chunk is expected to begin with a header identifying the
2383 The chunk is expected to begin with a header identifying the
2381 format type so it can be routed to an appropriate decompressor.
2384 format type so it can be routed to an appropriate decompressor.
2382 """
2385 """
2383 if not data:
2386 if not data:
2384 return data
2387 return data
2385
2388
2386 # Revlogs are read much more frequently than they are written and many
2389 # Revlogs are read much more frequently than they are written and many
2387 # chunks only take microseconds to decompress, so performance is
2390 # chunks only take microseconds to decompress, so performance is
2388 # important here.
2391 # important here.
2389 #
2392 #
2390 # We can make a few assumptions about revlogs:
2393 # We can make a few assumptions about revlogs:
2391 #
2394 #
2392 # 1) the majority of chunks will be compressed (as opposed to inline
2395 # 1) the majority of chunks will be compressed (as opposed to inline
2393 # raw data).
2396 # raw data).
2394 # 2) decompressing *any* data will likely by at least 10x slower than
2397 # 2) decompressing *any* data will likely by at least 10x slower than
2395 # returning raw inline data.
2398 # returning raw inline data.
2396 # 3) we want to prioritize common and officially supported compression
2399 # 3) we want to prioritize common and officially supported compression
2397 # engines
2400 # engines
2398 #
2401 #
2399 # It follows that we want to optimize for "decompress compressed data
2402 # It follows that we want to optimize for "decompress compressed data
2400 # when encoded with common and officially supported compression engines"
2403 # when encoded with common and officially supported compression engines"
2401 # case over "raw data" and "data encoded by less common or non-official
2404 # case over "raw data" and "data encoded by less common or non-official
2402 # compression engines." That is why we have the inline lookup first
2405 # compression engines." That is why we have the inline lookup first
2403 # followed by the compengines lookup.
2406 # followed by the compengines lookup.
2404 #
2407 #
2405 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2408 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2406 # compressed chunks. And this matters for changelog and manifest reads.
2409 # compressed chunks. And this matters for changelog and manifest reads.
2407 t = data[0:1]
2410 t = data[0:1]
2408
2411
2409 if t == b'x':
2412 if t == b'x':
2410 try:
2413 try:
2411 return _zlibdecompress(data)
2414 return _zlibdecompress(data)
2412 except zlib.error as e:
2415 except zlib.error as e:
2413 raise error.RevlogError(
2416 raise error.RevlogError(
2414 _(b'revlog decompress error: %s')
2417 _(b'revlog decompress error: %s')
2415 % stringutil.forcebytestr(e)
2418 % stringutil.forcebytestr(e)
2416 )
2419 )
2417 # '\0' is more common than 'u' so it goes first.
2420 # '\0' is more common than 'u' so it goes first.
2418 elif t == b'\0':
2421 elif t == b'\0':
2419 return data
2422 return data
2420 elif t == b'u':
2423 elif t == b'u':
2421 return util.buffer(data, 1)
2424 return util.buffer(data, 1)
2422
2425
2423 compressor = self._get_decompressor(t)
2426 compressor = self._get_decompressor(t)
2424
2427
2425 return compressor.decompress(data)
2428 return compressor.decompress(data)
2426
2429
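    # The one-byte header convention shared by compress()/decompress()
    # above, sketched on literal chunks (illustrative only):
    #
    #   b'u' + rawtext  -> stored uncompressed; the b'u' is stripped on read
    #   b'\0...'        -> stored as-is; a leading b'\0' cannot start a
    #                      compressed stream, so the raw bytes double as
    #                      their own header
    #   b'x...'         -> zlib stream (0x78 is the usual first byte of a
    #                      zlib header), routed to _zlibdecompress()
    #
    # Any other leading byte is resolved through _get_decompressor(), e.g.
    # zstd chunks when that engine is enabled.
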
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        alwayscache=False,
        deltacomputer=None,
        sidedata=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == self.nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.display_id
            )
        if (
            node == self.nodeconstants.wdirid
            or node in self.nodeconstants.wdirfilenodeids
        ):
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.display_id
            )
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)

        if self._inline:
            fh = self._writinghandles[0]
        else:
            fh = self._writinghandles[1]

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1

        offset = self._get_data_offset(prev)

        if self._concurrencychecker:
            ifh, dfh = self._writinghandles
            if self._inline:
                # offset is "as if" it were in the .d file, so we need to add on
                # the size of the entry metadata.
                self._concurrencychecker(
                    ifh, self._indexfile, offset + curr * self.index.entry_size
                )
            else:
                # Entries in the .i are a consistent size.
                self._concurrencychecker(
                    ifh, self._indexfile, curr * self.index.entry_size
                )
                self._concurrencychecker(dfh, self._datafile, offset)

        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        compression_mode = COMP_MODE_INLINE
        if self._docket is not None:
            h, d = deltainfo.data
            if not h and not d:
                # no data to store at all... declare them uncompressed
                compression_mode = COMP_MODE_PLAIN
            elif not h:
                t = d[0:1]
                if t == b'\0':
                    compression_mode = COMP_MODE_PLAIN
                elif t == self._docket.default_compression_header:
                    compression_mode = COMP_MODE_DEFAULT
            elif h == b'u':
                # we have a more efficient way to declare uncompressed
                h = b''
                compression_mode = COMP_MODE_PLAIN
                deltainfo = deltautil.drop_u_compression(deltainfo)

        sidedata_compression_mode = COMP_MODE_INLINE
        if sidedata and self.hassidedata:
            sidedata_compression_mode = COMP_MODE_PLAIN
            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
            sidedata_offset = offset + deltainfo.deltalen
            h, comp_sidedata = self.compress(serialized_sidedata)
            if (
                h != b'u'
                and comp_sidedata[0:1] != b'\0'
                and len(comp_sidedata) < len(serialized_sidedata)
            ):
                assert not h
                if (
                    comp_sidedata[0:1]
                    == self._docket.default_compression_header
                ):
                    sidedata_compression_mode = COMP_MODE_DEFAULT
                    serialized_sidedata = comp_sidedata
                else:
                    sidedata_compression_mode = COMP_MODE_INLINE
                    serialized_sidedata = comp_sidedata
        else:
            serialized_sidedata = b""
            # Don't store the offset if the sidedata is empty, that way
            # we can easily detect empty sidedata and they will be no different
            # than ones we manually add.
            sidedata_offset = 0

        e = (
            offset_type(offset, flags),
            deltainfo.deltalen,
            textlen,
            deltainfo.base,
            link,
            p1r,
            p2r,
            node,
            sidedata_offset,
            len(serialized_sidedata),
            compression_mode,
            sidedata_compression_mode,
        )

        self.index.append(e)
        entry = self.index.entry_binary(curr)
        if curr == 0 and self._docket is None:
            header = self._format_flags | self._format_version
            header = self.index.pack_header(header)
            entry = header + entry
        self._writeentry(
            transaction,
            entry,
            deltainfo.data,
            link,
            offset,
            serialized_sidedata,
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return curr

    def _get_data_offset(self, prev):
        """Returns the current offset in the (in-transaction) data file.
        Versions < 2 of the revlog can get this in O(1), revlog v2 needs a
        docket file to store that information: since sidedata can be rewritten
        to the end of the data file within a transaction, you can have cases
        where, for example, rev `n` does not have sidedata while rev `n - 1`
        does, leading to `n - 1`'s sidedata being written after `n`'s data.

        TODO cache this in a docket file before getting out of experimental."""
        if self._docket is None:
            return self.end(prev)
        else:
            return self._docket.data_end

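    # Why end-of-file arithmetic stops being enough in revlog v2, sketched
    # with hypothetical revisions: a transaction may rewrite sidedata at
    # the tail of the data file, producing a layout such as
    #
    #   [rev n-1 data][rev n data][rev n-1 sidedata, rewritten]
    #
    # at which point `self.end(prev)`, computed from index offsets, no
    # longer names the true append position; the docket's `data_end` does.
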
    def _writeentry(self, transaction, entry, data, link, offset, sidedata):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh = self._writinghandles
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            if self._docket is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(self._docket.data_end, os.SEEK_SET)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self._datafile, offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                dfh.write(sidedata)
            ifh.write(entry)
        else:
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            if sidedata:
                ifh.write(sidedata)
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            self._docket.index_end = self._writinghandles[0].tell()
            self._docket.data_end = self._writinghandles[1].tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)

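    # The on-disk layouts _writeentry() above has to juggle, shown
    # schematically (illustrative only):
    #
    #   split:   foo.i -> [entry][entry]...            (fixed-size records)
    #            foo.d -> [delta/text][sidedata]...    (variable-size data)
    #   inline:  foo.i -> [entry][data][entry][data]...
    #
    # which is why the inline branch folds `curr * entry_size` into the
    # data offset and writes everything through the index handle.
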
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                deltacomputer = deltautil.deltacomputer(self)
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty

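    # Shape of one element of the `deltas` iterable consumed by addgroup()
    # above (an illustrative sketch, not a literal changegroup):
    #
    #   (node, p1, p2, linknode, deltabase, delta, flags, sidedata)
    #
    # `delta` is a binary patch against the revision identified by
    # `deltabase`, and `linknode` is translated into a local linkrev via
    # `linkmapper` before storage.
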
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        data_end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, data_end)
            end = rev * self.index.entry_size
        else:
            end = data_end + (rev * self.index.entry_size)

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could leverage the docket while stripping. However it is
            # not powerful enough at the time of this comment
            self._docket.index_end = end
            self._docket.data_end = data_end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._chunkclear()

        del self.index[rev:-1]

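    # A sketch of the strip protocol described in the docstring above
    # (illustrative only; `rl` and `tr` are hypothetical):
    #
    #   rev, broken = rl.getstrippoint(minlink)
    #   # `broken` holds revs whose linkrevs the truncation would break;
    #   # the caller bundles them up before truncating:
    #   rl.strip(minlink, tr)
    #   # ... and re-adds the saved revisions afterwards
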
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

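    # A sketch of consuming checksize() above (illustrative only; `rl` is
    # hypothetical and `report` is not a Mercurial API):
    #
    #   dd, di = rl.checksize()
    #   if dd:
    #       report(b'data file has %d bytes of trailing garbage' % dd)
    #   if di:
    #       report(b'index file has %d unexpected bytes' % di)
    #
    # verifyintegrity() below turns exactly these two values into
    # revlogproblem instances.
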
    def files(self):
        res = [self._indexfile]
        if not self._inline:
            res.append(self._datafile)
        return res

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
        )

    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is
           the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).
        DELTAREUSEFULLADD
           Revisions will be re-added as if they were new content. This is
           slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
           e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents`` argument
        controls whether to force compute deltas against both parents for
        merges. By default (when it is None), the destination revlog's current
        setting is kept.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

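    # A minimal sketch of driving clone() above (illustrative only; `src`,
    # `dst` and `tr` are hypothetical revlogs/transaction):
    #
    #   src.clone(tr, dst, deltareuse=src.DELTAREUSESAMEREVS)
    #
    # DELTAREUSENEVER would instead force every delta to be recomputed by
    # `dst`'s deltacomputer, trading run time for storage optimality.
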
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                text, sidedata = self._revisiondata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext, sidedata = self._revisiondata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)

    def censorrevision(self, tr, censornode, tombstone=b''):
        if self._format_version == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs')
                % self._format_version
            )

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.
        #
        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(
            self.opener,
            target=self.target,
            radix=self.radix,
            postfix=b'tmpcensored',
            censorable=True,
        )
        newrl._format_version = self._format_version
        newrl._format_flags = self._format_flags
        newrl._generaldelta = self._generaldelta
        newrl._parse_index = self._parse_index

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(
                    tombstone,
                    tr,
                    self.linkrev(censorrev),
                    p1,
                    p2,
                    censornode,
                    REVIDX_ISCENSORED,
                )

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'censored revision stored as delta; '
                            b'cannot censor'
                        ),
                        hint=_(
                            b'censoring of revlogs is not '
                            b'fully implemented; please report '
                            b'this bug'
                        ),
                    )
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(
                        _(
                            b'cannot censor due to censored '
                            b'revision having delta stored'
                        )
                    )
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(
                rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
            )

        tr.addbackup(self._indexfile, location=b'store')
        if not self._inline:
            tr.addbackup(self._datafile, location=b'store')

        self.opener.rename(newrl._indexfile, self._indexfile)
        if not self._inline:
            self.opener.rename(newrl._datafile, self._datafile)

        self.clearcaches()
        self._loadindex()

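    # A sketch of the censoring flow implemented above (illustrative only;
    # `rl`, `tr` and `badnode` are hypothetical):
    #
    #   rl.censorrevision(tr, badnode, tombstone=b'removed per policy')
    #
    # The packed tombstone may not be longer than the data it replaces
    # (checked up front), and censored revisions must be stored as full
    # texts rather than deltas.
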
3217 def verifyintegrity(self, state):
3220 def verifyintegrity(self, state):
3218 """Verifies the integrity of the revlog.
3221 """Verifies the integrity of the revlog.
3219
3222
3220 Yields ``revlogproblem`` instances describing problems that are
3223 Yields ``revlogproblem`` instances describing problems that are
3221 found.
3224 found.
3222 """
3225 """
3223 dd, di = self.checksize()
3226 dd, di = self.checksize()
3224 if dd:
3227 if dd:
3225 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3228 yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
3226 if di:
3229 if di:
3227 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3230 yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
3228
3231
3229 version = self._format_version
3232 version = self._format_version
3230
3233
3231 # The verifier tells us what version revlog we should be.
3234 # The verifier tells us what version revlog we should be.
3232 if version != state[b'expectedversion']:
3235 if version != state[b'expectedversion']:
3233 yield revlogproblem(
3236 yield revlogproblem(
3234 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3237 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
3235 % (self.display_id, version, state[b'expectedversion'])
3238 % (self.display_id, version, state[b'expectedversion'])
3236 )
3239 )
3237
3240
3238 state[b'skipread'] = set()
3241 state[b'skipread'] = set()
3239 state[b'safe_renamed'] = set()
3242 state[b'safe_renamed'] = set()
3240
3243
3241 for rev in self:
3244 for rev in self:
3242 node = self.node(rev)
3245 node = self.node(rev)
3243
3246
3244 # Verify contents. 4 cases to care about:
3247 # Verify contents. 4 cases to care about:
3245 #
3248 #
3246 # common: the most common case
3249 # common: the most common case
3247 # rename: with a rename
3250 # rename: with a rename
3248 # meta: file content starts with b'\1\n', the metadata
3251 # meta: file content starts with b'\1\n', the metadata
3249 # header defined in filelog.py, but without a rename
3252 # header defined in filelog.py, but without a rename
3250 # ext: content stored externally
3253 # ext: content stored externally
3251 #
3254 #
3252 # More formally, their differences are shown below:
3255 # More formally, their differences are shown below:
3253 #
3256 #
3254 # | common | rename | meta | ext
3257 # | common | rename | meta | ext
3255 # -------------------------------------------------------
3258 # -------------------------------------------------------
3256 # flags() | 0 | 0 | 0 | not 0
3259 # flags() | 0 | 0 | 0 | not 0
3257 # renamed() | False | True | False | ?
3260 # renamed() | False | True | False | ?
3258 # rawtext[0:2]=='\1\n'| False | True | True | ?
3261 # rawtext[0:2]=='\1\n'| False | True | True | ?
3259 #
3262 #
3260 # "rawtext" means the raw text stored in revlog data, which
3263 # "rawtext" means the raw text stored in revlog data, which
3261 # could be retrieved by "rawdata(rev)". "text"
3264 # could be retrieved by "rawdata(rev)". "text"
3262 # mentioned below is "revision(rev)".
3265 # mentioned below is "revision(rev)".
3263 #
3266 #
3264 # There are 3 different lengths stored physically:
3267 # There are 3 different lengths stored physically:
3265 # 1. L1: rawsize, stored in revlog index
3268 # 1. L1: rawsize, stored in revlog index
3266 # 2. L2: len(rawtext), stored in revlog data
3269 # 2. L2: len(rawtext), stored in revlog data
3267 # 3. L3: len(text), stored in revlog data if flags==0, or
3270 # 3. L3: len(text), stored in revlog data if flags==0, or
3268 # possibly somewhere else if flags!=0
3271 # possibly somewhere else if flags!=0
3269 #
3272 #
3270 # L1 should be equal to L2. L3 could be different from them.
3273 # L1 should be equal to L2. L3 could be different from them.
3271 # "text" may or may not affect commit hash depending on flag
3274 # "text" may or may not affect commit hash depending on flag
3272 # processors (see flagutil.addflagprocessor).
3275 # processors (see flagutil.addflagprocessor).
3273 #
3276 #
3274 # | common | rename | meta | ext
3277 # | common | rename | meta | ext
3275 # -------------------------------------------------
3278 # -------------------------------------------------
3276 # rawsize() | L1 | L1 | L1 | L1
3279 # rawsize() | L1 | L1 | L1 | L1
3277 # size() | L1 | L2-LM | L1(*) | L1 (?)
3280 # size() | L1 | L2-LM | L1(*) | L1 (?)
3278 # len(rawtext) | L2 | L2 | L2 | L2
3281 # len(rawtext) | L2 | L2 | L2 | L2
3279 # len(text) | L2 | L2 | L2 | L3
3282 # len(text) | L2 | L2 | L2 | L3
3280 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3283 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
3281 #
3284 #
3282 # LM: length of metadata, depending on rawtext
3285 # LM: length of metadata, depending on rawtext
3283 # (*): not ideal, see comment in filelog.size
3286 # (*): not ideal, see comment in filelog.size
3284 # (?): could be "- len(meta)" if the resolved content has
3287 # (?): could be "- len(meta)" if the resolved content has
3285 # rename metadata
3288 # rename metadata
3286 #
3289 #
3287 # Checks needed to be done:
3290 # Checks needed to be done:
3288 # 1. length check: L1 == L2, in all cases.
3291 # 1. length check: L1 == L2, in all cases.
3289 # 2. hash check: depending on flag processor, we may need to
3292 # 2. hash check: depending on flag processor, we may need to
3290 # use either "text" (external), or "rawtext" (in revlog).
3293 # use either "text" (external), or "rawtext" (in revlog).
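#
# Worked example (an illustration, not part of the original comment):
# for a rename, the rawtext is the filelog metadata header followed
# by the file text, e.g.
#   rawtext = b'\1\ncopy: a\ncopyrev: <40 hex digits>\n\1\n' + text
# so LM is the length of that header, and len(read()) == L2 - LM as
# in the "rename" column of the table above.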
3291
3294
3292 try:
3295 try:
3293 skipflags = state.get(b'skipflags', 0)
3296 skipflags = state.get(b'skipflags', 0)
3294 if skipflags:
3297 if skipflags:
3295 skipflags &= self.flags(rev)
3298 skipflags &= self.flags(rev)
3296
3299
3297 _verify_revision(self, skipflags, state, node)
3300 _verify_revision(self, skipflags, state, node)
3298
3301
3299 l1 = self.rawsize(rev)
3302 l1 = self.rawsize(rev)
3300 l2 = len(self.rawdata(node))
3303 l2 = len(self.rawdata(node))
3301
3304
3302 if l1 != l2:
3305 if l1 != l2:
3303 yield revlogproblem(
3306 yield revlogproblem(
3304 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3307 error=_(b'unpacked size is %d, %d expected') % (l2, l1),
3305 node=node,
3308 node=node,
3306 )
3309 )
3307
3310
3308 except error.CensoredNodeError:
3311 except error.CensoredNodeError:
3309 if state[b'erroroncensored']:
3312 if state[b'erroroncensored']:
3310 yield revlogproblem(
3313 yield revlogproblem(
3311 error=_(b'censored file data'), node=node
3314 error=_(b'censored file data'), node=node
3312 )
3315 )
3313 state[b'skipread'].add(node)
3316 state[b'skipread'].add(node)
3314 except Exception as e:
3317 except Exception as e:
3315 yield revlogproblem(
3318 yield revlogproblem(
3316 error=_(b'unpacking %s: %s')
3319 error=_(b'unpacking %s: %s')
3317 % (short(node), stringutil.forcebytestr(e)),
3320 % (short(node), stringutil.forcebytestr(e)),
3318 node=node,
3321 node=node,
3319 )
3322 )
3320 state[b'skipread'].add(node)
3323 state[b'skipread'].add(node)
3321
3324
3322 def storageinfo(
3325 def storageinfo(
3323 self,
3326 self,
3324 exclusivefiles=False,
3327 exclusivefiles=False,
3325 sharedfiles=False,
3328 sharedfiles=False,
3326 revisionscount=False,
3329 revisionscount=False,
3327 trackedsize=False,
3330 trackedsize=False,
3328 storedsize=False,
3331 storedsize=False,
3329 ):
3332 ):
3330 d = {}
3333 d = {}
3331
3334
3332 if exclusivefiles:
3335 if exclusivefiles:
3333 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3336 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
3334 if not self._inline:
3337 if not self._inline:
3335 d[b'exclusivefiles'].append((self.opener, self._datafile))
3338 d[b'exclusivefiles'].append((self.opener, self._datafile))
3336
3339
3337 if sharedfiles:
3340 if sharedfiles:
3338 d[b'sharedfiles'] = []
3341 d[b'sharedfiles'] = []
3339
3342
3340 if revisionscount:
3343 if revisionscount:
3341 d[b'revisionscount'] = len(self)
3344 d[b'revisionscount'] = len(self)
3342
3345
3343 if trackedsize:
3346 if trackedsize:
3344 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3347 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
3345
3348
3346 if storedsize:
3349 if storedsize:
3347 d[b'storedsize'] = sum(
3350 d[b'storedsize'] = sum(
3348 self.opener.stat(path).st_size for path in self.files()
3351 self.opener.stat(path).st_size for path in self.files()
3349 )
3352 )
3350
3353
3351 return d
3354 return d
3352
3355
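A minimal usage sketch for ``storageinfo`` as defined above (``rl`` is an assumed, already-open revlog instance; only the requested keys appear in the result):

    info = rl.storageinfo(revisionscount=True, trackedsize=True)
    # only the requested keys are populated:
    revs = info[b'revisionscount']  # number of revisions in this revlog
    size = info[b'trackedsize']     # sum of rawsize() over all revisions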
3353 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3356 def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
3354 if not self.hassidedata:
3357 if not self.hassidedata:
3355 return
3358 return
3356 # revlog formats with sidedata support do not support inline
3359 # revlog formats with sidedata support do not support inline
3357 assert not self._inline
3360 assert not self._inline
3358 if not helpers[1] and not helpers[2]:
3361 if not helpers[1] and not helpers[2]:
3359 # Nothing to generate or remove
3362 # Nothing to generate or remove
3360 return
3363 return
3361
3364
3362 new_entries = []
3365 new_entries = []
3363 # append the new sidedata
3366 # append the new sidedata
3364 with self._writing(transaction):
3367 with self._writing(transaction):
3365 ifh, dfh = self._writinghandles
3368 ifh, dfh = self._writinghandles
3366 if self._docket is not None:
3369 if self._docket is not None:
3367 dfh.seek(self._docket.data_end, os.SEEK_SET)
3370 dfh.seek(self._docket.data_end, os.SEEK_SET)
3368 else:
3371 else:
3369 dfh.seek(0, os.SEEK_END)
3372 dfh.seek(0, os.SEEK_END)
3370
3373
3371 current_offset = dfh.tell()
3374 current_offset = dfh.tell()
3372 for rev in range(startrev, endrev + 1):
3375 for rev in range(startrev, endrev + 1):
3373 entry = self.index[rev]
3376 entry = self.index[rev]
3374 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3377 new_sidedata, flags = sidedatautil.run_sidedata_helpers(
3375 store=self,
3378 store=self,
3376 sidedata_helpers=helpers,
3379 sidedata_helpers=helpers,
3377 sidedata={},
3380 sidedata={},
3378 rev=rev,
3381 rev=rev,
3379 )
3382 )
3380
3383
3381 serialized_sidedata = sidedatautil.serialize_sidedata(
3384 serialized_sidedata = sidedatautil.serialize_sidedata(
3382 new_sidedata
3385 new_sidedata
3383 )
3386 )
3384
3387
3385 sidedata_compression_mode = COMP_MODE_INLINE
3388 sidedata_compression_mode = COMP_MODE_INLINE
3386 if serialized_sidedata and self.hassidedata:
3389 if serialized_sidedata and self.hassidedata:
3387 sidedata_compression_mode = COMP_MODE_PLAIN
3390 sidedata_compression_mode = COMP_MODE_PLAIN
3388 h, comp_sidedata = self.compress(serialized_sidedata)
3391 h, comp_sidedata = self.compress(serialized_sidedata)
3389 if (
3392 if (
3390 h != b'u'
3393 h != b'u'
3391 and comp_sidedata[0] != b'\0'
3394 and comp_sidedata[0] != b'\0'
3392 and len(comp_sidedata) < len(serialized_sidedata)
3395 and len(comp_sidedata) < len(serialized_sidedata)
3393 ):
3396 ):
3394 assert not h
3397 assert not h
3395 if (
3398 if (
3396 comp_sidedata[0]
3399 comp_sidedata[0]
3397 == self._docket.default_compression_header
3400 == self._docket.default_compression_header
3398 ):
3401 ):
3399 sidedata_compression_mode = COMP_MODE_DEFAULT
3402 sidedata_compression_mode = COMP_MODE_DEFAULT
3400 serialized_sidedata = comp_sidedata
3403 serialized_sidedata = comp_sidedata
3401 else:
3404 else:
3402 sidedata_compression_mode = COMP_MODE_INLINE
3405 sidedata_compression_mode = COMP_MODE_INLINE
3403 serialized_sidedata = comp_sidedata
3406 serialized_sidedata = comp_sidedata
3404 if entry[8] != 0 or entry[9] != 0:
3407 if entry[8] != 0 or entry[9] != 0:
3405 # rewriting entries that already have sidedata is not
3408 # rewriting entries that already have sidedata is not
3406 # supported yet, because it introduces garbage data in the
3409 # supported yet, because it introduces garbage data in the
3407 # revlog.
3410 # revlog.
3408 msg = b"rewriting existing sidedata is not supported yet"
3411 msg = b"rewriting existing sidedata is not supported yet"
3409 raise error.Abort(msg)
3412 raise error.Abort(msg)
3410
3413
3411 # Apply (potential) flags to add and to remove after running
3414 # Apply (potential) flags to add and to remove after running
3412 # the sidedata helpers
3415 # the sidedata helpers
3413 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3416 new_offset_flags = entry[0] | flags[0] & ~flags[1]
3414 entry_update = (
3417 entry_update = (
3415 current_offset,
3418 current_offset,
3416 len(serialized_sidedata),
3419 len(serialized_sidedata),
3417 new_offset_flags,
3420 new_offset_flags,
3418 sidedata_compression_mode,
3421 sidedata_compression_mode,
3419 )
3422 )
3420
3423
3421 # the sidedata computation might have moved the file cursors around
3424 # the sidedata computation might have moved the file cursors around
3422 dfh.seek(current_offset, os.SEEK_SET)
3425 dfh.seek(current_offset, os.SEEK_SET)
3423 dfh.write(serialized_sidedata)
3426 dfh.write(serialized_sidedata)
3424 new_entries.append(entry_update)
3427 new_entries.append(entry_update)
3425 current_offset += len(serialized_sidedata)
3428 current_offset += len(serialized_sidedata)
3426 if self._docket is not None:
3429 if self._docket is not None:
3427 self._docket.data_end = dfh.tell()
3430 self._docket.data_end = dfh.tell()
3428
3431
3429 # rewrite the new index entries
3432 # rewrite the new index entries
3430 ifh.seek(startrev * self.index.entry_size)
3433 ifh.seek(startrev * self.index.entry_size)
3431 for i, e in enumerate(new_entries):
3434 for i, e in enumerate(new_entries):
3432 rev = startrev + i
3435 rev = startrev + i
3433 self.index.replace_sidedata_info(rev, *e)
3436 self.index.replace_sidedata_info(rev, *e)
3434 packed = self.index.entry_binary(rev)
3437 packed = self.index.entry_binary(rev)
3435 if rev == 0 and self._docket is None:
3438 if rev == 0 and self._docket is None:
3436 header = self._format_flags | self._format_version
3439 header = self._format_flags | self._format_version
3437 header = self.index.pack_header(header)
3440 header = self.index.pack_header(header)
3438 packed = header + packed
3441 packed = header + packed
3439 ifh.write(packed)
3442 ifh.write(packed)
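For orientation, a hedged sketch of consuming the problems yielded by the verification method above (assuming the surrounding method is the storage interface's ``verifyintegrity`` and ``rl`` is an open revlog; the ``state`` keys mirror the code):

    state = {b'expectedversion': 1, b'erroroncensored': True}
    for problem in rl.verifyintegrity(state):
        # revlogproblem carries ``error``, ``warning`` and ``node`` fields
        if problem.error is not None:
            handle_error(problem.node, problem.error)  # hypothetical helper
        elif problem.warning is not None:
            handle_warning(problem.warning)            # hypothetical helper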
@@ -1,1038 +1,1047 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import (
11 from .. import (
12 error,
12 error,
13 localrepo,
13 localrepo,
14 pycompat,
14 pycompat,
15 requirements,
15 requirements,
16 revlog,
16 revlog,
17 util,
17 util,
18 )
18 )
19
19
20 from ..utils import compression
20 from ..utils import compression
21
21
22 if pycompat.TYPE_CHECKING:
22 if pycompat.TYPE_CHECKING:
23 from typing import (
23 from typing import (
24 List,
24 List,
25 Type,
25 Type,
26 )
26 )
27
27
28
28
29 # list of requirements that request a clone of all revlog if added/removed
29 # list of requirements that request a clone of all revlog if added/removed
30 RECLONES_REQUIREMENTS = {
30 RECLONES_REQUIREMENTS = {
31 requirements.GENERALDELTA_REQUIREMENT,
31 requirements.GENERALDELTA_REQUIREMENT,
32 requirements.SPARSEREVLOG_REQUIREMENT,
32 requirements.SPARSEREVLOG_REQUIREMENT,
33 requirements.REVLOGV2_REQUIREMENT,
33 requirements.REVLOGV2_REQUIREMENT,
34 }
34 }
35
35
36
36
37 def preservedrequirements(repo):
37 def preservedrequirements(repo):
38 return set()
38 return set()
39
39
40
40
41 FORMAT_VARIANT = b'deficiency'
41 FORMAT_VARIANT = b'deficiency'
42 OPTIMISATION = b'optimization'
42 OPTIMISATION = b'optimization'
43
43
44
44
45 class improvement(object):
45 class improvement(object):
46 """Represents an improvement that can be made as part of an upgrade.
46 """Represents an improvement that can be made as part of an upgrade.
47
47
48 The following attributes are defined on each instance:
48 The following attributes are defined on each instance:
49
49
50 name
50 name
51 Machine-readable string uniquely identifying this improvement. It
51 Machine-readable string uniquely identifying this improvement. It
52 will be mapped to an action later in the upgrade process.
52 will be mapped to an action later in the upgrade process.
53
53
54 type
54 type
55 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
55 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
56 A format variant is where we change the storage format. Not all format
56 A format variant is where we change the storage format. Not all format
57 variant changes are an obvious problem.
57 variant changes are an obvious problem.
58 An optimization is an action (sometimes optional) that
58 An optimization is an action (sometimes optional) that
59 can be taken to further improve the state of the repository.
59 can be taken to further improve the state of the repository.
60
60
61 description
61 description
62 Message intended for humans explaining the improvement in more detail,
62 Message intended for humans explaining the improvement in more detail,
63 including the implications of it. For ``FORMAT_VARIANT`` types, should be
63 including the implications of it. For ``FORMAT_VARIANT`` types, should be
64 worded in the present tense. For ``OPTIMISATION`` types, should be
64 worded in the present tense. For ``OPTIMISATION`` types, should be
65 worded in the future tense.
65 worded in the future tense.
66
66
67 upgrademessage
67 upgrademessage
68 Message intended for humans explaining what an upgrade addressing this
68 Message intended for humans explaining what an upgrade addressing this
69 issue will do. Should be worded in the future tense.
69 issue will do. Should be worded in the future tense.
70
70
71 postupgrademessage
71 postupgrademessage
72 Message intended for humans which will be shown post an upgrade
72 Message intended for humans which will be shown post an upgrade
73 operation when the improvement will be added
73 operation when the improvement will be added
74
74
75 postdowngrademessage
75 postdowngrademessage
76 Message intended for humans which will be shown post an upgrade
76 Message intended for humans which will be shown post an upgrade
77 operation in which this improvement was removed
77 operation in which this improvement was removed
78
78
79 touches_filelogs (bool)
79 touches_filelogs (bool)
80 Whether this improvement touches filelogs
80 Whether this improvement touches filelogs
81
81
82 touches_manifests (bool)
82 touches_manifests (bool)
83 Whether this improvement touches manifests
83 Whether this improvement touches manifests
84
84
85 touches_changelog (bool)
85 touches_changelog (bool)
86 Whether this improvement touches changelog
86 Whether this improvement touches changelog
87
87
88 touches_requirements (bool)
88 touches_requirements (bool)
89 Whether this improvement changes repository requirements
89 Whether this improvement changes repository requirements
90 """
90 """
91
91
92 def __init__(self, name, type, description, upgrademessage):
92 def __init__(self, name, type, description, upgrademessage):
93 self.name = name
93 self.name = name
94 self.type = type
94 self.type = type
95 self.description = description
95 self.description = description
96 self.upgrademessage = upgrademessage
96 self.upgrademessage = upgrademessage
97 self.postupgrademessage = None
97 self.postupgrademessage = None
98 self.postdowngrademessage = None
98 self.postdowngrademessage = None
99 # By default for now, we assume every improvement touches
99 # By default for now, we assume every improvement touches
100 # all the things
100 # all the things
101 self.touches_filelogs = True
101 self.touches_filelogs = True
102 self.touches_manifests = True
102 self.touches_manifests = True
103 self.touches_changelog = True
103 self.touches_changelog = True
104 self.touches_requirements = True
104 self.touches_requirements = True
105
105
106 def __eq__(self, other):
106 def __eq__(self, other):
107 if not isinstance(other, improvement):
107 if not isinstance(other, improvement):
108 # This is what python tells us to do
108 # This is what python tells us to do
109 return NotImplemented
109 return NotImplemented
110 return self.name == other.name
110 return self.name == other.name
111
111
112 def __ne__(self, other):
112 def __ne__(self, other):
113 return not (self == other)
113 return not (self == other)
114
114
115 def __hash__(self):
115 def __hash__(self):
116 return hash(self.name)
116 return hash(self.name)
117
117
118
118
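As a rough illustration of the ``improvement`` API above (the name and messages here are invented, not real upgrade actions):

    # hypothetical instance; equality and hashing compare by name only
    demo = improvement(
        name=b'demo-action',
        type=OPTIMISATION,
        description=_(b'describes the current state of the repository'),
        upgrademessage=_(b'describes what the upgrade will do'),
    )
    assert demo == improvement(b'demo-action', OPTIMISATION, b'', b'')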
119 allformatvariant = [] # type: List[Type['formatvariant']]
119 allformatvariant = [] # type: List[Type['formatvariant']]
120
120
121
121
122 def registerformatvariant(cls):
122 def registerformatvariant(cls):
123 allformatvariant.append(cls)
123 allformatvariant.append(cls)
124 return cls
124 return cls
125
125
126
126
127 class formatvariant(improvement):
127 class formatvariant(improvement):
128 """an improvement subclass dedicated to repository format"""
128 """an improvement subclass dedicated to repository format"""
129
129
130 type = FORMAT_VARIANT
130 type = FORMAT_VARIANT
131 ### The following attributes should be defined for each class:
131 ### The following attributes should be defined for each class:
132
132
133 # machine-readable string uniquely identifying this improvement. it will be
133 # machine-readable string uniquely identifying this improvement. it will be
134 # mapped to an action later in the upgrade process.
134 # mapped to an action later in the upgrade process.
135 name = None
135 name = None
136
136
137 # message intended for humans explaining the improvement in more detail,
137 # message intended for humans explaining the improvement in more detail,
138 # including the implications of it. For ``FORMAT_VARIANT`` types,
138 # including the implications of it. For ``FORMAT_VARIANT`` types,
139 # it should be worded
139 # it should be worded
140 # in the present tense.
140 # in the present tense.
141 description = None
141 description = None
142
142
143 # message intended for humans explaining what an upgrade addressing this
143 # message intended for humans explaining what an upgrade addressing this
144 # issue will do. should be worded in the future tense.
144 # issue will do. should be worded in the future tense.
145 upgrademessage = None
145 upgrademessage = None
146
146
147 # value of current Mercurial default for new repository
147 # value of current Mercurial default for new repository
148 default = None
148 default = None
149
149
150 # Message intended for humans which will be shown post an upgrade
150 # Message intended for humans which will be shown post an upgrade
151 # operation when the improvement will be added
151 # operation when the improvement will be added
152 postupgrademessage = None
152 postupgrademessage = None
153
153
154 # Message intended for humans which will be shown post an upgrade
154 # Message intended for humans which will be shown post an upgrade
155 # operation in which this improvement was removed
155 # operation in which this improvement was removed
156 postdowngrademessage = None
156 postdowngrademessage = None
157
157
158 # By default for now, we assume every improvement touches all the things
158 # By default for now, we assume every improvement touches all the things
159 touches_filelogs = True
159 touches_filelogs = True
160 touches_manifests = True
160 touches_manifests = True
161 touches_changelog = True
161 touches_changelog = True
162 touches_requirements = True
162 touches_requirements = True
163
163
164 def __init__(self):
164 def __init__(self):
165 raise NotImplementedError()
165 raise NotImplementedError()
166
166
167 @staticmethod
167 @staticmethod
168 def fromrepo(repo):
168 def fromrepo(repo):
169 """current value of the variant in the repository"""
169 """current value of the variant in the repository"""
170 raise NotImplementedError()
170 raise NotImplementedError()
171
171
172 @staticmethod
172 @staticmethod
173 def fromconfig(repo):
173 def fromconfig(repo):
174 """current value of the variant in the configuration"""
174 """current value of the variant in the configuration"""
175 raise NotImplementedError()
175 raise NotImplementedError()
176
176
177
177
178 class requirementformatvariant(formatvariant):
178 class requirementformatvariant(formatvariant):
179 """formatvariant based on a 'requirement' name.
179 """formatvariant based on a 'requirement' name.
180
180
181 Many format variants are controlled by a 'requirement'. We define a small
181 Many format variants are controlled by a 'requirement'. We define a small
182 subclass to factor out the common code.
182 subclass to factor out the common code.
183 """
183 """
184
184
185 # the requirement that controls this format variant
185 # the requirement that controls this format variant
186 _requirement = None
186 _requirement = None
187
187
188 @staticmethod
188 @staticmethod
189 def _newreporequirements(ui):
189 def _newreporequirements(ui):
190 return localrepo.newreporequirements(
190 return localrepo.newreporequirements(
191 ui, localrepo.defaultcreateopts(ui)
191 ui, localrepo.defaultcreateopts(ui)
192 )
192 )
193
193
194 @classmethod
194 @classmethod
195 def fromrepo(cls, repo):
195 def fromrepo(cls, repo):
196 assert cls._requirement is not None
196 assert cls._requirement is not None
197 return cls._requirement in repo.requirements
197 return cls._requirement in repo.requirements
198
198
199 @classmethod
199 @classmethod
200 def fromconfig(cls, repo):
200 def fromconfig(cls, repo):
201 assert cls._requirement is not None
201 assert cls._requirement is not None
202 return cls._requirement in cls._newreporequirements(repo.ui)
202 return cls._requirement in cls._newreporequirements(repo.ui)
203
203
204
204
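A hedged sketch of declaring a new requirement-backed variant in the style of the registered classes that follow (the requirement string is invented for illustration):

    @registerformatvariant
    class demovariant(requirementformatvariant):
        # fromrepo()/fromconfig() are inherited and reduce to membership
        # tests on this (invented) requirement string
        name = b'demo-variant'
        _requirement = b'exp-demo-requirement'
        default = False
        description = _(b'describes the current, unupgraded state')
        upgrademessage = _(b'describes what upgrading will change')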
205 @registerformatvariant
205 @registerformatvariant
206 class fncache(requirementformatvariant):
206 class fncache(requirementformatvariant):
207 name = b'fncache'
207 name = b'fncache'
208
208
209 _requirement = requirements.FNCACHE_REQUIREMENT
209 _requirement = requirements.FNCACHE_REQUIREMENT
210
210
211 default = True
211 default = True
212
212
213 description = _(
213 description = _(
214 b'long and reserved filenames may not work correctly; '
214 b'long and reserved filenames may not work correctly; '
215 b'repository performance is sub-optimal'
215 b'repository performance is sub-optimal'
216 )
216 )
217
217
218 upgrademessage = _(
218 upgrademessage = _(
219 b'repository will be more resilient to storing '
219 b'repository will be more resilient to storing '
220 b'certain paths and performance of certain '
220 b'certain paths and performance of certain '
221 b'operations should be improved'
221 b'operations should be improved'
222 )
222 )
223
223
224
224
225 @registerformatvariant
225 @registerformatvariant
226 class dotencode(requirementformatvariant):
226 class dotencode(requirementformatvariant):
227 name = b'dotencode'
227 name = b'dotencode'
228
228
229 _requirement = requirements.DOTENCODE_REQUIREMENT
229 _requirement = requirements.DOTENCODE_REQUIREMENT
230
230
231 default = True
231 default = True
232
232
233 description = _(
233 description = _(
234 b'storage of filenames beginning with a period or '
234 b'storage of filenames beginning with a period or '
235 b'space may not work correctly'
235 b'space may not work correctly'
236 )
236 )
237
237
238 upgrademessage = _(
238 upgrademessage = _(
239 b'repository will be better able to store files '
239 b'repository will be better able to store files '
240 b'beginning with a space or period'
240 b'beginning with a space or period'
241 )
241 )
242
242
243
243
244 @registerformatvariant
244 @registerformatvariant
245 class generaldelta(requirementformatvariant):
245 class generaldelta(requirementformatvariant):
246 name = b'generaldelta'
246 name = b'generaldelta'
247
247
248 _requirement = requirements.GENERALDELTA_REQUIREMENT
248 _requirement = requirements.GENERALDELTA_REQUIREMENT
249
249
250 default = True
250 default = True
251
251
252 description = _(
252 description = _(
253 b'deltas within internal storage are unable to '
253 b'deltas within internal storage are unable to '
254 b'choose optimal revisions; repository is larger and '
254 b'choose optimal revisions; repository is larger and '
255 b'slower than it could be; interaction with other '
255 b'slower than it could be; interaction with other '
256 b'repositories may require extra network and CPU '
256 b'repositories may require extra network and CPU '
257 b'resources, making "hg push" and "hg pull" slower'
257 b'resources, making "hg push" and "hg pull" slower'
258 )
258 )
259
259
260 upgrademessage = _(
260 upgrademessage = _(
261 b'repository storage will be able to create '
261 b'repository storage will be able to create '
262 b'optimal deltas; new repository data will be '
262 b'optimal deltas; new repository data will be '
263 b'smaller and read times should decrease; '
263 b'smaller and read times should decrease; '
264 b'interacting with other repositories using this '
264 b'interacting with other repositories using this '
265 b'storage model should require less network and '
265 b'storage model should require less network and '
266 b'CPU resources, making "hg push" and "hg pull" '
266 b'CPU resources, making "hg push" and "hg pull" '
267 b'faster'
267 b'faster'
268 )
268 )
269
269
270
270
271 @registerformatvariant
271 @registerformatvariant
272 class sharesafe(requirementformatvariant):
272 class sharesafe(requirementformatvariant):
273 name = b'share-safe'
273 name = b'share-safe'
274 _requirement = requirements.SHARESAFE_REQUIREMENT
274 _requirement = requirements.SHARESAFE_REQUIREMENT
275
275
276 default = False
276 default = False
277
277
278 description = _(
278 description = _(
279 b'old shared repositories do not share source repository '
279 b'old shared repositories do not share source repository '
280 b'requirements and config. This leads to various problems '
280 b'requirements and config. This leads to various problems '
281 b'when the source repository format is upgraded or some new '
281 b'when the source repository format is upgraded or some new '
282 b'extensions are enabled.'
282 b'extensions are enabled.'
283 )
283 )
284
284
285 upgrademessage = _(
285 upgrademessage = _(
286 b'Upgrades a repository to share-safe format so that future '
286 b'Upgrades a repository to share-safe format so that future '
287 b'shares of this repository share its requirements and configs.'
287 b'shares of this repository share its requirements and configs.'
288 )
288 )
289
289
290 postdowngrademessage = _(
290 postdowngrademessage = _(
291 b'repository downgraded to not use share safe mode, '
291 b'repository downgraded to not use share safe mode, '
292 b'existing shares will not work and need to'
292 b'existing shares will not work and need to'
293 b' be reshared.'
293 b' be reshared.'
294 )
294 )
295
295
296 postupgrademessage = _(
296 postupgrademessage = _(
297 b'repository upgraded to share safe mode, existing'
297 b'repository upgraded to share safe mode, existing'
298 b' shares will still work in old non-safe mode. '
298 b' shares will still work in old non-safe mode. '
299 b'Re-share existing shares to use them in safe mode.'
299 b'Re-share existing shares to use them in safe mode.'
300 b' New shares will be created in safe mode.'
300 b' New shares will be created in safe mode.'
301 )
301 )
302
302
303 # upgrade only needs to change the requirements
303 # upgrade only needs to change the requirements
304 touches_filelogs = False
304 touches_filelogs = False
305 touches_manifests = False
305 touches_manifests = False
306 touches_changelog = False
306 touches_changelog = False
307 touches_requirements = True
307 touches_requirements = True
308
308
309
309
310 @registerformatvariant
310 @registerformatvariant
311 class sparserevlog(requirementformatvariant):
311 class sparserevlog(requirementformatvariant):
312 name = b'sparserevlog'
312 name = b'sparserevlog'
313
313
314 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
314 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
315
315
316 default = True
316 default = True
317
317
318 description = _(
318 description = _(
319 b'in order to limit disk reading and memory usage on older '
319 b'in order to limit disk reading and memory usage on older '
320 b'versions, the span of a delta chain from its root to its '
320 b'versions, the span of a delta chain from its root to its '
321 b'end is limited, regardless of the relevant data in this '
321 b'end is limited, regardless of the relevant data in this '
322 b'span. This can severely limit the ability of Mercurial '
322 b'span. This can severely limit the ability of Mercurial '
323 b'to build good chains of deltas, resulting in much more '
323 b'to build good chains of deltas, resulting in much more '
324 b'storage space being taken and limited reusability of '
324 b'storage space being taken and limited reusability of '
325 b'on-disk deltas during exchange.'
325 b'on-disk deltas during exchange.'
326 )
326 )
327
327
328 upgrademessage = _(
328 upgrademessage = _(
329 b'Revlog supports delta chains with more unused data '
329 b'Revlog supports delta chains with more unused data '
330 b'between payloads. These gaps will be skipped at read '
330 b'between payloads. These gaps will be skipped at read '
331 b'time. This allows for better delta chains, yielding '
331 b'time. This allows for better delta chains, yielding '
332 b'better compression and faster exchange with the server.'
332 b'better compression and faster exchange with the server.'
333 )
333 )
334
334
335
335
336 @registerformatvariant
336 @registerformatvariant
337 class persistentnodemap(requirementformatvariant):
337 class persistentnodemap(requirementformatvariant):
338 name = b'persistent-nodemap'
338 name = b'persistent-nodemap'
339
339
340 _requirement = requirements.NODEMAP_REQUIREMENT
340 _requirement = requirements.NODEMAP_REQUIREMENT
341
341
342 default = False
342 default = False
343
343
344 description = _(
344 description = _(
345 b'persist the node -> rev mapping on disk to speed up lookups'
345 b'persist the node -> rev mapping on disk to speed up lookups'
346 )
346 )
347
347
348 upgrademessage = _(b'Speed up revision lookup by node id.')
348 upgrademessage = _(b'Speed up revision lookup by node id.')
349
349
350
350
351 @registerformatvariant
351 @registerformatvariant
352 class copiessdc(requirementformatvariant):
352 class copiessdc(requirementformatvariant):
353 name = b'copies-sdc'
353 name = b'copies-sdc'
354
354
355 _requirement = requirements.COPIESSDC_REQUIREMENT
355 _requirement = requirements.COPIESSDC_REQUIREMENT
356
356
357 default = False
357 default = False
358
358
359 description = _(b'Stores copies information alongside changesets.')
359 description = _(b'Stores copies information alongside changesets.')
360
360
361 upgrademessage = _(
361 upgrademessage = _(
362 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
362 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
363 )
363 )
364
364
365
365
366 @registerformatvariant
366 @registerformatvariant
367 class revlogv2(requirementformatvariant):
367 class revlogv2(requirementformatvariant):
368 name = b'revlog-v2'
368 name = b'revlog-v2'
369 _requirement = requirements.REVLOGV2_REQUIREMENT
369 _requirement = requirements.REVLOGV2_REQUIREMENT
370 default = False
370 default = False
371 description = _(b'Version 2 of the revlog.')
371 description = _(b'Version 2 of the revlog.')
372 upgrademessage = _(b'very experimental')
372 upgrademessage = _(b'very experimental')
373
373
374
374
375 @registerformatvariant
375 @registerformatvariant
376 class changelogv2(requirementformatvariant):
377 name = b'changelog-v2'
378 _requirement = requirements.CHANGELOGV2_REQUIREMENT
379 default = False
380 description = _(b'An iteration of the revlog focused on changelog needs.')
381 upgrademessage = _(b'quite experimental')
382
383
384 @registerformatvariant
376 class removecldeltachain(formatvariant):
385 class removecldeltachain(formatvariant):
377 name = b'plain-cl-delta'
386 name = b'plain-cl-delta'
378
387
379 default = True
388 default = True
380
389
381 description = _(
390 description = _(
382 b'changelog storage is using deltas instead of '
391 b'changelog storage is using deltas instead of '
383 b'raw entries; changelog reading and any '
392 b'raw entries; changelog reading and any '
384 b'operation relying on changelog data are slower '
393 b'operation relying on changelog data are slower '
385 b'than they could be'
394 b'than they could be'
386 )
395 )
387
396
388 upgrademessage = _(
397 upgrademessage = _(
389 b'changelog storage will be reformatted to '
398 b'changelog storage will be reformatted to '
390 b'store raw entries; changelog reading will be '
399 b'store raw entries; changelog reading will be '
391 b'faster; changelog size may be reduced'
400 b'faster; changelog size may be reduced'
392 )
401 )
393
402
394 @staticmethod
403 @staticmethod
395 def fromrepo(repo):
404 def fromrepo(repo):
396 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
405 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
397 # changelogs with deltas.
406 # changelogs with deltas.
398 cl = repo.changelog
407 cl = repo.changelog
399 chainbase = cl.chainbase
408 chainbase = cl.chainbase
400 return all(rev == chainbase(rev) for rev in cl)
409 return all(rev == chainbase(rev) for rev in cl)
401
410
402 @staticmethod
411 @staticmethod
403 def fromconfig(repo):
412 def fromconfig(repo):
404 return True
413 return True
405
414
406
415
407 _has_zstd = (
416 _has_zstd = (
408 b'zstd' in util.compengines
417 b'zstd' in util.compengines
409 and util.compengines[b'zstd'].available()
418 and util.compengines[b'zstd'].available()
410 and util.compengines[b'zstd'].revlogheader()
419 and util.compengines[b'zstd'].revlogheader()
411 )
420 )
412
421
413
422
414 @registerformatvariant
423 @registerformatvariant
415 class compressionengine(formatvariant):
424 class compressionengine(formatvariant):
416 name = b'compression'
425 name = b'compression'
417
426
418 if _has_zstd:
427 if _has_zstd:
419 default = b'zstd'
428 default = b'zstd'
420 else:
429 else:
421 default = b'zlib'
430 default = b'zlib'
422
431
423 description = _(
432 description = _(
424 b'Compression algorithm used to compress data. '
433 b'Compression algorithm used to compress data. '
425 b'Some engines are faster than others'
434 b'Some engines are faster than others'
426 )
435 )
427
436
428 upgrademessage = _(
437 upgrademessage = _(
429 b'revlog content will be recompressed with the new algorithm.'
438 b'revlog content will be recompressed with the new algorithm.'
430 )
439 )
431
440
432 @classmethod
441 @classmethod
433 def fromrepo(cls, repo):
442 def fromrepo(cls, repo):
434 # we allow multiple compression engine requirements to co-exist because,
443 # we allow multiple compression engine requirements to co-exist because,
435 # strictly speaking, revlog seems to support mixed compression styles.
444 # strictly speaking, revlog seems to support mixed compression styles.
436 #
445 #
437 # The compression used for new entries will be "the last one"
446 # The compression used for new entries will be "the last one"
438 compression = b'zlib'
447 compression = b'zlib'
439 for req in repo.requirements:
448 for req in repo.requirements:
440 prefix = req.startswith
449 prefix = req.startswith
441 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
450 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
442 compression = req.split(b'-', 2)[2]
451 compression = req.split(b'-', 2)[2]
443 return compression
452 return compression
444
453
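The ``req.split(b'-', 2)[2]`` above keeps everything after the second dash, so an engine name that itself contains dashes survives intact; a quick worked example:

    assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
    # maxsplit=2 preserves dashes inside the engine name:
    assert b'exp-compression-some-engine'.split(b'-', 2)[2] == b'some-engine'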
445 @classmethod
454 @classmethod
446 def fromconfig(cls, repo):
455 def fromconfig(cls, repo):
447 compengines = repo.ui.configlist(b'format', b'revlog-compression')
456 compengines = repo.ui.configlist(b'format', b'revlog-compression')
448 # return the first valid value as the selection code would do
457 # return the first valid value as the selection code would do
449 for comp in compengines:
458 for comp in compengines:
450 if comp in util.compengines:
459 if comp in util.compengines:
451 e = util.compengines[comp]
460 e = util.compengines[comp]
452 if e.available() and e.revlogheader():
461 if e.available() and e.revlogheader():
453 return comp
462 return comp
454
463
455 # no valid compression found; let's display them all for clarity
464 # no valid compression found; let's display them all for clarity
456 return b','.join(compengines)
465 return b','.join(compengines)
457
466
458
467
459 @registerformatvariant
468 @registerformatvariant
460 class compressionlevel(formatvariant):
469 class compressionlevel(formatvariant):
461 name = b'compression-level'
470 name = b'compression-level'
462 default = b'default'
471 default = b'default'
463
472
464 description = _(b'compression level')
473 description = _(b'compression level')
465
474
466 upgrademessage = _(b'revlog content will be recompressed')
475 upgrademessage = _(b'revlog content will be recompressed')
467
476
468 @classmethod
477 @classmethod
469 def fromrepo(cls, repo):
478 def fromrepo(cls, repo):
470 comp = compressionengine.fromrepo(repo)
479 comp = compressionengine.fromrepo(repo)
471 level = None
480 level = None
472 if comp == b'zlib':
481 if comp == b'zlib':
473 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
482 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
474 elif comp == b'zstd':
483 elif comp == b'zstd':
475 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
484 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
476 if level is None:
485 if level is None:
477 return b'default'
486 return b'default'
478 return bytes(level)
487 return bytes(level)
479
488
480 @classmethod
489 @classmethod
481 def fromconfig(cls, repo):
490 def fromconfig(cls, repo):
482 comp = compressionengine.fromconfig(repo)
491 comp = compressionengine.fromconfig(repo)
483 level = None
492 level = None
484 if comp == b'zlib':
493 if comp == b'zlib':
485 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
494 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
486 elif comp == b'zstd':
495 elif comp == b'zstd':
487 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
496 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
488 if level is None:
497 if level is None:
489 return b'default'
498 return b'default'
490 return bytes(level)
499 return bytes(level)
491
500
492
501
493 def find_format_upgrades(repo):
502 def find_format_upgrades(repo):
494 """returns a list of format upgrades which can be perform on the repo"""
503 """returns a list of format upgrades which can be perform on the repo"""
495 upgrades = []
504 upgrades = []
496
505
497 # We could detect lack of revlogv1 and store here, but they were added
506 # We could detect lack of revlogv1 and store here, but they were added
498 # in 0.9.2 and we don't support upgrading repos without these
507 # in 0.9.2 and we don't support upgrading repos without these
499 # requirements, so let's not bother.
508 # requirements, so let's not bother.
500
509
501 for fv in allformatvariant:
510 for fv in allformatvariant:
502 if not fv.fromrepo(repo):
511 if not fv.fromrepo(repo):
503 upgrades.append(fv)
512 upgrades.append(fv)
504
513
505 return upgrades
514 return upgrades
506
515
507
516
508 def find_format_downgrades(repo):
517 def find_format_downgrades(repo):
509 """returns a list of format downgrades which will be performed on the repo
518 """returns a list of format downgrades which will be performed on the repo
510 because their config options are disabled"""
519 because their config options are disabled"""
511
520
512 downgrades = []
521 downgrades = []
513
522
514 for fv in allformatvariant:
523 for fv in allformatvariant:
515 if fv.name == b'compression':
524 if fv.name == b'compression':
516 # If there is a compression change between repository
525 # If there is a compression change between repository
517 # and config, destination repository compression will change
526 # and config, destination repository compression will change
518 # and current compression will be removed.
527 # and current compression will be removed.
519 if fv.fromrepo(repo) != fv.fromconfig(repo):
528 if fv.fromrepo(repo) != fv.fromconfig(repo):
520 downgrades.append(fv)
529 downgrades.append(fv)
521 continue
530 continue
522 # format variant exists in repo but does not exist in new repository
531 # format variant exists in repo but does not exist in new repository
523 # config
532 # config
524 if fv.fromrepo(repo) and not fv.fromconfig(repo):
533 if fv.fromrepo(repo) and not fv.fromconfig(repo):
525 downgrades.append(fv)
534 downgrades.append(fv)
526
535
527 return downgrades
536 return downgrades
528
537
529
538
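A small sketch tying the two helpers above together (``repo`` is assumed to be an open local repository handle):

    # variants missing from the repo that an upgrade could add:
    upgrades = [fv.name for fv in find_format_upgrades(repo)]
    # variants present in the repo but disabled by the current config:
    downgrades = [fv.name for fv in find_format_downgrades(repo)]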
530 ALL_OPTIMISATIONS = []
539 ALL_OPTIMISATIONS = []
531
540
532
541
533 def register_optimization(obj):
542 def register_optimization(obj):
534 ALL_OPTIMISATIONS.append(obj)
543 ALL_OPTIMISATIONS.append(obj)
535 return obj
544 return obj
536
545
537
546
538 register_optimization(
547 register_optimization(
539 improvement(
548 improvement(
540 name=b're-delta-parent',
549 name=b're-delta-parent',
541 type=OPTIMISATION,
550 type=OPTIMISATION,
542 description=_(
551 description=_(
543 b'deltas within internal storage will be recalculated to '
552 b'deltas within internal storage will be recalculated to '
544 b'choose an optimal base revision where this was not '
553 b'choose an optimal base revision where this was not '
545 b'already done; the size of the repository may shrink and '
554 b'already done; the size of the repository may shrink and '
546 b'various operations may become faster; the first time '
555 b'various operations may become faster; the first time '
547 b'this optimization is performed could slow down upgrade '
556 b'this optimization is performed could slow down upgrade '
548 b'execution considerably; subsequent invocations should '
557 b'execution considerably; subsequent invocations should '
549 b'not run noticeably slower'
558 b'not run noticeably slower'
550 ),
559 ),
551 upgrademessage=_(
560 upgrademessage=_(
552 b'deltas within internal storage will choose a new '
561 b'deltas within internal storage will choose a new '
553 b'base revision if needed'
562 b'base revision if needed'
554 ),
563 ),
555 )
564 )
556 )
565 )
557
566
558 register_optimization(
567 register_optimization(
559 improvement(
568 improvement(
560 name=b're-delta-multibase',
569 name=b're-delta-multibase',
561 type=OPTIMISATION,
570 type=OPTIMISATION,
562 description=_(
571 description=_(
563 b'deltas within internal storage will be recalculated '
572 b'deltas within internal storage will be recalculated '
564 b'against multiple base revision and the smallest '
573 b'against multiple base revision and the smallest '
565 b'difference will be used; the size of the repository may '
574 b'difference will be used; the size of the repository may '
566 b'shrink significantly when there are many merges; this '
575 b'shrink significantly when there are many merges; this '
567 b'optimization will slow down execution in proportion to '
576 b'optimization will slow down execution in proportion to '
568 b'the number of merges in the repository and the amount '
577 b'the number of merges in the repository and the amount '
569 b'of files in the repository; this slow down should not '
578 b'of files in the repository; this slow down should not '
570 b'be significant unless there are tens of thousands of '
579 b'be significant unless there are tens of thousands of '
571 b'files and thousands of merges'
580 b'files and thousands of merges'
572 ),
581 ),
573 upgrademessage=_(
582 upgrademessage=_(
574 b'deltas within internal storage will choose an '
583 b'deltas within internal storage will choose an '
575 b'optimal delta by computing deltas against multiple '
584 b'optimal delta by computing deltas against multiple '
576 b'parents; may slow down execution time '
585 b'parents; may slow down execution time '
577 b'significantly'
586 b'significantly'
578 ),
587 ),
579 )
588 )
580 )
589 )
581
590
582 register_optimization(
591 register_optimization(
583 improvement(
592 improvement(
584 name=b're-delta-all',
593 name=b're-delta-all',
585 type=OPTIMISATION,
594 type=OPTIMISATION,
586 description=_(
595 description=_(
587 b'deltas within internal storage will always be '
596 b'deltas within internal storage will always be '
588 b'recalculated without reusing prior deltas; this will '
597 b'recalculated without reusing prior deltas; this will '
589 b'likely make execution run several times slower; this '
598 b'likely make execution run several times slower; this '
590 b'optimization is typically not needed'
599 b'optimization is typically not needed'
591 ),
600 ),
592 upgrademessage=_(
601 upgrademessage=_(
593 b'deltas within internal storage will be fully '
602 b'deltas within internal storage will be fully '
594 b'recomputed; this will likely drastically slow down '
603 b'recomputed; this will likely drastically slow down '
595 b'execution time'
604 b'execution time'
596 ),
605 ),
597 )
606 )
598 )
607 )
599
608
600 register_optimization(
609 register_optimization(
601 improvement(
610 improvement(
602 name=b're-delta-fulladd',
611 name=b're-delta-fulladd',
603 type=OPTIMISATION,
612 type=OPTIMISATION,
604 description=_(
613 description=_(
605 b'every revision will be re-added as if it was new '
614 b'every revision will be re-added as if it was new '
606 b'content. It will go through the full storage '
615 b'content. It will go through the full storage '
607 b'mechanism giving extensions a chance to process it '
616 b'mechanism giving extensions a chance to process it '
608 b'(eg. lfs). This is similar to "re-delta-all" but even '
617 b'(eg. lfs). This is similar to "re-delta-all" but even '
609 b'slower since more logic is involved.'
618 b'slower since more logic is involved.'
610 ),
619 ),
611 upgrademessage=_(
620 upgrademessage=_(
612 b'each revision will be added as new content to the '
621 b'each revision will be added as new content to the '
613 b'internal storage; this will likely drastically slow '
622 b'internal storage; this will likely drastically slow '
614 b'down execution time, but some extensions might need '
623 b'down execution time, but some extensions might need '
615 b'it'
624 b'it'
616 ),
625 ),
617 )
626 )
618 )
627 )
619
628
620
629
621 def findoptimizations(repo):
630 def findoptimizations(repo):
622 """Determine optimisation that could be used during upgrade"""
631 """Determine optimisation that could be used during upgrade"""
623 # These are unconditionally added. There is logic later that figures out
632 # These are unconditionally added. There is logic later that figures out
624 # which ones to apply.
633 # which ones to apply.
625 return list(ALL_OPTIMISATIONS)
634 return list(ALL_OPTIMISATIONS)
626
635
627
636
628 def determine_upgrade_actions(
637 def determine_upgrade_actions(
629 repo, format_upgrades, optimizations, sourcereqs, destreqs
638 repo, format_upgrades, optimizations, sourcereqs, destreqs
630 ):
639 ):
631 """Determine upgrade actions that will be performed.
640 """Determine upgrade actions that will be performed.
632
641
633 Given a list of improvements as returned by ``find_format_upgrades`` and
642 Given a list of improvements as returned by ``find_format_upgrades`` and
634 ``findoptimizations``, determine the list of upgrade actions that
643 ``findoptimizations``, determine the list of upgrade actions that
635 will be performed.
644 will be performed.
636
645
637 The role of this function is to filter improvements if needed, apply
646 The role of this function is to filter improvements if needed, apply
638 recommended optimizations from the improvements list that make sense,
647 recommended optimizations from the improvements list that make sense,
639 etc.
648 etc.
640
649
641 Returns a list of action names.
650 Returns a list of action names.
642 """
651 """
643 newactions = []
652 newactions = []
644
653
645 for d in format_upgrades:
654 for d in format_upgrades:
646 name = d._requirement
655 name = d._requirement
647
656
648 # If the action is a requirement that doesn't show up in the
657 # If the action is a requirement that doesn't show up in the
649 # destination requirements, prune the action.
658 # destination requirements, prune the action.
650 if name is not None and name not in destreqs:
659 if name is not None and name not in destreqs:
651 continue
660 continue
652
661
653 newactions.append(d)
662 newactions.append(d)
654
663
655 newactions.extend(o for o in sorted(optimizations) if o not in newactions)
664 newactions.extend(o for o in sorted(optimizations) if o not in newactions)
656
665
657 # FUTURE consider adding some optimizations here for certain transitions.
666 # FUTURE consider adding some optimizations here for certain transitions.
658 # e.g. adding generaldelta could schedule parent redeltas.
667 # e.g. adding generaldelta could schedule parent redeltas.
659
668
660 return newactions
669 return newactions
661
670
662
671
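For orientation, a hedged sketch of feeding the helpers above into ``determine_upgrade_actions`` (the destination requirement set here is invented):

    format_upgrades = find_format_upgrades(repo)
    optimizations = findoptimizations(repo)
    actions = determine_upgrade_actions(
        repo,
        format_upgrades,
        optimizations,
        repo.requirements,                          # source requirements
        repo.requirements | {b'demo-requirement'},  # invented destination
    )
    # format upgrades whose requirement is absent from the destination
    # set are pruned; the chosen optimizations are appended afterwards.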
663 class UpgradeOperation(object):
672 class UpgradeOperation(object):
664 """represent the work to be done during an upgrade"""
673 """represent the work to be done during an upgrade"""
665
674
666 def __init__(
675 def __init__(
667 self,
676 self,
668 ui,
677 ui,
669 new_requirements,
678 new_requirements,
670 current_requirements,
679 current_requirements,
671 upgrade_actions,
680 upgrade_actions,
672 removed_actions,
681 removed_actions,
673 revlogs_to_process,
682 revlogs_to_process,
674 backup_store,
683 backup_store,
675 ):
684 ):
676 self.ui = ui
685 self.ui = ui
677 self.new_requirements = new_requirements
686 self.new_requirements = new_requirements
678 self.current_requirements = current_requirements
687 self.current_requirements = current_requirements
679 # list of upgrade actions the operation will perform
688 # list of upgrade actions the operation will perform
680 self.upgrade_actions = upgrade_actions
689 self.upgrade_actions = upgrade_actions
681 self._upgrade_actions_names = set([a.name for a in upgrade_actions])
690 self._upgrade_actions_names = set([a.name for a in upgrade_actions])
682 self.removed_actions = removed_actions
691 self.removed_actions = removed_actions
683 self.revlogs_to_process = revlogs_to_process
692 self.revlogs_to_process = revlogs_to_process
684 # requirements which will be added by the operation
693 # requirements which will be added by the operation
685 self._added_requirements = (
694 self._added_requirements = (
686 self.new_requirements - self.current_requirements
695 self.new_requirements - self.current_requirements
687 )
696 )
688 # requirements which will be removed by the operation
697 # requirements which will be removed by the operation
689 self._removed_requirements = (
698 self._removed_requirements = (
690 self.current_requirements - self.new_requirements
699 self.current_requirements - self.new_requirements
691 )
700 )
692 # requirements which will be preserved by the operation
701 # requirements which will be preserved by the operation
693 self._preserved_requirements = (
702 self._preserved_requirements = (
694 self.current_requirements & self.new_requirements
703 self.current_requirements & self.new_requirements
695 )
704 )
696 # optimizations which are not being used and which the user
705 # optimizations which are not being used and which the user
697 # may want to consider enabling
706 # may want to consider enabling
698 all_optimizations = findoptimizations(None)
707 all_optimizations = findoptimizations(None)
699 self.unused_optimizations = [
708 self.unused_optimizations = [
700 i for i in all_optimizations if i not in self.upgrade_actions
709 i for i in all_optimizations if i not in self.upgrade_actions
701 ]
710 ]
702
711
703 # delta reuse mode of this upgrade operation
712 # delta reuse mode of this upgrade operation
704 self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
713 self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
705 if b're-delta-all' in self._upgrade_actions_names:
714 if b're-delta-all' in self._upgrade_actions_names:
706 self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
715 self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
707 elif b're-delta-parent' in self._upgrade_actions_names:
716 elif b're-delta-parent' in self._upgrade_actions_names:
708 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
717 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
709 elif b're-delta-multibase' in self._upgrade_actions_names:
718 elif b're-delta-multibase' in self._upgrade_actions_names:
710 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
719 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
711 elif b're-delta-fulladd' in self._upgrade_actions_names:
720 elif b're-delta-fulladd' in self._upgrade_actions_names:
712 self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
721 self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
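# Note (restating the if/elif chain above): only one reuse mode
# applies per operation; b're-delta-all' takes precedence, then
# b're-delta-parent', then b're-delta-multibase', then
# b're-delta-fulladd'.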
713
722
714 # should this operation force re-delta of both parents
723 # should this operation force re-delta of both parents
715 self.force_re_delta_both_parents = (
724 self.force_re_delta_both_parents = (
716 b're-delta-multibase' in self._upgrade_actions_names
725 b're-delta-multibase' in self._upgrade_actions_names
717 )
726 )
718
727
719 # should this operation create a backup of the store
728 # should this operation create a backup of the store
720 self.backup_store = backup_store
729 self.backup_store = backup_store
721
730
722 # whether the operation touches different revlogs at all or not
731 # whether the operation touches different revlogs at all or not
723 self.touches_filelogs = self._touches_filelogs()
732 self.touches_filelogs = self._touches_filelogs()
724 self.touches_manifests = self._touches_manifests()
733 self.touches_manifests = self._touches_manifests()
725 self.touches_changelog = self._touches_changelog()
734 self.touches_changelog = self._touches_changelog()
726 # whether the operation touches requirements file or not
735 # whether the operation touches requirements file or not
727 self.touches_requirements = self._touches_requirements()
736 self.touches_requirements = self._touches_requirements()
728 self.touches_store = (
737 self.touches_store = (
729 self.touches_filelogs
738 self.touches_filelogs
730 or self.touches_manifests
739 or self.touches_manifests
731 or self.touches_changelog
740 or self.touches_changelog
732 )
741 )
733 # does the operation only touch the repository requirements
742 # does the operation only touch the repository requirements
734 self.requirements_only = (
743 self.requirements_only = (
735 self.touches_requirements and not self.touches_store
744 self.touches_requirements and not self.touches_store
736 )
745 )

    def _touches_filelogs(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_filelogs:
                return True
        for a in self.removed_actions:
            if a.touches_filelogs:
                return True
        return False

    def _touches_manifests(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_manifests:
                return True
        for a in self.removed_actions:
            if a.touches_manifests:
                return True
        return False

    def _touches_changelog(self):
        for a in self.upgrade_actions:
            # in optimisations, we re-process the revlogs again
            if a.type == OPTIMISATION:
                return True
            elif a.touches_changelog:
                return True
        for a in self.removed_actions:
            if a.touches_changelog:
                return True
        return False

    def _touches_requirements(self):
        for a in self.upgrade_actions:
            # optimisations are used to re-process revlogs and do not result
            # in a requirement being added or removed
            if a.type == OPTIMISATION:
                pass
            elif a.touches_requirements:
                return True
        for a in self.removed_actions:
            if a.touches_requirements:
                return True

        return False

    def _write_labeled(self, l, label):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for r in sorted(l):
            if not first:
                self.ui.write(b', ')
            self.ui.write(r, label=label)
            first = False
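
    # Illustrative example (editor commentary, not in the original change):
    # with l = [b'b', b'a'] and label = "upgrade-repo.requirement.added",
    # the loop above writes "a, b" - items sorted, separated by b', ', and
    # each item colored with the given label.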

    def print_requirements(self):
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b' preserved: '))
        self._write_labeled(
            self._preserved_requirements, "upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        if self._removed_requirements:
            self.ui.write(_(b' removed: '))
            self._write_labeled(
                self._removed_requirements, "upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b' added: '))
            self._write_labeled(
                self._added_requirements, "upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')
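
    # Sample output (editor sketch; requirement names are illustrative):
    #
    #   requirements
    #    preserved: revlogv1, store
    #    removed: generaldelta
    #    added: sparserevlog
    #
    # The removed/added lines appear only when the corresponding sets are
    # non-empty.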

    def print_optimisations(self):
        optimisations = [
            a for a in self.upgrade_actions if a.type == OPTIMISATION
        ]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        for a in self.upgrade_actions:
            self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        if not self.revlogs_to_process:
            self.ui.write((b'no revlogs to process\n'))
        else:
            self.ui.write((b'processed revlogs:\n'))
            for r in sorted(self.revlogs_to_process):
                self.ui.write((b' - %s\n' % r))
            self.ui.write((b'\n'))

    def print_unused_optimizations(self):
        for i in self.unused_optimizations:
            self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))

    def has_upgrade_action(self, name):
        """Check whether the upgrade operation will perform this action"""
        return name in self._upgrade_actions_names

    def print_post_op_messages(self):
        """print post upgrade operation warning messages"""
        for a in self.upgrade_actions:
            if a.postupgrademessage is not None:
                self.ui.warn(b'%s\n' % a.postupgrademessage)
        for a in self.removed_actions:
            if a.postdowngrademessage is not None:
                self.ui.warn(b'%s\n' % a.postdowngrademessage)


### Code checking if a repository can go through the upgrade process at all. #


def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return {
        # Introduced in Mercurial 0.9.2.
        requirements.STORE_REQUIREMENT,
    }


def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }


def check_revlog_version(reqs):
    """Check that the requirements contain at least one Revlog version"""
    all_revlogs = {
        requirements.REVLOGV1_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
    }
    if not all_revlogs.intersection(reqs):
        msg = _(b'cannot upgrade repository; missing a revlog version')
        raise error.Abort(msg)
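

# Illustrative usage (editor sketch, not part of the original change):
# check_revlog_version(repo.requirements) returns silently when either the
# revlogv1 or the revlogv2 requirement is present, and raises error.Abort
# otherwise - e.g. for a hypothetical requirements set containing only
# b'store'.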


def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""

    check_revlog_version(repo.requirements)
    required = requiredsourcerequirements(repo)
    missingreqs = required - repo.requirements
    if missingreqs:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        missingreqs = b', '.join(sorted(missingreqs))
        raise error.Abort(msg % missingreqs)

    blocking = blocksourcerequirements(repo)
    blockingreqs = blocking & repo.requirements
    if blockingreqs:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        blockingreqs = b', '.join(sorted(blockingreqs))
        raise error.Abort(m % blockingreqs)


### Verify the validity of the planned requirement changes ####################


def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported


def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        requirements.DOTENCODE_REQUIREMENT,
        requirements.FNCACHE_REQUIREMENT,
        requirements.GENERALDELTA_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,  # allowed in case of downgrade
        requirements.STORE_REQUIREMENT,
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported
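

# Illustrative sketch (editor addition, not part of the original change):
# the docstring above invites extensions to monkeypatch this function. A
# minimal wrapper - assuming this module is mercurial.upgrade_utils.actions
# and b'exp-myfeature' is a hypothetical requirement name - could look like:
#
#   from mercurial import extensions
#   from mercurial.upgrade_utils import actions
#
#   def _supported(orig, repo):
#       reqs = orig(repo)
#       reqs.add(b'exp-myfeature')  # hypothetical custom requirement
#       return reqs
#
#   extensions.wrapfunction(actions, 'supporteddestrequirements', _supported)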


def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        requirements.DOTENCODE_REQUIREMENT,
        requirements.FNCACHE_REQUIREMENT,
        requirements.GENERALDELTA_REQUIREMENT,
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
    }
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported


def check_requirements_changes(repo, new_reqs):
    old_reqs = repo.requirements
    check_revlog_version(repo.requirements)
    support_removal = supportremovedrequirements(repo)
    no_remove_reqs = old_reqs - new_reqs - support_removal
    if no_remove_reqs:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        no_remove_reqs = b', '.join(sorted(no_remove_reqs))
        raise error.Abort(msg % no_remove_reqs)

    support_addition = allowednewrequirements(repo)
    no_add_reqs = new_reqs - old_reqs - support_addition
    if no_add_reqs:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        no_add_reqs = b', '.join(sorted(no_add_reqs))
        raise error.Abort(m + no_add_reqs)

    supported = supporteddestrequirements(repo)
    unsupported_reqs = new_reqs - supported
    if unsupported_reqs:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        unsupported_reqs = b', '.join(sorted(unsupported_reqs))
        raise error.Abort(msg % unsupported_reqs)
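

# Summary note (editor addition, not part of the original change): the
# requirement delta is validated with three set operations - removals must
# appear in supportremovedrequirements(), additions in
# allowednewrequirements(), and the resulting set must be a subset of
# supporteddestrequirements(). E.g., with old_reqs = {b'revlogv1', b'store'}
# and new_reqs adding a hypothetical b'exp-unknown', the second check aborts
# because that addition is not on the allow list.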
The requested commit or file is too big and content was truncated.